def plots_memmixtcurves_fig6fig13(self, num_repetitions=1, cache=True):
        """
            Plot the memory fidelity and mixture proportions across all set sizes T.
        """

        if self.result_em_fits_stats is None or not cache:
            search_progress = progress.Progress(self.fit_exp.T_space.size * num_repetitions)
            # kappa, mixt_target, mixt_nontarget, mixt_random, ll
            result_em_fits = np.nan * np.ones((self.fit_exp.T_space.size, 5, num_repetitions))

            for T_i, T in enumerate(self.fit_exp.T_space):
                self.fit_exp.setup_experimental_stimuli_T(T)

                for repet_i in xrange(num_repetitions):
                    print "%.2f%%, %s left - %s" % (
                        search_progress.percentage(),
                        search_progress.time_remaining_str(),
                        search_progress.eta_str(),
                    )
                    print "Fit for T=%d, %d/%d" % (T, repet_i + 1, num_repetitions)

                    self.fit_exp.sampler.force_sampling_round()

                    # Fit mixture model
                    curr_params_fit = self.fit_exp.sampler.fit_mixture_model(use_all_targets=False)
                    result_em_fits[T_i, :, repet_i] = [
                        curr_params_fit[key]
                        for key in ("kappa", "mixt_target", "mixt_nontargets_sum", "mixt_random", "train_LL")
                    ]

                    search_progress.increment()

            # Get stats of EM Fits
            self.result_em_fits_stats = dict(
                mean=utils.nanmean(result_em_fits, axis=-1), std=utils.nanstd(result_em_fits, axis=-1)
            )

        # Do the plots
        if self.do_memcurves_fig6 and self.do_mixtcurves_fig13:
            f, axes = plt.subplots(nrows=2, figsize=(14, 18))
        else:
            f, ax = plt.subplots(figsize=(14, 9))
            axes = [ax]
        ax_i = 0

        if self.do_memcurves_fig6:
            self.__plot_memcurves(self.result_em_fits_stats, suptitle_text="Memory fidelity", ax=axes[ax_i])
            ax_i += 1

        if self.do_mixtcurves_fig13:
            self.__plot_mixtcurves(self.result_em_fits_stats, suptitle_text="Mixture proportions", ax=axes[ax_i])

        return axes
def plots_specific_stimuli_hierarchical(data_pbs, generator_module=None):
    '''
        Reload and plot the behaviour of the mixed population code on specific
        stimuli of 3 items.
    '''

    #### SETUP
    #
    savefigs = True
    savedata = True

    plot_per_min_dist_all = False
    specific_plots_paper = False
    plots_emfit_allitems = False
    plot_min_distance_effect = True

    should_fit_allitems_model = True
    # caching_emfit_filename = None
    caching_emfit_filename = os.path.join(generator_module.pbs_submission_infos['simul_out_dir'], 'cache_emfitallitems_uniquekappa.pickle')


    colormap = None  # or 'cubehelix'
    plt.rcParams['font.size'] = 16
    #
    #### /SETUP

    print "Order parameters: ", generator_module.dict_parameters_range.keys()

    result_all_precisions_mean = utils.nanmean(np.squeeze(data_pbs.dict_arrays['result_all_precisions']['results']), axis=-1)
    result_all_precisions_std = utils.nanstd(np.squeeze(data_pbs.dict_arrays['result_all_precisions']['results']), axis=-1)
    result_em_fits_mean = utils.nanmean(np.squeeze(data_pbs.dict_arrays['result_em_fits']['results']), axis=-1)
    result_em_fits_std = utils.nanstd(np.squeeze(data_pbs.dict_arrays['result_em_fits']['results']), axis=-1)
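    # Convert the fitted concentration kappa (column 0 of the EM fits) into a circular standard
    # deviation, which is easier to compare against the angular separation between items.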
    result_em_kappastddev_mean = utils.nanmean(utils.kappa_to_stddev(np.squeeze(data_pbs.dict_arrays['result_em_fits']['results'])[..., 0, :]), axis=-1)
    result_em_kappastddev_std = utils.nanstd(utils.kappa_to_stddev(np.squeeze(data_pbs.dict_arrays['result_em_fits']['results'])[..., 0, :]), axis=-1)
    result_responses_all = np.squeeze(data_pbs.dict_arrays['result_responses']['results'])
    result_target_all = np.squeeze(data_pbs.dict_arrays['result_target']['results'])
    result_nontargets_all = np.squeeze(data_pbs.dict_arrays['result_nontargets']['results'])

    all_args = data_pbs.loaded_data['args_list']

    nb_repetitions = np.squeeze(data_pbs.dict_arrays['result_em_fits']['results']).shape[-1]
    print nb_repetitions
    nb_repetitions = result_responses_all.shape[-1]
    print nb_repetitions
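    # nb_repetitions is read from two arrays and printed, presumably as a sanity check that they
    # agree; the second value (from result_responses_all) is the one actually used below.
    # K: number of nontargets per trial, N: number of trials per repetition.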
    K = result_nontargets_all.shape[-2]
    N = result_responses_all.shape[-2]


    enforce_min_distance_space = data_pbs.loaded_data['parameters_uniques']['enforce_min_distance']
    sigmax_space = data_pbs.loaded_data['parameters_uniques']['sigmax']

    MMlower_valid_space = data_pbs.loaded_data['datasets_list'][0]['MMlower_valid_space']
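    # Fraction of the unit budget assigned to layer two: first column of each (M, M_layer_one)
    # pair, normalised by the total of the first pair (assumed constant across rows).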
    ratio_space = MMlower_valid_space[:, 0]/float(np.sum(MMlower_valid_space[0]))

    print enforce_min_distance_space
    print sigmax_space
    print MMlower_valid_space
    print result_all_precisions_mean.shape, result_em_fits_mean.shape

    dataio = DataIO(output_folder=generator_module.pbs_submission_infos['simul_out_dir'] + '/outputs/', label='global_' + dataset_infos['save_output_filename'])

    # Reload cached emfitallitems
    if caching_emfit_filename is not None:
        if os.path.exists(caching_emfit_filename):
            # Got file, open it and try to use its contents
            try:
                with open(caching_emfit_filename, 'rb') as file_in:
                    # Load and assign values
                    cached_data = pickle.load(file_in)
                    result_emfitallitems = cached_data['result_emfitallitems']
                    should_fit_allitems_model = False

            except IOError:
                print "Error while loading ", caching_emfit_filename, "falling back to computing the EM fits"

    if plot_per_min_dist_all:
        # Do one plot per min distance.
        for min_dist_i, min_dist in enumerate(enforce_min_distance_space):
            # Show precision (linear scale)
            utils.pcolor_2d_data(result_all_precisions_mean[min_dist_i].T, x=ratio_space, y=sigmax_space, xlabel='ratio layer two', ylabel='sigma_x', title='Precision, min_dist=%.3f' % min_dist)
            if savefigs:
                dataio.save_current_figure('precision_permindist_mindist%.2f_ratiosigmax_{label}_{unique_id}.pdf' % min_dist)

            # Show log precision
            utils.pcolor_2d_data(result_all_precisions_mean[min_dist_i].T, x=ratio_space, y=sigmax_space, xlabel='ratio layer two', ylabel='sigma_x', title='Precision, min_dist=%.3f' % min_dist, log_scale=True)
            if savefigs:
                dataio.save_current_figure('logprecision_permindist_mindist%.2f_ratiosigmax_{label}_{unique_id}.pdf' % min_dist)


            # Plot estimated model precision (kappa)
            utils.pcolor_2d_data(result_em_fits_mean[min_dist_i, ..., 0].T, x=ratio_space, y=sigmax_space, xlabel='ratio layer two', ylabel='sigma_x', title='EM precision, min_dist=%.3f' % min_dist, log_scale=False)
            if savefigs:
                dataio.save_current_figure('logemprecision_permindist_mindist%.2f_ratiosigmax_{label}_{unique_id}.pdf' % min_dist)

            # Plot estimated Target, nontarget and random mixture components, in multiple subplots
            _, axes = plt.subplots(1, 3, figsize=(18, 6))
            plt.subplots_adjust(left=0.05, right=0.97, wspace = 0.3, bottom=0.15)
            utils.pcolor_2d_data(result_em_fits_mean[min_dist_i, ..., 1].T, x=ratio_space, y=sigmax_space, xlabel='ratio', ylabel='sigma_x', title='Target, min_dist=%.3f' % min_dist, log_scale=False, ax_handle=axes[0], ticks_interpolate=5)
            utils.pcolor_2d_data(result_em_fits_mean[min_dist_i, ..., 2].T, x=ratio_space, y=sigmax_space, xlabel='ratio', ylabel='sigma_x', title='Nontarget, min_dist=%.3f' % min_dist, log_scale=False, ax_handle=axes[1], ticks_interpolate=5)
            utils.pcolor_2d_data(result_em_fits_mean[min_dist_i, ..., 3].T, x=ratio_space, y=sigmax_space, xlabel='ratio', ylabel='sigma_x', title='Random, min_dist=%.3f' % min_dist, log_scale=False, ax_handle=axes[2], ticks_interpolate=5)

            if savefigs:
                dataio.save_current_figure('em_mixtureprobs_permindist_mindist%.2f_ratiosigmax_{label}_{unique_id}.pdf' % min_dist)

            # Plot Log-likelihood of Mixture model, sanity check
            utils.pcolor_2d_data(result_em_fits_mean[min_dist_i, ..., -1].T, x=ratio_space, y=sigmax_space, xlabel='ratio', ylabel='sigma_x', title='EM loglik, min_dist=%.3f' % min_dist, log_scale=False)
            if savefigs:
                dataio.save_current_figure('em_loglik_permindist_mindist%.2f_ratiosigmax_{label}_{unique_id}.pdf' % min_dist)

    if specific_plots_paper:
        # We need to choose 3 levels of min_distances
        target_sigmax = 0.25
        target_mindist_low = 0.09
        target_mindist_medium = 0.36
        target_mindist_high = 1.5

        sigmax_level_i = np.argmin(np.abs(sigmax_space - target_sigmax))
        min_dist_level_low_i = np.argmin(np.abs(enforce_min_distance_space - target_mindist_low))
        min_dist_level_medium_i = np.argmin(np.abs(enforce_min_distance_space - target_mindist_medium))
        min_dist_level_high_i = np.argmin(np.abs(enforce_min_distance_space - target_mindist_high))

        ## Do for each distance
        # for min_dist_i in [min_dist_level_low_i, min_dist_level_medium_i, min_dist_level_high_i]:
        for min_dist_i in xrange(enforce_min_distance_space.size):

            # Plot precision
            if False:
                utils.plot_mean_std_area(ratio_space, result_all_precisions_mean[min_dist_i, sigmax_level_i], result_all_precisions_std[min_dist_i, sigmax_level_i]) #, xlabel='Ratio conjunctivity', ylabel='Precision of recall')
                # plt.title('Min distance %.3f' % enforce_min_distance_space[min_dist_i])
                plt.ylim([0, np.max(result_all_precisions_mean[min_dist_i, sigmax_level_i] + result_all_precisions_std[min_dist_i, sigmax_level_i])])

                if savefigs:
                    dataio.save_current_figure('mindist%.2f_precisionrecall_forpaper_{label}_{unique_id}.pdf' % enforce_min_distance_space[min_dist_i])

            # Plot kappa fitted
            ax_handle = utils.plot_mean_std_area(ratio_space, result_em_fits_mean[min_dist_i, sigmax_level_i, :, 0], result_em_fits_std[min_dist_i, sigmax_level_i, :, 0]) #, xlabel='Ratio conjunctivity', ylabel='Fitted kappa')
            # Add distance between items in kappa units
            dist_items_kappa = utils.stddev_to_kappa(enforce_min_distance_space[min_dist_i])
            ax_handle.plot(ratio_space, dist_items_kappa*np.ones(ratio_space.size), 'k--', linewidth=3)
            plt.ylim([-0.1, np.max((np.max(result_em_fits_mean[min_dist_i, sigmax_level_i, :, 0] + result_em_fits_std[min_dist_i, sigmax_level_i, :, 0]), 1.1*dist_items_kappa))])
            # plt.title('Min distance %.3f' % enforce_min_distance_space[min_dist_i])
            if savefigs:
                dataio.save_current_figure('mindist%.2f_emkappa_forpaper_{label}_{unique_id}.pdf' % enforce_min_distance_space[min_dist_i])

            # Plot kappa-stddev fitted. Easier to visualize
            ax_handle = utils.plot_mean_std_area(ratio_space, result_em_kappastddev_mean[min_dist_i, sigmax_level_i], result_em_kappastddev_std[min_dist_i, sigmax_level_i]) #, xlabel='Ratio conjunctivity', ylabel='Fitted kappa_stddev')
            # Add distance between items in std dev units
            dist_items_std = enforce_min_distance_space[min_dist_i]
            ax_handle.plot(ratio_space, dist_items_std*np.ones(ratio_space.size), 'k--', linewidth=3)
            # plt.title('Min distance %.3f' % enforce_min_distance_space[min_dist_i])
            plt.ylim([0, 1.1*np.max((np.max(result_em_kappastddev_mean[min_dist_i, sigmax_level_i] + result_em_kappastddev_std[min_dist_i, sigmax_level_i]), dist_items_std))])
            if savefigs:
                dataio.save_current_figure('mindist%.2f_emkappastddev_forpaper_{label}_{unique_id}.pdf' % enforce_min_distance_space[min_dist_i])


            if False:
                # Plot LLH
                utils.plot_mean_std_area(ratio_space, result_em_fits_mean[min_dist_i, sigmax_level_i, :, -1], result_em_fits_std[min_dist_i, sigmax_level_i, :, -1]) #, xlabel='Ratio conjunctivity', ylabel='Loglikelihood of Mixture model fit')
                # plt.title('Min distance %.3f' % enforce_min_distance_space[min_dist_i])
                if savefigs:
                    dataio.save_current_figure('mindist%.2f_emllh_forpaper_{label}_{unique_id}.pdf' % enforce_min_distance_space[min_dist_i])

                # Plot mixture parameters, std
                utils.plot_multiple_mean_std_area(ratio_space, result_em_fits_mean[min_dist_i, sigmax_level_i, :, 1:4].T, result_em_fits_std[min_dist_i, sigmax_level_i, :, 1:4].T)
                plt.ylim([0.0, 1.1])
                # plt.title('Min distance %.3f' % enforce_min_distance_space[min_dist_i])
                # plt.legend("Target", "Non-target", "Random")
                if savefigs:
                    dataio.save_current_figure('mindist%.2f_emprobs_forpaper_{label}_{unique_id}.pdf' % enforce_min_distance_space[min_dist_i])

                # Mixture parameters, SEM
                utils.plot_multiple_mean_std_area(ratio_space, result_em_fits_mean[min_dist_i, sigmax_level_i, :, 1:4].T, result_em_fits_std[min_dist_i, sigmax_level_i, :, 1:4].T/np.sqrt(nb_repetitions))
                plt.ylim([0.0, 1.1])
                # plt.title('Min distance %.3f' % enforce_min_distance_space[min_dist_i])
                # plt.legend("Target", "Non-target", "Random")
                if savefigs:
                    dataio.save_current_figure('mindist%.2f_emprobs_forpaper_sem_{label}_{unique_id}.pdf' % enforce_min_distance_space[min_dist_i])

    if plots_emfit_allitems:
        # We need to choose 3 levels of min_distances
        target_sigmax = 0.25
        target_mindist_low = 0.15
        target_mindist_medium = 0.36
        target_mindist_high = 1.5

        sigmax_level_i = np.argmin(np.abs(sigmax_space - target_sigmax))
        min_dist_level_low_i = np.argmin(np.abs(enforce_min_distance_space - target_mindist_low))
        min_dist_level_medium_i = np.argmin(np.abs(enforce_min_distance_space - target_mindist_medium))
        min_dist_level_high_i = np.argmin(np.abs(enforce_min_distance_space - target_mindist_high))

        min_dist_i_plotting_space = np.array([min_dist_level_low_i, min_dist_level_medium_i, min_dist_level_high_i])

        if should_fit_allitems_model:

            # kappa, mixt_target, mixt_nontargets (K), mixt_random, LL, bic
            # result_emfitallitems = np.empty((min_dist_i_plotting_space.size, ratio_space.size, 2*K+5))*np.nan
            result_emfitallitems = np.empty((enforce_min_distance_space.size, ratio_space.size, K+5))*np.nan

            ## Do for each distance
            # for min_dist_plotting_i, min_dist_i in enumerate(min_dist_i_plotting_space):
            for min_dist_i in xrange(enforce_min_distance_space.size):
                # Fit the mixture model
                for ratio_i, ratio in enumerate(ratio_space):
                    print "Refitting EM all items. Ratio:", ratio, "Dist:", enforce_min_distance_space[min_dist_i]
                    em_fit = em_circularmixture_allitems_uniquekappa.fit(
                        result_responses_all[min_dist_i, sigmax_level_i, ratio_i].flatten(),
                        result_target_all[min_dist_i, sigmax_level_i, ratio_i].flatten(),
                        result_nontargets_all[min_dist_i, sigmax_level_i, ratio_i].transpose((0, 2, 1)).reshape((N*nb_repetitions, K)))

                    result_emfitallitems[min_dist_i, ratio_i] = [em_fit['kappa'], em_fit['mixt_target']] + em_fit['mixt_nontargets'].tolist() + [em_fit[key] for key in ('mixt_random', 'train_LL', 'bic')]

            # Save everything to a file, for faster later plotting
            if caching_emfit_filename is not None:
                try:
                    with open(caching_emfit_filename, 'wb') as filecache_out:
                        data_em = dict(result_emfitallitems=result_emfitallitems)
                        pickle.dump(data_em, filecache_out, protocol=2)
                except IOError:
                    print "Error writing out to caching file ", caching_emfit_filename


        ## Plots now, for each distance!
        # for min_dist_plotting_i, min_dist_i in enumerate(min_dist_i_plotting_space):
        for min_dist_i in xrange(enforce_min_distance_space.size):

            # Plot now
            _, ax = plt.subplots()
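            # Columns 1:5 are [target, nontarget 1, nontarget 2, random], assuming K = 2 nontargets.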
            ax.plot(ratio_space, result_emfitallitems[min_dist_i, :, 1:5], linewidth=3)
            plt.ylim([0.0, 1.1])
            plt.legend(['Target', 'Nontarget 1', 'Nontarget 2', 'Random'], loc='upper left')

            if savefigs:
                dataio.save_current_figure('mindist%.2f_emprobsfullitems_{label}_{unique_id}.pdf' % enforce_min_distance_space[min_dist_i])

    if plot_min_distance_effect:
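        # Approximate conjunctive receptive field width: assuming the M*ratio conjunctive units tile
        # the 2D feature torus, each field spans roughly 2*pi / sqrt(M*ratio) radians per dimension.
        # The ratios below renormalise the mixture weights so that target + nontargets sum to one.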
        conj_receptive_field_size = 2.*np.pi/((all_args[0]['M']*ratio_space)**0.5)

        target_vs_nontargets_mindist_ratio = result_emfitallitems[..., 1]/np.sum(result_emfitallitems[..., 1:4], axis=-1)
        nontargetsmean_vs_targnontarg_mindist_ratio = np.mean(result_emfitallitems[..., 2:4]/np.sum(result_emfitallitems[..., 1:4], axis=-1)[..., np.newaxis], axis=-1)

        for ratio_conj_i, ratio_conj in enumerate(ratio_space):
            # Do one plot per ratio, putting the receptive field size on each
            f, ax = plt.subplots()

            ax.plot(enforce_min_distance_space[1:], target_vs_nontargets_mindist_ratio[1:, ratio_conj_i], linewidth=3, label='target mixture')
            ax.plot(enforce_min_distance_space[1:], nontargetsmean_vs_targnontarg_mindist_ratio[1:, ratio_conj_i], linewidth=3, label='non-target mixture')
            # ax.plot(enforce_min_distance_space[1:], result_emfitallitems[1:, ratio_conj_i, 1:5], linewidth=3)

            ax.axvline(x=conj_receptive_field_size[ratio_conj_i]/2., color='k', linestyle='--', linewidth=2)
            ax.axvline(x=conj_receptive_field_size[ratio_conj_i]*2., color='r', linestyle='--', linewidth=2)

            plt.legend(loc='upper left')
            plt.grid()
            # ax.set_xlabel('Stimuli separation')
            # ax.set_ylabel('Ratio Target to Non-targets')
            plt.axis('tight')
            ax.set_ylim([0.0, 1.0])
            ax.set_xlim([enforce_min_distance_space[1:].min(), enforce_min_distance_space[1:].max()])

            if savefigs:
                dataio.save_current_figure('ratio%.2f_mindistpred_ratiotargetnontarget_{label}_{unique_id}.pdf' % ratio_conj)



    variables_to_save = ['nb_repetitions']

    if savedata:
        dataio.save_variables_default(locals(), variables_to_save)
        dataio.make_link_output_to_dropbox(dropbox_current_experiment_folder='specific_stimuli')

    plt.show()

    return locals()
def plots_memory_curves(data_pbs, generator_module=None):
    '''
        Reload and plot the memory curves of a mixed code.
        Can also use the marginal Fisher information and the fitted mixture model.
    '''

    #### SETUP
    #
    savefigs = True
    savedata = True

    plot_pcolor_fit_precision_to_fisherinfo = False
    plot_selected_memory_curves = False
    plot_best_memory_curves = False
    plot_subplots_persigmax = True

    colormap = None  # or 'cubehelix'
    plt.rcParams['font.size'] = 16
    #
    #### /SETUP

    print "Order parameters: ", generator_module.dict_parameters_range.keys()

    result_all_precisions_mean = utils.nanmean(np.squeeze(data_pbs.dict_arrays['result_all_precisions']['results']), axis=-1)
    result_all_precisions_std = utils.nanstd(np.squeeze(data_pbs.dict_arrays['result_all_precisions']['results']), axis=-1)
    # ratio_space, sigmax_space, T, 5
    result_em_fits_mean = utils.nanmean(np.squeeze(data_pbs.dict_arrays['result_em_fits']['results']), axis=-1)
    result_em_fits_std = utils.nanstd(np.squeeze(data_pbs.dict_arrays['result_em_fits']['results']), axis=-1)
    result_marginal_inv_fi_mean = utils.nanmean(np.squeeze(data_pbs.dict_arrays['result_marginal_inv_fi']['results']), axis=-1)
    result_marginal_inv_fi_std = utils.nanstd(np.squeeze(data_pbs.dict_arrays['result_marginal_inv_fi']['results']), axis=-1)
    result_marginal_fi_mean = utils.nanmean(np.squeeze(1./data_pbs.dict_arrays['result_marginal_inv_fi']['results']), axis=-1)
    result_marginal_fi_std = utils.nanstd(np.squeeze(1./data_pbs.dict_arrays['result_marginal_inv_fi']['results']), axis=-1)
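    # Note: the marginal FI statistics above average 1/inv_FI across repetitions (a mean of
    # inverses), which is not the same as inverting the averaged inv_FI.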

    ratioconj_space = data_pbs.loaded_data['parameters_uniques']['ratio_conj']
    sigmax_space = data_pbs.loaded_data['parameters_uniques']['sigmax']
    T_space = data_pbs.loaded_data['datasets_list'][0]['T_space']

    print ratioconj_space
    print sigmax_space
    print T_space
    print result_all_precisions_mean.shape, result_em_fits_mean.shape, result_marginal_inv_fi_mean.shape

    dataio = DataIO.DataIO(output_folder=generator_module.pbs_submission_infos['simul_out_dir'] + '/outputs/', label='global_' + dataset_infos['save_output_filename'])

    ## Load Experimental data
    data_simult = load_experimental_data.load_data_simult(data_dir=os.path.normpath(os.path.join(os.path.split(load_experimental_data.__file__)[0], '../../experimental_data/')), fit_mixture_model=True)
    memory_experimental_precision = data_simult['precision_nitems_theo']
    memory_experimental_kappa = np.array([data['kappa'] for _, data in data_simult['em_fits_nitems']['mean'].items()])
    memory_experimental_kappa_std = np.array([data['kappa'] for _, data in data_simult['em_fits_nitems']['std'].items()])

    experim_datadir = os.environ.get('WORKDIR_DROP', os.path.split(load_experimental_data.__file__)[0])
    data_bays2009 = load_experimental_data.load_data_bays09(data_dir=os.path.normpath(os.path.join(experim_datadir, '../../experimental_data/')), fit_mixture_model=True)
    bays09_experimental_mixtures_mean = data_bays2009['em_fits_nitems_arrays']['mean']
    # add interpolated points for 3 and 5 items
    bays3 = (bays09_experimental_mixtures_mean[:, 2] + bays09_experimental_mixtures_mean[:, 1])/2.
    bays5 = (bays09_experimental_mixtures_mean[:, -1] + bays09_experimental_mixtures_mean[:, -2])/2.
    bays09_experimental_mixtures_mean_compatible = np.c_[bays09_experimental_mixtures_mean[:,:2], bays3, bays09_experimental_mixtures_mean[:, 2], bays5]
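    # Bays et al. (2009) used set sizes 1, 2, 4 and 6; the model covers 1-5 items, so the 3- and
    # 5-item mixture proportions above are linearly interpolated from the neighbouring set sizes.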

    # Boost non-targets
    # bays09_experimental_mixtures_mean_compatible[1] *= 1.5
    # bays09_experimental_mixtures_mean_compatible[2] /= 1.5
    # bays09_experimental_mixtures_mean_compatible /= np.sum(bays09_experimental_mixtures_mean_compatible, axis=0)

    # Compute some landscapes of fit!
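    # Each distance is a sum over set sizes of squared deviations between a model curve and a
    # reference (experimental data or marginal Fisher information); smaller means a better fit.
    # They are used below to select the best-fitting (ratio_conj, sigmax) parameter cells.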
    dist_diff_precision_margfi = np.sum(np.abs(result_all_precisions_mean*2. - result_marginal_fi_mean[..., 0])**2., axis=-1)
    dist_ratio_precision_margfi = np.sum(np.abs((result_all_precisions_mean*2.)/result_marginal_fi_mean[..., 0] - 1.0)**2., axis=-1)
    dist_diff_emkappa_margfi = np.sum(np.abs(result_em_fits_mean[..., 0]*2. - result_marginal_fi_mean[..., 0])**2., axis=-1)
    dist_ratio_emkappa_margfi = np.sum(np.abs((result_em_fits_mean[..., 0]*2.)/result_marginal_fi_mean[..., 0] - 1.0)**2., axis=-1)

    dist_diff_precision_experim = np.sum(np.abs(result_all_precisions_mean - memory_experimental_precision)**2., axis=-1)
    dist_diff_emkappa_experim = np.sum(np.abs(result_em_fits_mean[..., 0] - memory_experimental_kappa)**2., axis=-1)

    dist_diff_precision_experim_1item = np.abs(result_all_precisions_mean[..., 0] - memory_experimental_precision[0])**2.
    dist_diff_precision_experim_2item = np.abs(result_all_precisions_mean[..., 1] - memory_experimental_precision[1])**2.
    dist_diff_precision_margfi_1item = np.abs(result_all_precisions_mean[..., 0]*2. - result_marginal_fi_mean[..., 0, 0])**2.
    dist_diff_emkappa_experim_1item = np.abs(result_em_fits_mean[..., 0, 0] - memory_experimental_kappa[0])**2.
    dist_diff_margfi_experim_1item = np.abs(result_marginal_fi_mean[..., 0, 0] - memory_experimental_precision[0])**2.

    dist_diff_emkappa_mixtures_bays09 = np.sum(np.sum((result_em_fits_mean[..., 1:4] - bays09_experimental_mixtures_mean_compatible[1:].T)**2., axis=-1), axis=-1)
    dist_diff_modelfits_experfits_bays09 = np.sum(np.sum((result_em_fits_mean[..., :4] - bays09_experimental_mixtures_mean_compatible.T)**2., axis=-1), axis=-1)


    if plot_pcolor_fit_precision_to_fisherinfo:
        # Check fit between precision and fisher info
        utils.pcolor_2d_data(dist_diff_precision_margfi, log_scale=True, x=ratioconj_space, y=sigmax_space, xlabel='ratio conj', ylabel='sigmax')

        if savefigs:
            dataio.save_current_figure('match_precision_margfi_log_pcolor_{label}_{unique_id}.pdf')

        # utils.pcolor_2d_data(dist_diff_precision_margfi, x=ratioconj_space, y=sigmax_space[2:], xlabel='ratio conj', ylabel='sigmax')
        # if savefigs:
        #    dataio.save_current_figure('match_precision_margfi_pcolor_{label}_{unique_id}.pdf')

        utils.pcolor_2d_data(dist_ratio_precision_margfi, x=ratioconj_space, y=sigmax_space, xlabel='ratio conj', ylabel='sigmax', log_scale=True)
        if savefigs:
            dataio.save_current_figure('match_ratio_precision_margfi_log_pcolor_{label}_{unique_id}.pdf')

        utils.pcolor_2d_data(dist_diff_emkappa_margfi, x=ratioconj_space, y=sigmax_space, xlabel='ratio conj', ylabel='sigmax', log_scale=True)
        if savefigs:
            dataio.save_current_figure('match_diff_emkappa_margfi_log_pcolor_{label}_{unique_id}.pdf')

        utils.pcolor_2d_data(dist_ratio_emkappa_margfi, x=ratioconj_space, y=sigmax_space, xlabel='ratio conj', ylabel='sigmax', log_scale=True)
        if savefigs:
            dataio.save_current_figure('match_ratio_emkappa_margfi_log_pcolor_{label}_{unique_id}.pdf')


        utils.pcolor_2d_data(dist_diff_precision_experim, x=ratioconj_space, y=sigmax_space, xlabel='ratio conj', ylabel='sigmax', log_scale=True)
        if savefigs:
            dataio.save_current_figure('match_diff_precision_experim_log_pcolor_{label}_{unique_id}.pdf')


        utils.pcolor_2d_data(dist_diff_emkappa_experim, x=ratioconj_space, y=sigmax_space, xlabel='ratio conj', ylabel='sigmax', log_scale=True)
        if savefigs:
            dataio.save_current_figure('match_diff_emkappa_experim_log_pcolor_{label}_{unique_id}.pdf')


        utils.pcolor_2d_data(dist_diff_precision_margfi*dist_diff_emkappa_margfi*dist_diff_precision_experim*dist_diff_emkappa_experim, x=ratioconj_space, y=sigmax_space, xlabel='ratio conj', ylabel='sigmax', log_scale=True)
        if savefigs:
            dataio.save_current_figure('match_bigmultiplication_log_pcolor_{label}_{unique_id}.pdf')

        utils.pcolor_2d_data(dist_diff_precision_margfi_1item, log_scale=True, x=ratioconj_space, y=sigmax_space, xlabel='ratio conj', ylabel='sigmax')
        if savefigs:
            dataio.save_current_figure('match_diff_precision_margfi_1item_log_pcolor_{label}_{unique_id}.pdf')


        utils.pcolor_2d_data(dist_diff_precision_experim_1item, log_scale=True, x=ratioconj_space, y=sigmax_space, xlabel='ratio conj', ylabel='sigmax')
        if savefigs:
            dataio.save_current_figure('match_diff_precision_experim_1item_log_pcolor_{label}_{unique_id}.pdf')


        utils.pcolor_2d_data(dist_diff_emkappa_experim_1item, log_scale=True, x=ratioconj_space, y=sigmax_space, xlabel='ratio conj', ylabel='sigmax')
        if savefigs:
            dataio.save_current_figure('match_diff_emkappa_experim_1item_log_pcolor_{label}_{unique_id}.pdf')


        utils.pcolor_2d_data(dist_diff_margfi_experim_1item, log_scale=True, x=ratioconj_space, y=sigmax_space, xlabel='ratio conj', ylabel='sigmax')
        if savefigs:
            dataio.save_current_figure('match_diff_margfi_experim_1item_log_pcolor_{label}_{unique_id}.pdf')

        utils.pcolor_2d_data(dist_diff_precision_experim_2item, log_scale=True, x=ratioconj_space, y=sigmax_space, xlabel='ratio conj', ylabel='sigmax')
        if savefigs:
            dataio.save_current_figure('match_diff_precision_experim_2item_log_pcolor_{label}_{unique_id}.pdf')

        utils.pcolor_2d_data(dist_diff_emkappa_mixtures_bays09, log_scale=True, x=ratioconj_space, y=sigmax_space, xlabel='ratio conj', ylabel='sigmax')
        if savefigs:
            dataio.save_current_figure('match_diff_mixtures_experbays09_pcolor_{label}_{unique_id}.pdf')

        utils.pcolor_2d_data(dist_diff_modelfits_experfits_bays09, log_scale=True, x=ratioconj_space, y=sigmax_space, xlabel='ratio conj', ylabel='sigmax')
        if savefigs:
            dataio.save_current_figure('match_diff_emfits_experbays09_pcolor_{label}_{unique_id}.pdf')

    # Macro plot
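    # The closures below capture the *_mean / *_std arrays and plot the memory / mixture curves for
    # a single (ratio_conj, sigmax) cell; they are called further down for selected or best cells.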
    def mem_plot_precision(sigmax_i, ratioconj_i):
        ax = utils.plot_mean_std_area(T_space, memory_experimental_precision, np.zeros(T_space.size), linewidth=3, fmt='o-', markersize=8, label='Experimental data')

        ax = utils.plot_mean_std_area(T_space, result_all_precisions_mean[ratioconj_i, sigmax_i], result_all_precisions_std[ratioconj_i, sigmax_i], ax_handle=ax, linewidth=3, fmt='o-', markersize=8, label='Precision of samples')

        # ax = utils.plot_mean_std_area(T_space, 0.5*result_marginal_fi_mean[..., 0][ratioconj_i, sigmax_i], 0.5*result_marginal_fi_std[..., 0][ratioconj_i, sigmax_i], ax_handle=ax, linewidth=3, fmt='o-', markersize=8, label='Marginal Fisher Information')

        # ax = utils.plot_mean_std_area(T_space, result_em_fits_mean[..., 0][ratioconj_i, sigmax_i], result_em_fits_std[..., 0][ratioconj_i, sigmax_i], ax_handle=ax, xlabel='Number of items', ylabel="Inverse variance $[rad^{-2}]$", linewidth=3, fmt='o-', markersize=8, label='Fitted kappa')

        ax.set_title('ratio_conj %.2f, sigmax %.2f' % (ratioconj_space[ratioconj_i], sigmax_space[sigmax_i]))
        ax.legend()
        ax.set_xlim([0.9, 5.1])
        ax.set_xticks(range(1, 6))
        ax.set_xticklabels(range(1, 6))

        if savefigs:
            dataio.save_current_figure('memorycurves_precision_ratioconj%.2fsigmax%.2f_{label}_{unique_id}.pdf' % (ratioconj_space[ratioconj_i], sigmax_space[sigmax_i]))

    def mem_plot_kappa(sigmax_i, ratioconj_i):
        ax = utils.plot_mean_std_area(T_space, memory_experimental_kappa, memory_experimental_kappa_std, linewidth=3, fmt='o-', markersize=8, label='Experimental data')

        ax = utils.plot_mean_std_area(T_space, result_em_fits_mean[..., 0][ratioconj_i, sigmax_i], result_em_fits_std[..., 0][ratioconj_i, sigmax_i], xlabel='Number of items', ylabel="Inverse variance $[rad^{-2}]$", ax_handle=ax, linewidth=3, fmt='o-', markersize=8, label='Fitted kappa')

        # ax = utils.plot_mean_std_area(T_space, 0.5*result_marginal_fi_mean[..., 0][ratioconj_i, sigmax_i], 0.5*result_marginal_fi_std[..., 0][ratioconj_i, sigmax_i], ax_handle=ax, linewidth=3, fmt='o-', markersize=8, label='Marginal Fisher Information')

        ax.set_title('ratio_conj %.2f, sigmax %.2f' % (ratioconj_space[ratioconj_i], sigmax_space[sigmax_i]))
        ax.legend()
        ax.set_xlim([0.9, 5.1])
        ax.set_xticks(range(1, 6))
        ax.set_xticklabels(range(1, 6))

        if savefigs:
            dataio.save_current_figure('memorycurves_kappa_ratioconj%.2fsigmax%.2f_{label}_{unique_id}.pdf' % (ratioconj_space[ratioconj_i], sigmax_space[sigmax_i]))

    def em_plot(sigmax_i, ratioconj_i):
        # TODO: finish checking this.
        f, ax = plt.subplots()
        ax2 = ax.twinx()

        # left axis, kappa
        ax = utils.plot_mean_std_area(T_space, result_em_fits_mean[..., 0][ratioconj_i, sigmax_i], result_em_fits_std[..., 0][ratioconj_i, sigmax_i], xlabel='Number of items', ylabel="Inverse variance $[rad^{-2}]$", ax_handle=ax, linewidth=3, fmt='o-', markersize=8, label='Fitted kappa', color='k')

        # Right axis, mixture probabilities
        utils.plot_mean_std_area(T_space, result_em_fits_mean[..., 1][ratioconj_i, sigmax_i], result_em_fits_std[..., 1][ratioconj_i, sigmax_i], xlabel='Number of items', ylabel="Mixture probabilities", ax_handle=ax2, linewidth=3, fmt='o-', markersize=8, label='Target')
        utils.plot_mean_std_area(T_space, result_em_fits_mean[..., 2][ratioconj_i, sigmax_i], result_em_fits_std[..., 2][ratioconj_i, sigmax_i], xlabel='Number of items', ylabel="Mixture probabilities", ax_handle=ax2, linewidth=3, fmt='o-', markersize=8, label='Nontarget')
        utils.plot_mean_std_area(T_space, result_em_fits_mean[..., 3][ratioconj_i, sigmax_i], result_em_fits_std[..., 3][ratioconj_i, sigmax_i], xlabel='Number of items', ylabel="Mixture probabilities", ax_handle=ax2, linewidth=3, fmt='o-', markersize=8, label='Random')

        lines, labels = ax.get_legend_handles_labels()
        lines2, labels2 = ax2.get_legend_handles_labels()
        ax.legend(lines + lines2, labels + labels2)

        ax.set_title('ratio_conj %.2f, sigmax %.2f' % (ratioconj_space[ratioconj_i], sigmax_space[sigmax_i]))
        ax.set_xlim([0.9, 5.1])
        ax.set_xticks(range(1, 6))
        ax.set_xticklabels(range(1, 6))

        f.canvas.draw()

        if savefigs:
            dataio.save_current_figure('memorycurves_emfits_ratioconj%.2fsigmax%.2f_{label}_{unique_id}.pdf' % (ratioconj_space[ratioconj_i], sigmax_space[sigmax_i]))

    def em_plot_paper(sigmax_i, ratioconj_i):
        f, ax = plt.subplots()

        # Right axis, mixture probabilities
        utils.plot_mean_std_area(T_space, result_em_fits_mean[..., 1][ratioconj_i, sigmax_i], result_em_fits_std[..., 1][ratioconj_i, sigmax_i], xlabel='Number of items', ylabel="Mixture probabilities", ax_handle=ax, linewidth=3, fmt='o-', markersize=5, label='Target')
        utils.plot_mean_std_area(T_space, result_em_fits_mean[..., 2][ratioconj_i, sigmax_i], result_em_fits_std[..., 2][ratioconj_i, sigmax_i], xlabel='Number of items', ylabel="Mixture probabilities", ax_handle=ax, linewidth=3, fmt='o-', markersize=5, label='Nontarget')
        utils.plot_mean_std_area(T_space, result_em_fits_mean[..., 3][ratioconj_i, sigmax_i], result_em_fits_std[..., 3][ratioconj_i, sigmax_i], xlabel='Number of items', ylabel="Mixture probabilities", ax_handle=ax, linewidth=3, fmt='o-', markersize=5, label='Random')

        ax.legend(prop={'size':15})

        ax.set_title('ratio_conj %.2f, sigmax %.2f' % (ratioconj_space[ratioconj_i], sigmax_space[sigmax_i]))
        ax.set_xlim([1.0, 5.0])
        ax.set_ylim([0.0, 1.1])
        ax.set_xticks(range(1, 6))
        ax.set_xticklabels(range(1, 6))

        f.canvas.draw()

        if savefigs:
            dataio.save_current_figure('memorycurves_emfits_paper_ratioconj%.2fsigmax%.2f_{label}_{unique_id}.pdf' % (ratioconj_space[ratioconj_i], sigmax_space[sigmax_i]))


    if plot_selected_memory_curves:
        selected_values = [[0.84, 0.23], [0.84, 0.19]]

        for current_values in selected_values:
            # Find the indices
            ratioconj_i     = np.argmin(np.abs(current_values[0] - ratioconj_space))
            sigmax_i        = np.argmin(np.abs(current_values[1] - sigmax_space))


            mem_plot_precision(sigmax_i, ratioconj_i)
            mem_plot_kappa(sigmax_i, ratioconj_i)

    if plot_best_memory_curves:
        # Best precision fit
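        # argmin over axis 1 picks, for each ratio_conj index, the sigmax index closest to the data.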
        best_axis2_i_all = np.argmin(dist_diff_precision_experim, axis=1)

        for axis1_i, best_axis2_i in enumerate(best_axis2_i_all):
            mem_plot_precision(best_axis2_i, axis1_i)

        # Best kappa fit
        best_axis2_i_all = np.argmin(dist_diff_emkappa_experim, axis=1)

        for axis1_i, best_axis2_i in enumerate(best_axis2_i_all):
            mem_plot_kappa(best_axis2_i, axis1_i)
            em_plot(best_axis2_i, axis1_i)

        # Best em parameters fit to Bays09
        best_axis2_i_all = np.argmin(dist_diff_modelfits_experfits_bays09, axis=1)

        for axis1_i, best_axis2_i in enumerate(best_axis2_i_all):
            mem_plot_kappa(best_axis2_i, axis1_i)
            # em_plot(best_axis2_i, axis1_i)
            em_plot_paper(best_axis2_i, axis1_i)

    if plot_subplots_persigmax:
        # Do subplots with ratio_conj on x, one plot per T and different figures per sigmax. Looks a bit like reloader_hierarchical_constantMMlower_maxlik_allresponses_221213.py

        sigmax_selected_indices = np.array([np.argmin((sigmax_space - sigmax_select)**2.) for sigmax_select in [0.1, 0.2, 0.3, 0.4, 0.5]])

        for sigmax_select_i in sigmax_selected_indices:
            # two plots per sigmax.

            f, axes = plt.subplots(nrows=T_space[-1], ncols=1, sharex=True, figsize=(10, 12))
            # all_lines_bis = []

            for T_i, T in enumerate(T_space):
                # Plot EM mixtures
                utils.plot_mean_std_area(ratioconj_space, result_em_fits_mean[:, sigmax_select_i, T_i, 1], result_em_fits_std[:, sigmax_select_i, T_i, 1], ax_handle=axes[T_i], linewidth=3, fmt='o-', markersize=5)
                utils.plot_mean_std_area(ratioconj_space, result_em_fits_mean[:, sigmax_select_i, T_i, 2], result_em_fits_std[:, sigmax_select_i, T_i, 2], ax_handle=axes[T_i], linewidth=3, fmt='o-', markersize=5)
                utils.plot_mean_std_area(ratioconj_space, result_em_fits_mean[:, sigmax_select_i, T_i, 3], result_em_fits_std[:, sigmax_select_i, T_i, 3], ax_handle=axes[T_i], linewidth=3, fmt='o-', markersize=5)

                # ratio_MMlower_space, result_emfits_filtered[:, i, 1:4], 0*result_emfits_filtered[:, i, 1:4], ax_handle=axes[T_i], linewidth=2) #, color=all_lines[T_i].get_color())
                # curr_lines = axes[T_i].plot(ratio_MMlower_space, results_precision_filtered[:, i], linewidth=2, color=all_lines[T_i].get_color())
                axes[T_i].grid()
                axes[T_i].set_xticks(np.linspace(0., 1.0, 5))
                axes[T_i].set_xlim((0.0, 1.0))
                # axes[T_i].set_yticks([])
                # axes[T_i].set_ylim((np.min(result_emfits_filtered[:, i, 0]), result_emfits_filtered[max_ind, i, 0]*1.1))
                axes[T_i].set_ylim((0.0, 1.05))
                axes[T_i].locator_params(axis='y', tight=True, nbins=4)
                # all_lines_bis.extend(curr_lines)

            axes[0].set_title('Sigmax: %.3f' % sigmax_space[sigmax_select_i])
            axes[-1].set_xlabel('Proportion of conjunctive units')

            if savefigs:
                dataio.save_current_figure('results_subplots_emmixtures_sigmax%.2f_{label}_global_{unique_id}.pdf' % sigmax_space[sigmax_select_i])

            f, axes = plt.subplots(nrows=T_space[-1], ncols=1, sharex=True, figsize=(10, 12))

            # Kappa kappa
            for T_i, T in enumerate(T_space):

                # Plot kappa mixture
                utils.plot_mean_std_area(ratioconj_space, result_em_fits_mean[:, sigmax_select_i, T_i, 0], result_em_fits_std[:, sigmax_select_i, T_i, 0], ax_handle=axes[T_i], linewidth=3, fmt='o-', markersize=5)
                # utils.plot_mean_std_area(ratio_MMlower_space, result_emfits_filtered[:, i, 0], 0*result_emfits_filtered[:, i, 0], ax_handle=axes[T_i], linewidth=2) #, color=all_lines[T_i].get_color())
                # curr_lines = axes[T_i].plot(ratio_MMlower_space, results_precision_filtered[:, i], linewidth=2, color=all_lines[T_i].get_color())
                axes[T_i].grid()
                axes[T_i].set_xticks(np.linspace(0., 1.0, 5))
                axes[T_i].set_xlim((0.0, 1.0))
                # axes[T_i].set_yticks([])
                # axes[T_i].set_ylim((np.min(result_emfits_filtered[:, i, 0]), result_emfits_filtered[max_ind, i, 0]*1.1))
                # axes[T_i].set_ylim((0.0, 1.0))
                axes[T_i].locator_params(axis='y', tight=True, nbins=4)
                # all_lines_bis.extend(curr_lines)

            axes[0].set_title('Sigmax: %.3f' % sigmax_space[sigmax_select_i])
            axes[-1].set_xlabel('Proportion of conjunctive units')

            # f.subplots_adjust(right=0.75)
            # plt.figlegend(all_lines_bis, ['%d item' % i + 's'*(i>1) for i in xrange(1, T+1)], loc='right', bbox_to_anchor=(1.0, 0.5))

            if savefigs:
                dataio.save_current_figure('results_subplots_emkappa_sigmax%.2f_{label}_global_{unique_id}.pdf' % sigmax_space[sigmax_select_i])



    all_args = data_pbs.loaded_data['args_list']
    variables_to_save = ['memory_experimental_precision', 'memory_experimental_kappa', 'bays09_experimental_mixtures_mean_compatible']

    if savedata:
        dataio.save_variables_default(locals(), variables_to_save)

        dataio.make_link_output_to_dropbox(dropbox_current_experiment_folder='memory_curves')

    plt.show()

    return locals()
def plots_3dvolume_hierarchical_M_Mlayerone(data_pbs, generator_module=None):
    '''
        Reload 3D volume runs from PBS and plot them

    '''

    #### SETUP
    #
    savefigs = True
    savedata = False

    plots_pcolors = False
    plots_singleaxe = False
    plots_multipleaxes = True
    plots_multipleaxes_emfits = True

    load_fit_mixture_model = True

    # caching_emfit_filename = None
    caching_emfit_filename = os.path.join(generator_module.pbs_submission_infos['simul_out_dir'], 'outputs', 'cache_emfit.pickle')

    plt.rcParams['font.size'] = 16
    #
    #### /SETUP

    dataio = DataIO(output_folder=generator_module.pbs_submission_infos['simul_out_dir'] + '/outputs/', label='global_' + dataset_infos['save_output_filename'])


    print "Order parameters: ", generator_module.dict_parameters_range.keys()

    results_precision_constant_M_Mlower = np.squeeze(utils.nanmean(data_pbs.dict_arrays['results_precision_M_T']['results'], axis=-1))
    results_precision_constant_M_Mlower_std = np.squeeze(utils.nanstd(data_pbs.dict_arrays['results_precision_M_T']['results'], axis=-1))
    results_responses = np.squeeze(data_pbs.dict_arrays['result_responses']['results'])
    results_targets = np.squeeze(data_pbs.dict_arrays['result_targets']['results'])
    results_nontargets = np.squeeze(data_pbs.dict_arrays['result_nontargets']['results'])
    results_emfits_M_T = np.squeeze(data_pbs.dict_arrays['results_emfits_M_T']['results'])

    M_space = data_pbs.loaded_data['parameters_uniques']['M']
    M_layer_one_space = data_pbs.loaded_data['parameters_uniques']['M_layer_one']
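    # Fraction of the fixed unit budget (target_M_total) allocated to the top layer in each run.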
    ratio_MMlower_space = M_space / float(generator_module.filtering_function_parameters['target_M_total'])

    filtering_indices = (np.arange(M_space.size), np.arange(-M_layer_one_space.size, 0)[::-1])
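    # This pairs the i-th M with the i-th largest M_layer_one, i.e. walks the anti-diagonal of the
    # (M, M_layer_one) grid, presumably keeping the total number of units close to target_M_total.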

    T = results_precision_constant_M_Mlower.shape[-1]
    T_space = np.arange(T)

    print M_space
    print M_layer_one_space
    print results_precision_constant_M_Mlower.shape
    # print results_precision_constant_M_Mlower

    results_precision_filtered = results_precision_constant_M_Mlower[filtering_indices]
    results_precision_filtered_std = results_precision_constant_M_Mlower_std[filtering_indices]
    results_responses_filtered = results_responses[filtering_indices]
    results_targets_filtered = results_targets[filtering_indices]
    results_nontargets_filtered = results_nontargets[filtering_indices]
    results_emfits_M_T_filtered = results_emfits_M_T[filtering_indices]

    results_precision_filtered_smoothed = np.apply_along_axis(smooth, 0, results_precision_filtered, *(10, 'bartlett'))
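    # `smooth` is presumably the usual moving-window helper (Bartlett window, length 10); the
    # smoothed curves are only used for the "smoothnorm" plots below.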

    if load_fit_mixture_model:
        # Fit the mixture model on the samples

        if caching_emfit_filename is not None:
            if os.path.exists(caching_emfit_filename):
                # Got file, open it and try to use its contents
                try:
                    with open(caching_emfit_filename, 'rb') as file_in:
                        # Load and assign values
                        cached_data = pickle.load(file_in)
                        result_emfits_filtered = cached_data['result_emfits_filtered']
                        print "Loading from cache file %s" % caching_emfit_filename

                        load_fit_mixture_model = False

                except IOError:
                    print "Error while loading ", caching_emfit_filename, "falling back to computing the EM fits"


        if load_fit_mixture_model:

            result_emfits_filtered = np.nan*np.empty((ratio_MMlower_space.size, T, 5))
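            # columns: kappa, mixt_target, mixt_nontargets_sum, mixt_random, train_LL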

            # Fit EM model
            print "fitting EM model"
            for ratio_MMlower_i, ratio_MMlower in enumerate(ratio_MMlower_space):
                for T_i in T_space:
                    if np.any(~np.isnan(results_responses_filtered[ratio_MMlower_i, T_i])):
                        print "ratio MM, T:", ratio_MMlower, T_i+1
                        curr_em_fits = em_circularmixture_allitems_uniquekappa.fit(results_responses_filtered[ratio_MMlower_i, T_i], results_targets_filtered[ratio_MMlower_i, T_i], results_nontargets_filtered[ratio_MMlower_i, T_i, :, :T_i])

                        curr_em_fits['mixt_nontargets_sum'] = np.sum(curr_em_fits['mixt_nontargets'])
                        result_emfits_filtered[ratio_MMlower_i, T_i] = [curr_em_fits[key] for key in ('kappa', 'mixt_target', 'mixt_nontargets_sum', 'mixt_random', 'train_LL')]

            # Save everything to a file, for faster later plotting
            if caching_emfit_filename is not None:
                try:
                    with open(caching_emfit_filename, 'wb') as filecache_out:
                        data_emfit = dict(result_emfits_filtered=result_emfits_filtered)
                        pickle.dump(data_emfit, filecache_out, protocol=2)
                        print "cache file %s written" % caching_emfit_filename
                except IOError:
                    print "Error writing out to caching file ", caching_emfit_filename


    if plots_pcolors:

        utils.pcolor_2d_data(results_precision_filtered, log_scale=True, x=ratio_MMlower_space, y=np.arange(1, T+1), xlabel="$\\frac{M}{M+M_{layer one}}$", ylabel='$T$', ticks_interpolate=10)
        plt.plot(np.argmax(results_precision_filtered, axis=0), np.arange(results_precision_filtered.shape[-1]), 'ko', markersize=10)

        if savefigs:
            dataio.save_current_figure('results_2dlog_{label}_global_{unique_id}.pdf')

        utils.pcolor_2d_data(results_precision_filtered/np.max(results_precision_filtered, axis=0), x=ratio_MMlower_space, y=np.arange(1, T+1), xlabel="$\\frac{M}{M+M_{layer one}}$", ylabel='$T$', ticks_interpolate=10)
        plt.plot(np.argmax(results_precision_filtered, axis=0), np.arange(results_precision_filtered.shape[-1]), 'ko', markersize=10)

        if savefigs:
            dataio.save_current_figure('results_2dnorm_{label}_global_{unique_id}.pdf')


        utils.pcolor_2d_data(results_precision_filtered_smoothed/np.max(results_precision_filtered_smoothed, axis=0), x=ratio_MMlower_space, y=np.arange(1, T+1), xlabel="$\\frac{M}{M+M_{layer one}}$", ylabel='$T$', ticks_interpolate=10)
        plt.plot(np.argmax(results_precision_filtered_smoothed, axis=0), np.arange(results_precision_filtered_smoothed.shape[-1]), 'ko', markersize=10)

        if savefigs:
            dataio.save_current_figure('results_2dsmoothnorm_{label}_global_{unique_id}.pdf')

    if plots_singleaxe:

        plt.figure()
        plt.plot(ratio_MMlower_space, results_precision_filtered_smoothed/np.max(results_precision_filtered_smoothed, axis=0), linewidth=2)
        plt.plot(ratio_MMlower_space[np.argmax(results_precision_filtered_smoothed, axis=0)], np.ones(results_precision_filtered_smoothed.shape[-1]), 'ro', markersize=10)
        plt.grid()
        plt.ylim((0., 1.1))
        plt.subplots_adjust(right=0.8)
        plt.legend(['%d item' % i + 's'*(i>1) for i in xrange(1, T+1)], loc='center right', bbox_to_anchor=(1.3, 0.5))
        plt.xticks(np.linspace(0, 1.0, 5))

        if savefigs:
            dataio.save_current_figure('results_1dsmoothnormsame_{label}_global_{unique_id}.pdf')

        plt.figure()
        moved_results_precision_filtered_smoothed = 1.2*np.arange(results_precision_filtered_smoothed.shape[-1]) + results_precision_filtered_smoothed/np.max(results_precision_filtered_smoothed, axis=0)
        all_lines = []
        for i, max_i in enumerate(np.argmax(results_precision_filtered_smoothed, axis=0)):
            curr_lines = plt.plot(ratio_MMlower_space, moved_results_precision_filtered_smoothed[:, i], linewidth=2)
            plt.plot(ratio_MMlower_space[max_i], moved_results_precision_filtered_smoothed[max_i, i], 'o', markersize=10, color=curr_lines[0].get_color())
            all_lines.extend(curr_lines)

        plt.plot(np.linspace(0.0, 1.0, 100), np.outer(np.ones(100), 1.2*np.arange(1, results_precision_filtered_smoothed.shape[-1])), 'k:')
        plt.grid()
        plt.legend(all_lines, ['%d item' % i + 's'*(i>1) for i in xrange(1, T+1)], loc='best')
        plt.ylim((0., moved_results_precision_filtered_smoothed.max()*1.05))
        plt.yticks([])
        plt.xticks(np.linspace(0, 1.0, 5))

        if savefigs:
            dataio.save_current_figure('results_1dsmoothnorm_{label}_global_{unique_id}.pdf')

    if plots_multipleaxes:
        # Plot smooth precisions, all T on multiple subplots.
        # all_lines = []
        f, axes = plt.subplots(nrows=T, ncols=1, sharex=True, figsize=(10, 12))
        for i, max_ind in enumerate(np.argmax(results_precision_filtered_smoothed, axis=0)):
            curr_lines = axes[i].plot(ratio_MMlower_space, results_precision_filtered_smoothed[:, i], linewidth=2) # , color=all_lines[i].get_color())
            axes[i].plot(ratio_MMlower_space[max_ind], results_precision_filtered_smoothed[max_ind, i], 'o', markersize=10, color=curr_lines[0].get_color())
            axes[i].grid()
            axes[i].set_xticks(np.linspace(0., 1.0, 5))
            axes[i].set_xlim((0.0, 1.0))
            # axes[i].set_yticks([])
            axes[i].set_ylim((np.min(results_precision_filtered_smoothed[:, i]), results_precision_filtered_smoothed[max_ind, i]*1.1))
            axes[i].locator_params(axis='y', tight=True, nbins=4)

            # all_lines.extend(curr_lines)

        f.subplots_adjust(right=0.75)
        # plt.figlegend(all_lines, ['%d item' % i + 's'*(i>1) for i in xrange(1, T+1)], loc='right', bbox_to_anchor=(1.0, 0.5))
        # f.tight_layout()

        if savefigs:
            dataio.save_current_figure('results_subplots_1dsmoothnorm_{label}_global_{unique_id}.pdf')


        # Plot precisions with standard deviation around
        f, axes = plt.subplots(nrows=T, ncols=1, sharex=True, figsize=(10, 12))
        # all_lines_bis = []
        for i, max_ind in enumerate(np.argmax(results_precision_filtered, axis=0)):
            utils.plot_mean_std_area(ratio_MMlower_space, results_precision_filtered[:, i], results_precision_filtered_std[:, i], ax_handle=axes[i], linewidth=2) #, color=all_lines[i].get_color())
            # curr_lines = axes[i].plot(ratio_MMlower_space, results_precision_filtered[:, i], linewidth=2, color=all_lines[i].get_color())
            axes[i].grid()
            axes[i].set_xticks(np.linspace(0., 1.0, 5))
            axes[i].set_xlim((0.0, 1.0))
            # axes[i].set_yticks([])
            axes[i].set_ylim((np.min(results_precision_filtered[:, i]), results_precision_filtered[max_ind, i]*1.1))
            axes[i].locator_params(axis='y', tight=True, nbins=4)

            # all_lines_bis.extend(curr_lines)

        f.subplots_adjust(right=0.75)
        # plt.figlegend(all_lines_bis, ['%d item' % i + 's'*(i>1) for i in xrange(1, T+1)], loc='right', bbox_to_anchor=(1.0, 0.5))

        if savefigs:
            dataio.save_current_figure('results_subplots_1dnorm_{label}_global_{unique_id}.pdf')

    if plots_multipleaxes_emfits:

        f, axes = plt.subplots(nrows=T, ncols=1, sharex=True, figsize=(10, 12))
        all_lines_bis = []

        for i, max_ind in enumerate(np.nanargmax(result_emfits_filtered[..., 0], axis=0)):
            utils.plot_mean_std_area(ratio_MMlower_space, result_emfits_filtered[:, i, 0], 0*result_emfits_filtered[:, i, 0], ax_handle=axes[i], linewidth=2) #, color=all_lines[i].get_color())
            # curr_lines = axes[i].plot(ratio_MMlower_space, results_precision_filtered[:, i], linewidth=2, color=all_lines[i].get_color())
            axes[i].grid()
            axes[i].set_xticks(np.linspace(0., 1.0, 5))
            axes[i].set_xlim((0.0, 1.0))
            # axes[i].set_yticks([])
            # axes[i].set_ylim((np.min(result_emfits_filtered[:, i, 0]), result_emfits_filtered[max_ind, i, 0]*1.1))
            axes[i].locator_params(axis='y', tight=True, nbins=4)
            # all_lines_bis.extend(curr_lines)

        f.subplots_adjust(right=0.75)
        # plt.figlegend(all_lines_bis, ['%d item' % i + 's'*(i>1) for i in xrange(1, T+1)], loc='right', bbox_to_anchor=(1.0, 0.5))

        if savefigs:
            dataio.save_current_figure('results_subplots_emkappa_{label}_global_{unique_id}.pdf')

    variables_to_save = []

    if savedata:
        dataio.save_variables_default(locals(), variables_to_save)

        dataio.make_link_output_to_dropbox(dropbox_current_experiment_folder='hierarchicalrandomnetwork_characterisation')

    plt.show()

    return locals()
def plots_3dvolume_hierarchical_M_Mlayerone(data_pbs, generator_module=None):
    '''
        Reload 3D volume runs from PBS and plot them

    '''

    #### SETUP
    #
    savefigs = True
    savedata = False  # warning, huge file created...

    plots_pcolors = False
    plots_singleaxe = False
    plots_multipleaxes = True
    plots_multipleaxes_emfits = True

    load_fit_mixture_model = True

    # caching_emfit_filename = None
    caching_emfit_filename = os.path.join(generator_module.pbs_submission_infos['simul_out_dir'], 'outputs', 'cache_emfit_newshapes.pickle')

    plt.rcParams['font.size'] = 16
    #
    #### /SETUP

    dataio = DataIO(output_folder=generator_module.pbs_submission_infos['simul_out_dir'] + '/outputs/', label='global_' + dataset_infos['save_output_filename'])

    print "Order parameters: ", generator_module.dict_parameters_range.keys()

    results_precision_constant_M_Mlower = np.squeeze(utils.nanmean(data_pbs.dict_arrays['results_precision_M_T']['results'], axis=-1))
    results_precision_constant_M_Mlower_std = np.squeeze(utils.nanstd(data_pbs.dict_arrays['results_precision_M_T']['results'], axis=-1))
    results_responses = np.squeeze(data_pbs.dict_arrays['result_responses']['results'])
    results_targets = np.squeeze(data_pbs.dict_arrays['result_targets']['results'])
    results_nontargets = np.squeeze(data_pbs.dict_arrays['result_nontargets']['results'])
    # results_emfits_M_T = np.squeeze(data_pbs.dict_arrays['results_emfits_M_T']['results'])

    M_space = data_pbs.loaded_data['parameters_uniques']['M']
    M_layer_one_space = data_pbs.loaded_data['parameters_uniques']['M_layer_one']
    ratio_MMlower_space = M_space / float(generator_module.filtering_function_parameters['target_M_total'])

    filtering_indices = (np.arange(M_space.size), np.arange(-M_layer_one_space.size, 0)[::-1])

    T = results_precision_constant_M_Mlower.shape[-1]
    T_space = np.arange(T)
    N = results_nontargets.shape[-2]
    num_repetitions = data_pbs.loaded_data['args_list'][0]['num_repetitions']

    # Fix after @3625585; the shapes are now:
    #   results_responses, results_targets: M_space.size, M_layer_one_space.size, T, wrong num_repet, N
    #   results_nontargets: M_space.size, M_layer_one_space.size, T, wrong num_repet, N, T-1
    if data_pbs.loaded_data['nb_datasets_per_parameters'] > 1:
        num_repetitions = data_pbs.loaded_data['nb_datasets_per_parameters']

        # Filter and reshape
        results_responses = results_responses[:, :, :, :num_repetitions, :]
        results_responses.shape = (M_space.size, M_layer_one_space.size, T, num_repetitions*N)

        results_targets = results_targets[:, :, :, :num_repetitions, :]
        results_targets.shape = (M_space.size, M_layer_one_space.size, T, num_repetitions*N)

        results_nontargets = results_nontargets[:, :, :, :num_repetitions, :, :]
        results_nontargets.shape = (M_space.size, M_layer_one_space.size, T, num_repetitions*N, T-1)
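        # After this reshape each (M, M_layer_one, T) cell holds num_repetitions*N flattened
        # responses/targets, the sample layout expected by the EM fit below.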


    print M_space
    print M_layer_one_space
    print results_precision_constant_M_Mlower.shape
    # print results_precision_constant_M_Mlower

    results_precision_filtered = results_precision_constant_M_Mlower[filtering_indices]
    del results_precision_constant_M_Mlower
    results_precision_filtered_std = results_precision_constant_M_Mlower_std[filtering_indices]
    del results_precision_constant_M_Mlower_std
    results_responses_filtered = results_responses[filtering_indices]
    del results_responses
    results_targets_filtered = results_targets[filtering_indices]
    del results_targets
    results_nontargets_filtered = results_nontargets[filtering_indices]
    del results_nontargets
    # results_emfits_M_T_filtered = results_emfits_M_T[filtering_indices]
    # del results_emfits_M_T

    results_precision_filtered_smoothed = np.apply_along_axis(smooth, 0, results_precision_filtered, *(10, 'bartlett'))
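    # Smoothed copy along the ratio axis (Bartlett window, width 10); the plots below use the
    # unsmoothed values, this is only kept for the returned locals().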

    if load_fit_mixture_model:
        # Fit the mixture model on the samples

        if caching_emfit_filename is not None:
            if os.path.exists(caching_emfit_filename):
                # Got file, open it and try to use its contents
                try:
                    with open(caching_emfit_filename, 'rb') as file_in:
                        # Load and assign values
                        cached_data = pickle.load(file_in)
                        result_emfits_filtered = cached_data['result_emfits_filtered']
                        results_responses_sha1_loaded = cached_data.get('results_responses_sha1', '')
                        # Check that the sha1 is the same, if not recompute!
                        if results_responses_sha1_loaded == hashlib.sha1(results_responses_filtered).hexdigest():
                            print "Loading from cache file %s" % caching_emfit_filename
                            load_fit_mixture_model = False
                        else:
                            print "Tried loading from cache file %s, but data changed, recomputing..." % caching_emfit_filename
                            load_fit_mixture_model = True

                except IOError:
                    print "Error while loading %s, falling back to computing the EM fits" % caching_emfit_filename
                    load_fit_mixture_model = True


        if load_fit_mixture_model:

            result_emfits_filtered = np.nan*np.empty((ratio_MMlower_space.size, T, 5))
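            # Last axis: kappa, mixt_target, mixt_nontargets_sum, mixt_random, train_LL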

            # Fit EM model
            print "fitting EM model"
            for ratio_MMlower_i, ratio_MMlower in enumerate(ratio_MMlower_space):
                for T_i in T_space:
                    if np.any(~np.isnan(results_responses_filtered[ratio_MMlower_i, T_i])):
                        print "ratio MM, T:", ratio_MMlower, T_i+1
                        curr_em_fits = em_circularmixture_allitems_uniquekappa.fit(results_responses_filtered[ratio_MMlower_i, T_i], results_targets_filtered[ratio_MMlower_i, T_i], results_nontargets_filtered[ratio_MMlower_i, T_i, :, :T_i])

                        curr_em_fits['mixt_nontargets_sum'] = np.sum(curr_em_fits['mixt_nontargets'])
                        result_emfits_filtered[ratio_MMlower_i, T_i] = [curr_em_fits[key] for key in ('kappa', 'mixt_target', 'mixt_nontargets_sum', 'mixt_random', 'train_LL')]

            # Save everything to a file, for faster later plotting
            if caching_emfit_filename is not None:
                try:
                    with open(caching_emfit_filename, 'wb') as filecache_out:
                        results_responses_sha1 = hashlib.sha1(results_responses_filtered).hexdigest()
                        data_emfit = dict(result_emfits_filtered=result_emfits_filtered, results_responses_sha1=results_responses_sha1)

                        pickle.dump(data_emfit, filecache_out, protocol=2)
                        print "cache file %s written" % caching_emfit_filename
                except IOError:
                    print "Error writing out to caching file ", caching_emfit_filename


    if plots_multipleaxes:

        # Plot precisions with standard deviation around
        f, axes = plt.subplots(nrows=T, ncols=1, sharex=True, figsize=(10, 12))
        # all_lines_bis = []
        for i, max_ind in enumerate(np.argmax(results_precision_filtered, axis=0)):
            utils.plot_mean_std_area(ratio_MMlower_space, results_precision_filtered[:, i], results_precision_filtered_std[:, i], ax_handle=axes[i], linewidth=2) #, color=all_lines[i].get_color())
            # curr_lines = axes[i].plot(ratio_MMlower_space, results_precision_filtered[:, i], linewidth=2, color=all_lines[i].get_color())
            axes[i].grid()
            axes[i].set_xticks(np.linspace(0., 1.0, 5))
            axes[i].set_xlim((0.0, 1.0))
            # axes[i].set_yticks([])
            axes[i].set_ylim((np.min(results_precision_filtered[:, i]), results_precision_filtered[max_ind, i]*1.1))
            axes[i].locator_params(axis='y', tight=True, nbins=4)

            # all_lines_bis.extend(curr_lines)

        f.subplots_adjust(right=0.75)
        # plt.figlegend(all_lines_bis, ['%d item' % i + 's'*(i>1) for i in xrange(1, T+1)], loc='right', bbox_to_anchor=(1.0, 0.5))

        if savefigs:
            dataio.save_current_figure('results_subplots_1dnorm_{label}_global_{unique_id}.pdf')

    if plots_multipleaxes_emfits:

        f, axes = plt.subplots(nrows=T, ncols=1, sharex=True, figsize=(10, 12))
        all_lines_bis = []

        for i, max_ind in enumerate(np.nanargmax(result_emfits_filtered[..., 0], axis=0)):
            # Plot Target mixture
            utils.plot_mean_std_area(ratio_MMlower_space, result_emfits_filtered[:, i, 1:4], 0*result_emfits_filtered[:, i, 1:4], ax_handle=axes[i], linewidth=2) #, color=all_lines[i].get_color())
            # curr_lines = axes[i].plot(ratio_MMlower_space, results_precision_filtered[:, i], linewidth=2, color=all_lines[i].get_color())
            axes[i].grid()
            axes[i].set_xticks(np.linspace(0., 1.0, 5))
            axes[i].set_xlim((0.0, 1.0))
            # axes[i].set_yticks([])
            # axes[i].set_ylim((np.min(result_emfits_filtered[:, i, 0]), result_emfits_filtered[max_ind, i, 0]*1.1))
            axes[i].set_ylim((0.0, 1.05))
            axes[i].locator_params(axis='y', tight=True, nbins=4)
            # all_lines_bis.extend(curr_lines)

        if savefigs:
            dataio.save_current_figure('results_subplots_emtarget_{label}_global_{unique_id}.pdf')

        f, axes = plt.subplots(nrows=T, ncols=1, sharex=True, figsize=(10, 12))

        for i, max_ind in enumerate(np.nanargmax(result_emfits_filtered[..., 0], axis=0)):

            # Plot kappa mixture
            utils.plot_mean_std_area(ratio_MMlower_space, result_emfits_filtered[:, i, 0], 0*result_emfits_filtered[:, i, 0], ax_handle=axes[i], linewidth=2) #, color=all_lines[i].get_color())
            # curr_lines = axes[i].plot(ratio_MMlower_space, results_precision_filtered[:, i], linewidth=2, color=all_lines[i].get_color())
            axes[i].grid()
            axes[i].set_xticks(np.linspace(0., 1.0, 5))
            axes[i].set_xlim((0.0, 1.0))
            # axes[i].set_yticks([])
            # axes[i].set_ylim((np.min(result_emfits_filtered[:, i, 0]), result_emfits_filtered[max_ind, i, 0]*1.1))
            # axes[i].set_ylim((0.0, 1.0))
            axes[i].locator_params(axis='y', tight=True, nbins=4)
            # all_lines_bis.extend(curr_lines)

        # f.subplots_adjust(right=0.75)
        # plt.figlegend(all_lines_bis, ['%d item' % i + 's'*(i>1) for i in xrange(1, T+1)], loc='right', bbox_to_anchor=(1.0, 0.5))

        if savefigs:
            dataio.save_current_figure('results_subplots_emkappa_{label}_global_{unique_id}.pdf')

    variables_to_save = []

    if savedata:
        dataio.save_variables_default(locals(), variables_to_save)

    dataio.make_link_output_to_dropbox(dropbox_current_experiment_folder='hierarchicalrandomnetwork_characterisation')

    plt.show()

    return locals()
def plots_ratioMscaling(data_pbs, generator_module=None):
    '''
        Reload and plot precision/fits of a Mixed code.
    '''

    #### SETUP
    #
    savefigs = True
    savedata = True

    plots_pcolor_all = True
    plots_effect_M_target_precision = True
    plots_multiple_precisions = True

    plots_effect_M_target_kappa = True

    plots_subpopulations_effects = True

    colormap = None  # or 'cubehelix'
    plt.rcParams['font.size'] = 16
    #
    #### /SETUP

    print "Order parameters: ", generator_module.dict_parameters_range.keys()

    result_all_precisions_mean = (utils.nanmean(data_pbs.dict_arrays['result_all_precisions']['results'], axis=-1))
    result_all_precisions_std = (utils.nanstd(data_pbs.dict_arrays['result_all_precisions']['results'], axis=-1))
    result_em_fits_mean = (utils.nanmean(data_pbs.dict_arrays['result_em_fits']['results'], axis=-1))
    result_em_fits_std = (utils.nanstd(data_pbs.dict_arrays['result_em_fits']['results'], axis=-1))

    M_space = data_pbs.loaded_data['parameters_uniques']['M'].astype(int)
    ratio_space = data_pbs.loaded_data['parameters_uniques']['ratio_conj']
    num_repetitions = generator_module.num_repetitions

    print M_space
    print ratio_space
    print result_all_precisions_mean.shape, result_em_fits_mean.shape

    dataio = DataIO.DataIO(output_folder=generator_module.pbs_submission_infos['simul_out_dir'] + '/outputs/', label='global_' + dataset_infos['save_output_filename'])

    target_precision = 100.
    dist_to_target_precision = (result_all_precisions_mean - target_precision)**2.
    ratio_target_precision_given_M = ratio_space[np.argmin(dist_to_target_precision, axis=1)]
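    # For each M, pick the ratio_conj whose mean precision lands closest to target_precision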

    if plots_pcolor_all:
        # Check evolution of precision given M and ratio
        utils.pcolor_2d_data(result_all_precisions_mean, log_scale=True, x=M_space, y=ratio_space, xlabel='M', ylabel='ratio', xlabel_format="%d", title='precision wrt M / ratio')
        if savefigs:
            dataio.save_current_figure('precision_log_pcolor_{label}_{unique_id}.pdf')

        # See distance to target precision evolution
        utils.pcolor_2d_data(dist_to_target_precision, log_scale=True, x=M_space, y=ratio_space, xlabel='M', ylabel='ratio', xlabel_format="%d", title='Dist to target precision')
        if savefigs:
            dataio.save_current_figure('dist_targetprecision_log_pcolor_{label}_{unique_id}.pdf')


        # Show kappa
        utils.pcolor_2d_data(result_em_fits_mean[..., 0], log_scale=True, x=M_space, y=ratio_space, xlabel='M', ylabel='ratio', xlabel_format="%d", title='kappa wrt M / ratio')
        if savefigs:
            dataio.save_current_figure('kappa_log_pcolor_{label}_{unique_id}.pdf')

        utils.pcolor_2d_data((result_em_fits_mean[..., 0] - 200)**2., log_scale=True, x=M_space, y=ratio_space, xlabel='M', ylabel='ratio', xlabel_format="%d", title='dist to kappa')
        if savefigs:
            dataio.save_current_figure('dist_kappa_log_pcolor_{label}_{unique_id}.pdf')


    if plots_effect_M_target_precision:
        def plot_ratio_target_precision(ratio_target_precision_given_M, target_precision):
            f, ax = plt.subplots()
            ax.plot(M_space, ratio_target_precision_given_M)
            ax.set_xlabel('M')
            ax.set_ylabel('Optimal ratio')
            ax.set_title('Optimal Ratio for precision %d' % target_precision)

            if savefigs:
                dataio.save_current_figure('effect_ratio_M_targetprecision%d_{label}_{unique_id}.pdf' % target_precision)

        plot_ratio_target_precision(ratio_target_precision_given_M, target_precision)

        if plots_multiple_precisions:
            target_precisions = np.array([100, 200, 300, 500, 1000])
            for target_precision in target_precisions:
                dist_to_target_precision = (result_all_precisions_mean - target_precision)**2.
                ratio_target_precision_given_M = ratio_space[np.argmin(dist_to_target_precision, axis=1)]

                # replot
                plot_ratio_target_precision(ratio_target_precision_given_M, target_precision)

                utils.pcolor_2d_data(dist_to_target_precision, log_scale=True, x=M_space, y=ratio_space, xlabel='M', ylabel='ratio', xlabel_format="%d", title='Dist to target precision %d' % target_precision)
                if savefigs:
                    dataio.save_current_figure('dist_targetprecision%d_log_pcolor_{label}_{unique_id}.pdf' % target_precision)

    if plots_effect_M_target_kappa:
        def plot_ratio_target_kappa(ratio_target_kappa_given_M, target_kappa):
            f, ax = plt.subplots()
            ax.plot(M_space, ratio_target_kappa_given_M)
            ax.set_xlabel('M')
            ax.set_ylabel('Optimal ratio')
            ax.set_title('Optimal Ratio for kappa %d' % target_kappa)

            if savefigs:
                dataio.save_current_figure('effect_ratio_M_targetkappa%d_{label}_{unique_id}.pdf' % target_kappa)

        target_kappas = np.array([100, 200, 300, 500, 1000, 3000])
        for target_kappa in target_kappas:
            dist_to_target_kappa = (result_em_fits_mean[..., 0] - target_kappa)**2.
            ratio_target_kappa_given_M = ratio_space[np.argmin(dist_to_target_kappa, axis=1)]

            # replot
            plot_ratio_target_kappa(ratio_target_kappa_given_M, target_kappa)

            utils.pcolor_2d_data(dist_to_target_kappa, log_scale=True, x=M_space, y=ratio_space, xlabel='M', ylabel='ratio', xlabel_format="%d", title='Dist to target kappa %d' % target_kappa)
            if savefigs:
                dataio.save_current_figure('dist_targetkappa%d_log_pcolor_{label}_{unique_id}.pdf' % target_kappa)

    if plots_subpopulations_effects:
        # result_all_precisions_mean
        for M_tot_selected_i, M_tot_selected in enumerate(M_space[::2]):

            M_conj_space = ((1.-ratio_space)*M_tot_selected).astype(int)
            M_feat_space = M_tot_selected - M_conj_space

            f, axes = plt.subplots(2, 2)
            axes[0, 0].plot(ratio_space, result_all_precisions_mean[2*M_tot_selected_i])
            axes[0, 0].set_xlabel('ratio')
            axes[0, 0].set_title('Measured precision')

            axes[1, 0].plot(ratio_space, M_conj_space**2*M_feat_space)
            axes[1, 0].set_xlabel('ratio')
            axes[1, 0].set_title('M_c**2*M_f')

            axes[0, 1].plot(ratio_space, M_conj_space**2.)
            axes[0, 1].set_xlabel('ratio')
            axes[0, 1].set_title('M_c**2')

            axes[1, 1].plot(ratio_space, M_feat_space)
            axes[1, 1].set_xlabel('ratio')
            axes[1, 1].set_title('M_f')

            f.suptitle('M_tot %d' % M_tot_selected, fontsize=15)
            f.set_tight_layout(True)

            if savefigs:
                dataio.save_current_figure('scaling_subpop_Mtot%d_{label}_{unique_id}.pdf' % M_tot_selected)

            plt.close(f)





    all_args = data_pbs.loaded_data['args_list']
    variables_to_save = []

    if savedata:
        dataio.save_variables_default(locals(), variables_to_save)

        dataio.make_link_output_to_dropbox(dropbox_current_experiment_folder='ratio_scaling_M')

    plt.show()

    return locals()
def plots_memory_curves(data_pbs, generator_module=None):
    """
        Reload and plot memory curve of a Mixed code.
        Can use Marginal Fisher Information and fitted Mixture Model as well
    """

    #### SETUP
    #
    savefigs = True
    savedata = True

    do_error_distrib_fits = True

    plot_pcolor_fit_precision_to_fisherinfo = True
    plot_selected_memory_curves = False
    plot_best_memory_curves = False
    plot_best_error_distrib = True

    colormap = None  # or 'cubehelix'
    plt.rcParams["font.size"] = 16
    #
    #### /SETUP

    print "Order parameters: ", generator_module.dict_parameters_range.keys()

    result_all_precisions_mean = utils.nanmean(data_pbs.dict_arrays["result_all_precisions"]["results"], axis=-1)
    result_all_precisions_std = utils.nanstd(data_pbs.dict_arrays["result_all_precisions"]["results"], axis=-1)
    result_em_fits_mean = utils.nanmean(data_pbs.dict_arrays["result_em_fits"]["results"], axis=-1)
    result_em_fits_std = utils.nanstd(data_pbs.dict_arrays["result_em_fits"]["results"], axis=-1)
    result_marginal_inv_fi_mean = utils.nanmean(data_pbs.dict_arrays["result_marginal_inv_fi"]["results"], axis=-1)
    result_marginal_inv_fi_std = utils.nanstd(data_pbs.dict_arrays["result_marginal_inv_fi"]["results"], axis=-1)
    result_marginal_fi_mean = utils.nanmean(1.0 / data_pbs.dict_arrays["result_marginal_inv_fi"]["results"], axis=-1)
    result_marginal_fi_std = utils.nanstd(1.0 / data_pbs.dict_arrays["result_marginal_inv_fi"]["results"], axis=-1)
    result_responses_all = data_pbs.dict_arrays["result_responses"]["results"]
    result_target_all = data_pbs.dict_arrays["result_target"]["results"]
    result_nontargets_all = data_pbs.dict_arrays["result_nontargets"]["results"]

    M_space = data_pbs.loaded_data["parameters_uniques"]["M"].astype(int)
    sigmax_space = data_pbs.loaded_data["parameters_uniques"]["sigmax"]
    T_space = data_pbs.loaded_data["datasets_list"][0]["T_space"]
    nb_repetitions = result_responses_all.shape[-1]

    print M_space
    print sigmax_space
    print T_space
    print result_all_precisions_mean.shape, result_em_fits_mean.shape, result_marginal_inv_fi_mean.shape

    dataio = DataIO.DataIO(
        output_folder=generator_module.pbs_submission_infos["simul_out_dir"] + "/outputs/",
        label="global_" + dataset_infos["save_output_filename"],
    )

    ## Load Experimental data
    experim_datadir = os.environ.get("WORKDIR_DROP", os.path.split(load_experimental_data.__file__)[0])
    data_simult = load_experimental_data.load_data_simult(
        data_dir=os.path.normpath(os.path.join(experim_datadir, "../../experimental_data/")), fit_mixture_model=True
    )
    gorgo11_experimental_precision = data_simult["precision_nitems_theo"]
    gorgo11_experimental_kappa = np.array([data["kappa"] for _, data in data_simult["em_fits_nitems"]["mean"].items()])
    gorgo11_experimental_emfits_mean = np.array(
        [
            [data[key] for _, data in data_simult["em_fits_nitems"]["mean"].items()]
            for key in ["kappa", "mixt_target", "mixt_nontargets", "mixt_random"]
        ]
    )
    gorgo11_experimental_emfits_std = np.array(
        [
            [data[key] for _, data in data_simult["em_fits_nitems"]["std"].items()]
            for key in ["kappa", "mixt_target", "mixt_nontargets", "mixt_random"]
        ]
    )
    gorgo11_experimental_emfits_sem = gorgo11_experimental_emfits_std / np.sqrt(np.unique(data_simult["subject"]).size)

    experim_datadir = os.environ.get("WORKDIR_DROP", os.path.split(load_experimental_data.__file__)[0])
    data_bays2009 = load_experimental_data.load_data_bays09(
        data_dir=os.path.normpath(os.path.join(experim_datadir, "../../experimental_data/")), fit_mixture_model=True
    )
    bays09_experimental_mixtures_mean = data_bays2009["em_fits_nitems_arrays"]["mean"]
    bays09_experimental_mixtures_std = data_bays2009["em_fits_nitems_arrays"]["std"]
    # add interpolated points for 3 and 5 items
    emfit_mean_intpfct = spint.interp1d(np.unique(data_bays2009["n_items"]), bays09_experimental_mixtures_mean)
    bays09_experimental_mixtures_mean_compatible = emfit_mean_intpfct(np.arange(1, 7))
    emfit_std_intpfct = spint.interp1d(np.unique(data_bays2009["n_items"]), bays09_experimental_mixtures_std)
    bays09_experimental_mixtures_std_compatible = emfit_std_intpfct(np.arange(1, 7))
    T_space_bays09 = np.arange(1, 6)

    # Boost non-targets
    # bays09_experimental_mixtures_mean_compatible[1] *= 1.5
    # bays09_experimental_mixtures_mean_compatible[2] /= 1.5
    # bays09_experimental_mixtures_mean_compatible /= np.sum(bays09_experimental_mixtures_mean_compatible, axis=0)

    # Compute some landscapes of fit!
    # dist_diff_precision_margfi = np.sum(np.abs(result_all_precisions_mean*2. - result_marginal_fi_mean[..., 0])**2., axis=-1)
    # dist_ratio_precision_margfi = np.sum(np.abs((result_all_precisions_mean*2.)/result_marginal_fi_mean[..., 0] - 1.0)**2., axis=-1)
    # dist_diff_emkappa_margfi = np.sum(np.abs(result_em_fits_mean[..., 0]*2. - result_marginal_fi_mean[..., 0])**2., axis=-1)
    # dist_ratio_emkappa_margfi = np.sum(np.abs((result_em_fits_mean[..., 0]*2.)/result_marginal_fi_mean[..., 0] - 1.0)**2., axis=-1)

    dist_diff_precision_experim = np.sum(
        np.abs(result_all_precisions_mean[..., : gorgo11_experimental_kappa.size] - gorgo11_experimental_precision)
        ** 2.0,
        axis=-1,
    )
    dist_diff_emkappa_experim = np.sum(
        np.abs(result_em_fits_mean[..., 0, : gorgo11_experimental_kappa.size] - gorgo11_experimental_kappa) ** 2.0,
        axis=-1,
    )

    dist_diff_em_mixtures_bays09 = np.sum(
        np.sum((result_em_fits_mean[..., 1:4] - bays09_experimental_mixtures_mean_compatible[1:].T) ** 2.0, axis=-1),
        axis=-1,
    )
    dist_diff_modelfits_experfits_bays09 = np.sum(
        np.sum((result_em_fits_mean[..., :4] - bays09_experimental_mixtures_mean_compatible.T) ** 2.0, axis=-1), axis=-1
    )
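    # All dist_diff_* landscapes are summed squared errors over set sizes, one value per
    # (M, sigmax) pair; lower means a closer match to the experimental curves.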

    if do_error_distrib_fits:
        print "computing error distribution histograms fits"
        # Now try to fit histograms of errors to target/nontargets
        bays09_hist_target_mean = data_bays2009["hist_cnts_target_nitems_stats"]["mean"]
        bays09_hist_target_std = data_bays2009["hist_cnts_target_nitems_stats"]["std"]
        bays09_hist_nontarget_mean = data_bays2009["hist_cnts_nontarget_nitems_stats"]["mean"]
        bays09_hist_nontarget_std = data_bays2009["hist_cnts_nontarget_nitems_stats"]["std"]
        T_space_bays09_filt = np.unique(data_bays2009["n_items"])

        angle_space = np.linspace(-np.pi, np.pi, bays09_hist_target_mean.shape[-1] + 1)
        bins_center = angle_space[:-1] + np.diff(angle_space)[0] / 2

        errors_targets = utils.wrap_angles(result_responses_all - result_target_all)
        hist_targets_all = np.empty(
            (M_space.size, sigmax_space.size, T_space_bays09_filt.size, angle_space.size - 1, nb_repetitions)
        )

        errors_nontargets = np.nan * np.empty(result_nontargets_all.shape)
        hist_nontargets_all = np.empty(
            (M_space.size, sigmax_space.size, T_space_bays09_filt.size, angle_space.size - 1, nb_repetitions)
        )
        for M_i, M in enumerate(M_space):
            for sigmax_i, sigmax in enumerate(sigmax_space):
                for T_bays_i, T_bays in enumerate(T_space_bays09_filt):
                    for repet_i in xrange(nb_repetitions):
                        # Could vectorise this indexing, but explicit loops are kept for clarity

                        # Histogram errors to targets
                        hist_targets_all[M_i, sigmax_i, T_bays_i, :, repet_i], x, bins = utils.histogram_binspace(
                            utils.dropnan(errors_targets[M_i, sigmax_i, T_bays - 1, ..., repet_i]),
                            bins=angle_space,
                            norm="density",
                        )

                        # Compute the error between the responses and nontargets.
                        errors_nontargets[M_i, sigmax_i, T_bays - 1, :, :, repet_i] = utils.wrap_angles(
                            (
                                result_responses_all[M_i, sigmax_i, T_bays - 1, :, repet_i, np.newaxis]
                                - result_nontargets_all[M_i, sigmax_i, T_bays - 1, :, :, repet_i]
                            )
                        )
                        # Histogram it
                        hist_nontargets_all[M_i, sigmax_i, T_bays_i, :, repet_i], x, bins = utils.histogram_binspace(
                            utils.dropnan(errors_nontargets[M_i, sigmax_i, T_bays - 1, ..., repet_i]),
                            bins=angle_space,
                            norm="density",
                        )

        hist_targets_mean = utils.nanmean(hist_targets_all, axis=-1).filled(np.nan)
        hist_targets_std = utils.nanstd(hist_targets_all, axis=-1).filled(np.nan)
        hist_nontargets_mean = utils.nanmean(hist_nontargets_all, axis=-1).filled(np.nan)
        hist_nontargets_std = utils.nanstd(hist_nontargets_all, axis=-1).filled(np.nan)

        # Compute distances to experimental histograms
        dist_diff_hist_target_bays09 = np.nansum(
            np.nansum((hist_targets_mean - bays09_hist_target_mean) ** 2.0, axis=-1), axis=-1
        )
        dist_diff_hist_nontargets_bays09 = np.nansum(
            np.nansum((hist_nontargets_mean - bays09_hist_nontarget_mean) ** 2.0, axis=-1), axis=-1
        )
        dist_diff_hist_nontargets_5_6items_bays09 = np.nansum(
            np.nansum((hist_nontargets_mean[:, :, -2:] - bays09_hist_nontarget_mean[-2:]) ** 2.0, axis=-1), axis=-1
        )

    if plot_pcolor_fit_precision_to_fisherinfo:
        # Check fit between precision and Experiments
        utils.pcolor_2d_data(
            dist_diff_precision_experim,
            log_scale=True,
            x=M_space,
            y=sigmax_space,
            xlabel="M",
            ylabel="sigmax",
            xlabel_format="%d",
        )
        if savefigs:
            dataio.save_current_figure("match_precision_exper_log_pcolor_{label}_{unique_id}.pdf")

        utils.pcolor_2d_data(
            dist_diff_emkappa_experim, x=M_space, y=sigmax_space, xlabel="M", ylabel="sigmax", xlabel_format="%d"
        )
        if savefigs:
            dataio.save_current_figure("match_emkappa_model_exper_pcolor_{label}_{unique_id}.pdf")

        utils.pcolor_2d_data(
            dist_diff_em_mixtures_bays09,
            x=M_space,
            y=sigmax_space,
            xlabel="M",
            ylabel="sigmax",
            log_scale=True,
            xlabel_format="%d",
        )
        if savefigs:
            dataio.save_current_figure("match_emmixtures_experbays09_log_pcolor_{label}_{unique_id}.pdf")

        utils.pcolor_2d_data(
            dist_diff_modelfits_experfits_bays09,
            log_scale=True,
            x=M_space,
            y=sigmax_space,
            xlabel="M",
            ylabel="sigmax",
            xlabel_format="%d",
        )
        if savefigs:
            dataio.save_current_figure("match_diff_emfits_experbays09_pcolor_{label}_{unique_id}.pdf")

        if do_error_distrib_fits:
            utils.pcolor_2d_data(
                dist_diff_hist_target_bays09,
                x=M_space,
                y=sigmax_space,
                xlabel="M",
                ylabel="sigmax",
                log_scale=True,
                xlabel_format="%d",
            )
            if savefigs:
                dataio.save_current_figure("match_hist_targets_experbays09_log_pcolor_{label}_{unique_id}.pdf")

            utils.pcolor_2d_data(
                dist_diff_hist_nontargets_bays09,
                x=M_space,
                y=sigmax_space,
                xlabel="M",
                ylabel="sigmax",
                log_scale=True,
                xlabel_format="%d",
            )
            if savefigs:
                dataio.save_current_figure("match_hist_nontargets_experbays09_log_pcolor_{label}_{unique_id}.pdf")

            utils.pcolor_2d_data(
                dist_diff_hist_nontargets_5_6items_bays09,
                x=M_space,
                y=sigmax_space,
                xlabel="M",
                ylabel="sigmax",
                log_scale=True,
                xlabel_format="%d",
            )
            if savefigs:
                dataio.save_current_figure(
                    "match_hist_nontargets_6items_experbays09_log_pcolor_{label}_{unique_id}.pdf"
                )

    # Macro plot
    def mem_plot_precision(sigmax_i, M_i, mem_exp_prec):
        ax = utils.plot_mean_std_area(
            T_space[: mem_exp_prec.size],
            mem_exp_prec,
            np.zeros(mem_exp_prec.size),
            linewidth=3,
            fmt="o-",
            markersize=8,
            label="Experimental data",
        )

        ax = utils.plot_mean_std_area(
            T_space[: mem_exp_prec.size],
            result_all_precisions_mean[M_i, sigmax_i, : mem_exp_prec.size],
            result_all_precisions_std[M_i, sigmax_i, : mem_exp_prec.size],
            ax_handle=ax,
            linewidth=3,
            fmt="o-",
            markersize=8,
            label="Precision of samples",
        )

        # ax = utils.plot_mean_std_area(T_space, 0.5*result_marginal_fi_mean[..., 0][M_i, sigmax_i], 0.5*result_marginal_fi_std[..., 0][M_i, sigmax_i], ax_handle=ax, linewidth=3, fmt='o-', markersize=8, label='Marginal Fisher Information')

        # ax = utils.plot_mean_std_area(T_space, result_em_fits_mean[..., 0][M_i, sigmax_i], result_em_fits_std[..., 0][M_i, sigmax_i], ax_handle=ax, xlabel='Number of items', ylabel="Inverse variance $[rad^{-2}]$", linewidth=3, fmt='o-', markersize=8, label='Fitted kappa')

        ax.set_title("M %d, sigmax %.2f" % (M_space[M_i], sigmax_space[sigmax_i]))
        ax.legend()
        ax.set_xlim([0.9, mem_exp_prec.size + 0.1])
        ax.set_xticks(range(1, mem_exp_prec.size + 1))
        ax.set_xticklabels(range(1, mem_exp_prec.size + 1))

        if savefigs:
            dataio.save_current_figure(
                "memorycurves_precision_M%dsigmax%.2f_{label}_{unique_id}.pdf" % (M_space[M_i], sigmax_space[sigmax_i])
            )

    def mem_plot_kappa(sigmax_i, M_i, exp_kappa_mean, exp_kappa_std=None):
        ax = utils.plot_mean_std_area(
            T_space[: exp_kappa_mean.size],
            exp_kappa_mean,
            exp_kappa_std,
            linewidth=3,
            fmt="o-",
            markersize=8,
            label="Experimental data",
        )

        ax = utils.plot_mean_std_area(
            T_space[: exp_kappa_mean.size],
            result_em_fits_mean[..., : exp_kappa_mean.size, 0][M_i, sigmax_i],
            result_em_fits_std[..., : exp_kappa_mean.size, 0][M_i, sigmax_i],
            xlabel="Number of items",
            ylabel="Memory error $[rad^{-2}]$",
            linewidth=3,
            fmt="o-",
            markersize=8,
            label="Fitted kappa",
            ax_handle=ax,
        )

        # ax = utils.plot_mean_std_area(T_space, 0.5*result_marginal_fi_mean[..., 0][M_i, sigmax_i], 0.5*result_marginal_fi_std[..., 0][M_i, sigmax_i], ax_handle=ax, linewidth=3, fmt='o-', markersize=8, label='Marginal Fisher Information')

        ax.set_title("M %d, sigmax %.2f" % (M_space[M_i], sigmax_space[sigmax_i]))
        ax.legend()
        ax.set_xlim([0.9, exp_kappa_mean.size + 0.1])
        ax.set_xticks(range(1, exp_kappa_mean.size + 1))
        ax.set_xticklabels(range(1, exp_kappa_mean.size + 1))

        ax.get_figure().canvas.draw()

        if savefigs:
            dataio.save_current_figure(
                "memorycurves_kappa_M%dsigmax%.2f_{label}_{unique_id}.pdf" % (M_space[M_i], sigmax_space[sigmax_i])
            )

    def em_plot(sigmax_i, M_i):
        f, ax = plt.subplots()
        ax2 = ax.twinx()

        # left axis, kappa
        ax = utils.plot_mean_std_area(
            T_space,
            result_em_fits_mean[..., 0][M_i, sigmax_i],
            result_em_fits_std[..., 0][M_i, sigmax_i],
            xlabel="Number of items",
            ylabel="Inverse variance $[rad^{-2}]$",
            ax_handle=ax,
            linewidth=3,
            fmt="o-",
            markersize=8,
            label="Fitted kappa",
            color="k",
        )

        # Right axis, mixture probabilities
        utils.plot_mean_std_area(
            T_space,
            result_em_fits_mean[..., 1][M_i, sigmax_i],
            result_em_fits_std[..., 1][M_i, sigmax_i],
            xlabel="Number of items",
            ylabel="Mixture probabilities",
            ax_handle=ax2,
            linewidth=3,
            fmt="o-",
            markersize=8,
            label="Target",
        )
        utils.plot_mean_std_area(
            T_space,
            result_em_fits_mean[..., 2][M_i, sigmax_i],
            result_em_fits_std[..., 2][M_i, sigmax_i],
            xlabel="Number of items",
            ylabel="Mixture probabilities",
            ax_handle=ax2,
            linewidth=3,
            fmt="o-",
            markersize=8,
            label="Nontarget",
        )
        utils.plot_mean_std_area(
            T_space,
            result_em_fits_mean[..., 3][M_i, sigmax_i],
            result_em_fits_std[..., 3][M_i, sigmax_i],
            xlabel="Number of items",
            ylabel="Mixture probabilities",
            ax_handle=ax2,
            linewidth=3,
            fmt="o-",
            markersize=8,
            label="Random",
        )

        lines, labels = ax.get_legend_handles_labels()
        lines2, labels2 = ax2.get_legend_handles_labels()
        ax.legend(lines + lines2, labels + labels2)

        ax.set_title("M %d, sigmax %.2f" % (M_space[M_i], sigmax_space[sigmax_i]))
        ax.set_xlim([0.9, T_space.size])
        ax.set_xticks(range(1, T_space.size + 1))
        ax.set_xticklabels(range(1, T_space.size + 1))

        f.canvas.draw()

        if savefigs:
            dataio.save_current_figure(
                "memorycurves_emfits_M%dsigmax%.2f_{label}_{unique_id}.pdf" % (M_space[M_i], sigmax_space[sigmax_i])
            )

    def em_plot_paper(sigmax_i, M_i):
        f, ax = plt.subplots()

        # Right axis, mixture probabilities
        utils.plot_mean_std_area(
            T_space_bays09,
            result_em_fits_mean[..., 1][M_i, sigmax_i][: T_space_bays09.size],
            result_em_fits_std[..., 1][M_i, sigmax_i][: T_space_bays09.size],
            xlabel="Number of items",
            ylabel="Mixture probabilities",
            ax_handle=ax,
            linewidth=3,
            fmt="o-",
            markersize=5,
            label="Target",
        )
        utils.plot_mean_std_area(
            T_space_bays09,
            result_em_fits_mean[..., 2][M_i, sigmax_i][: T_space_bays09.size],
            result_em_fits_std[..., 2][M_i, sigmax_i][: T_space_bays09.size],
            xlabel="Number of items",
            ylabel="Mixture probabilities",
            ax_handle=ax,
            linewidth=3,
            fmt="o-",
            markersize=5,
            label="Nontarget",
        )
        utils.plot_mean_std_area(
            T_space_bays09,
            result_em_fits_mean[..., 3][M_i, sigmax_i][: T_space_bays09.size],
            result_em_fits_std[..., 3][M_i, sigmax_i][: T_space_bays09.size],
            xlabel="Number of items",
            ylabel="Mixture probabilities",
            ax_handle=ax,
            linewidth=3,
            fmt="o-",
            markersize=5,
            label="Random",
        )

        ax.legend(prop={"size": 15})

        ax.set_title("M %d, sigmax %.2f" % (M_space[M_i], sigmax_space[sigmax_i]))
        ax.set_xlim([1.0, T_space_bays09.size])
        ax.set_ylim([0.0, 1.1])
        ax.set_xticks(range(1, T_space_bays09.size + 1))
        ax.set_xticklabels(range(1, T_space_bays09.size + 1))

        f.canvas.draw()

        if savefigs:
            dataio.save_current_figure(
                "memorycurves_emfits_paper_M%dsigmax%.2f_{label}_{unique_id}.pdf"
                % (M_space[M_i], sigmax_space[sigmax_i])
            )

    def hist_errors_targets_nontargets(hists_toplot_mean, hists_toplot_std, title="", M=0, sigmax=0, yaxis_lim="auto"):

        f1, axes1 = plt.subplots(
            ncols=hists_toplot_mean.shape[-2], figsize=(hists_toplot_mean.shape[-2] * 6, 6), sharey=True
        )

        for T_bays_i, T_bays in enumerate(T_space_bays09_filt):
            if not np.all(np.isnan(hists_toplot_mean[T_bays_i])):
                axes1[T_bays_i].bar(
                    bins_center,
                    hists_toplot_mean[T_bays_i],
                    width=2.0 * np.pi / (angle_space.size - 1),
                    align="center",
                    yerr=hists_toplot_std[T_bays_i],
                )
                axes1[T_bays_i].set_title("N=%d" % T_bays)

                axes1[T_bays_i].set_xlim(
                    [bins_center[0] - np.pi / (angle_space.size - 1), bins_center[-1] + np.pi / (angle_space.size - 1)]
                )
                if yaxis_lim == "target":
                    axes1[T_bays_i].set_ylim([0.0, 2.0])
                elif yaxis_lim == "nontarget":
                    axes1[T_bays_i].set_ylim([0.0, 0.3])
                else:
                    axes1[T_bays_i].set_ylim([0.0, np.nanmax(hists_toplot_mean + hists_toplot_std) * 1.1])
                axes1[T_bays_i].set_xticks((-np.pi, -np.pi / 2, 0, np.pi / 2.0, np.pi))
                axes1[T_bays_i].set_xticklabels(
                    (r"$-\pi$", r"$-\frac{\pi}{2}$", r"$0$", r"$\frac{\pi}{2}$", r"$\pi$"), fontsize=16
                )

        f1.canvas.draw()

        if savefigs:
            dataio.save_current_figure(
                "memorycurves_hist_%s_paper_M%dsigmax%.2f_{label}_{unique_id}.pdf" % (title, M, sigmax)
            )

    #################################

    if plot_selected_memory_curves:
        selected_values = [[0.84, 0.23], [0.84, 0.19]]

        for current_values in selected_values:
            # Find the indices
            M_i = np.argmin(np.abs(current_values[0] - M_space))
            sigmax_i = np.argmin(np.abs(current_values[1] - sigmax_space))

            mem_plot_precision(sigmax_i, M_i, gorgo11_experimental_precision)
            mem_plot_kappa(sigmax_i, M_i, gorgo11_experimental_emfits_mean[0], gorgo11_experimental_emfits_std[0])

    if plot_best_memory_curves:
        # Best precision fit
        best_axis2_i_all = np.argmin(dist_diff_precision_experim, axis=1)
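        # dist_diff_* arrays are (M, sigmax): argmin over axis=1 picks, for each M, the sigmax
        # giving the best fit to the experimental data.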

        for axis1_i, best_axis2_i in enumerate(best_axis2_i_all):
            mem_plot_precision(best_axis2_i, axis1_i, gorgo11_experimental_precision)

        # Best kappa fit
        best_axis2_i_all = np.argmin(dist_diff_emkappa_experim, axis=1)

        for axis1_i, best_axis2_i in enumerate(best_axis2_i_all):
            mem_plot_kappa(
                best_axis2_i, axis1_i, gorgo11_experimental_emfits_mean[0], gorgo11_experimental_emfits_std[0]
            )
            # em_plot(best_axis2_i, axis1_i)

        # Best em parameters fit to Bays09
        best_axis2_i_all = np.argmin(dist_diff_modelfits_experfits_bays09, axis=1)
        # best_axis2_i_all = np.argmin(dist_diff_em_mixtures_bays09, axis=1)

        for axis1_i, best_axis2_i in enumerate(best_axis2_i_all):
            mem_plot_kappa(
                best_axis2_i,
                axis1_i,
                bays09_experimental_mixtures_mean_compatible[0, : T_space_bays09.size],
                bays09_experimental_mixtures_std_compatible[0, : T_space_bays09.size],
            )
            # em_plot(best_axis2_i, axis1_i)
            em_plot_paper(best_axis2_i, axis1_i)

    if plot_best_error_distrib and do_error_distrib_fits:

        # Best target histograms
        best_axis2_i_all = np.argmin(dist_diff_hist_target_bays09, axis=1)

        for axis1_i, best_axis2_i in enumerate(best_axis2_i_all):
            hist_errors_targets_nontargets(
                hist_targets_mean[axis1_i, best_axis2_i],
                hist_targets_std[axis1_i, best_axis2_i],
                "target",
                M=M_space[axis1_i],
                sigmax=sigmax_space[best_axis2_i],
                yaxis_lim="target",
            )

        # Best nontarget histograms
        best_axis2_i_all = np.argmin(dist_diff_hist_nontargets_bays09, axis=1)

        for axis1_i, best_axis2_i in enumerate(best_axis2_i_all):
            hist_errors_targets_nontargets(
                hist_nontargets_mean[axis1_i, best_axis2_i],
                hist_nontargets_std[axis1_i, best_axis2_i],
                "nontarget",
                M=M_space[axis1_i],
                sigmax=sigmax_space[best_axis2_i],
                yaxis_lim="nontarget",
            )

    all_args = data_pbs.loaded_data["args_list"]
    variables_to_save = [
        "gorgo11_experimental_precision",
        "gorgo11_experimental_kappa",
        "bays09_experimental_mixtures_mean_compatible",
        "T_space",
    ]

    if savedata:
        dataio.save_variables_default(locals(), variables_to_save)

        dataio.make_link_output_to_dropbox(dropbox_current_experiment_folder="memory_curves")

    plt.show()

    return locals()
def plots_ratioMscaling(data_pbs, generator_module=None):
    '''
        Reload and plot precision/fits of a Mixed code.
    '''

    #### SETUP
    #
    savefigs = True
    savedata = True

    plots_pcolor_all = False
    plots_effect_M_target_kappa = False

    plots_kappa_fi_comparison = False
    plots_multiple_fisherinfo = False
    specific_plot_effect_R = True


    colormap = None  # or 'cubehelix'
    plt.rcParams['font.size'] = 16
    #
    #### /SETUP

    print "Order parameters: ", generator_module.dict_parameters_range.keys()

    result_all_precisions_mean = (utils.nanmean(data_pbs.dict_arrays['result_all_precisions']['results'], axis=-1))
    result_all_precisions_std = (utils.nanstd(data_pbs.dict_arrays['result_all_precisions']['results'], axis=-1))
    result_em_fits_mean = (utils.nanmean(data_pbs.dict_arrays['result_em_fits']['results'], axis=-1))
    result_em_fits_std = (utils.nanstd(data_pbs.dict_arrays['result_em_fits']['results'], axis=-1))
    result_fisherinfo_mean = (utils.nanmean(data_pbs.dict_arrays['result_fisher_info']['results'], axis=-1))
    result_fisherinfo_std = (utils.nanstd(data_pbs.dict_arrays['result_fisher_info']['results'], axis=-1))

    all_args = data_pbs.loaded_data['args_list']

    result_em_fits_kappa = result_em_fits_mean[..., 0]
    result_em_fits_target = result_em_fits_mean[..., 1]
    result_em_fits_kappa_valid = np.ma.masked_where(result_em_fits_target < 0.9, result_em_fits_kappa)
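    # Only trust kappa where the fitted target mixture weight is high (>= 0.9), i.e. where the
    # fit is dominated by the target component.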

    M_space = data_pbs.loaded_data['parameters_uniques']['M'].astype(int)
    ratio_space = data_pbs.loaded_data['parameters_uniques']['ratio_conj']
    R_space = data_pbs.loaded_data['parameters_uniques']['R'].astype(int)
    num_repetitions = generator_module.num_repetitions

    print M_space
    print ratio_space
    print R_space
    print result_all_precisions_mean.shape, result_em_fits_mean.shape

    dataio = DataIO.DataIO(output_folder=generator_module.pbs_submission_infos['simul_out_dir'] + '/outputs/', label='global_' + dataset_infos['save_output_filename'])

    MAX_DISTANCE = 100.

    if plots_pcolor_all:
        # Do one pcolor for M and ratio per R
        for R_i, R in enumerate(R_space):
            # Check evolution of precision given M and ratio
            # utils.pcolor_2d_data(result_all_precisions_mean[..., R_i], log_scale=True, x=M_space, y=ratio_space, xlabel='M', ylabel='ratio', xlabel_format="%d", title='precision, R=%d' % R)
            # if savefigs:
            #     dataio.save_current_figure('pcolor_precision_R%d_log_{label}_{unique_id}.pdf' % R)

            # Show kappa
            try:
                utils.pcolor_2d_data(result_em_fits_kappa_valid[..., R_i], log_scale=True, x=M_space, y=ratio_space, xlabel='M', ylabel='ratio', xlabel_format="%d", title='kappa, R=%d' % R)
                if savefigs:
                    dataio.save_current_figure('pcolor_kappa_R%d_log_{label}_{unique_id}.pdf' % R)
            except ValueError:
                pass

            # Show probability on target
            # utils.pcolor_2d_data(result_em_fits_target[..., R_i], log_scale=False, x=M_space, y=ratio_space, xlabel='M', ylabel='ratio', xlabel_format="%d", title='target, R=%d' % R)
            # if savefigs:
            #     dataio.save_current_figure('pcolor_target_R%d_{label}_{unique_id}.pdf' % R)

            # # Show Fisher info
            # utils.pcolor_2d_data(result_fisherinfo_mean[..., R_i], log_scale=True, x=M_space, y=ratio_space, xlabel='M', ylabel='ratio', xlabel_format="%d", title='fisher info, R=%d' % R)
            # if savefigs:
            #     dataio.save_current_figure('pcolor_fisherinfo_R%d_log_{label}_{unique_id}.pdf' % R)

            plt.close('all')

    if plots_effect_M_target_kappa:
        def plot_ratio_target_kappa(ratio_target_kappa_given_M, target_kappa, R):
            f, ax = plt.subplots()
            ax.plot(M_space, ratio_target_kappa_given_M)
            ax.set_xlabel('M')
            ax.set_ylabel('Optimal ratio')
            ax.set_title('Optimal Ratio for kappa %d, R=%d' % (target_kappa, R))

            if savefigs:
                dataio.save_current_figure('optratio_M_targetkappa%d_R%d_{label}_{unique_id}.pdf' % (target_kappa, R))

        target_kappas = np.array([100, 200, 300, 500, 1000, 3000])
        for R_i, R in enumerate(R_space):
            for target_kappa in target_kappas:
                dist_to_target_kappa = (result_em_fits_kappa[..., R_i] - target_kappa)**2.
                best_dist_to_target_kappa = np.argmin(dist_to_target_kappa, axis=1)
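                # Mask out M values for which even the best ratio misses the target kappa by
                # more than MAX_DISTANCE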
                ratio_target_kappa_given_M = np.ma.masked_where(dist_to_target_kappa[np.arange(dist_to_target_kappa.shape[0]), best_dist_to_target_kappa] > MAX_DISTANCE, ratio_space[best_dist_to_target_kappa])

                # replot
                plot_ratio_target_kappa(ratio_target_kappa_given_M, target_kappa, R)

            plt.close('all')


    if plots_kappa_fi_comparison:

        # result_em_fits_kappa and fisher info
        if True:
            for R_i, R in enumerate(R_space):
                for M_tot_selected_i, M_tot_selected in enumerate(M_space[::2]):

                    # M_conj_space = ((1.-ratio_space)*M_tot_selected).astype(int)
                    # M_feat_space = M_tot_selected - M_conj_space

                    f, axes = plt.subplots(2, 1)
                    axes[0].plot(ratio_space, result_em_fits_kappa[2*M_tot_selected_i, ..., R_i])
                    axes[0].set_xlabel('ratio')
                    axes[0].set_title('Fitted kappa')

                    axes[1].plot(ratio_space, utils.stddev_to_kappa(1./result_fisherinfo_mean[2*M_tot_selected_i, ..., R_i]**0.5))
                    axes[1].set_xlabel('ratio')
                    axes[1].set_title('kappa_FI')

                    f.suptitle('M_tot %d' % M_tot_selected, fontsize=15)
                    f.set_tight_layout(True)

                    if savefigs:
                        dataio.save_current_figure('comparison_kappa_fisher_R%d_M%d_{label}_{unique_id}.pdf' % (R, M_tot_selected))

                    plt.close(f)

        if plots_multiple_fisherinfo:
            target_fisherinfos = np.array([100, 200, 300, 500, 1000])
            for R_i, R in enumerate(R_space):
                for target_fisherinfo in target_fisherinfos:
                    dist_to_target_fisherinfo = (result_fisherinfo_mean[..., R_i] - target_fisherinfo)**2.

                    utils.pcolor_2d_data(dist_to_target_fisherinfo, log_scale=True, x=M_space, y=ratio_space, xlabel='M', ylabel='ratio', xlabel_format="%d", title='Fisher info, R=%d' % R)
                    if savefigs:
                        dataio.save_current_figure('pcolor_distfi%d_R%d_log_{label}_{unique_id}.pdf' % (target_fisherinfo, R))

                plt.close('all')

    if specific_plot_effect_R:
        # Choose an M, then find which ratio gives the best fit to a given kappa
        M_target = 228
        M_target_i = np.argmin(np.abs(M_space - M_target))

        # target_kappa = np.ma.mean(result_em_fits_kappa_valid[M_target_i])
        # target_kappa = 5*1e3
        target_kappa = 1e3

        dist_target_kappa = (result_em_fits_kappa_valid[M_target_i] - target_kappa)**2.

        utils.pcolor_2d_data(dist_target_kappa, log_scale=True, x=ratio_space, y=R_space, xlabel='ratio', ylabel='R', ylabel_format="%d", title='Kappa dist %.2f, M %d' % (target_kappa, M_target))
        if savefigs:
            dataio.save_current_figure('pcolor_distkappa%d_M%d_log_{label}_{unique_id}.pdf' % (target_kappa, M_target))




    all_args = data_pbs.loaded_data['args_list']
    variables_to_save = []

    if savedata:
        dataio.save_variables_default(locals(), variables_to_save)

        dataio.make_link_output_to_dropbox(dropbox_current_experiment_folder='higher_dimensions_R')

    plt.show()

    return locals()
def plots_memory_curves(data_pbs, generator_module=None):
    """
        Reload and plot memory curve of a feature code.
        Can use Marginal Fisher Information and fitted Mixture Model as well
    """

    #### SETUP
    #
    savefigs = True
    savedata = True

    plot_pcolor_fit_precision_to_fisherinfo = True
    plot_selected_memory_curves = True
    plot_best_memory_curves = True

    colormap = None  # or 'cubehelix'
    plt.rcParams["font.size"] = 16
    #
    #### /SETUP

    print "Order parameters: ", generator_module.dict_parameters_range.keys()

    result_all_precisions_mean = utils.nanmean(
        np.squeeze(data_pbs.dict_arrays["result_all_precisions"]["results"]), axis=-1
    )
    result_all_precisions_std = utils.nanstd(
        np.squeeze(data_pbs.dict_arrays["result_all_precisions"]["results"]), axis=-1
    )
    result_em_fits_mean = utils.nanmean(np.squeeze(data_pbs.dict_arrays["result_em_fits"]["results"]), axis=-1)
    result_em_fits_std = utils.nanstd(np.squeeze(data_pbs.dict_arrays["result_em_fits"]["results"]), axis=-1)
    result_marginal_inv_fi_mean = utils.nanmean(
        np.squeeze(data_pbs.dict_arrays["result_marginal_inv_fi"]["results"]), axis=-1
    )
    result_marginal_inv_fi_std = utils.nanstd(
        np.squeeze(data_pbs.dict_arrays["result_marginal_inv_fi"]["results"]), axis=-1
    )
    result_marginal_fi_mean = utils.nanmean(
        1.0 / np.squeeze(data_pbs.dict_arrays["result_marginal_inv_fi"]["results"]), axis=-1
    )
    result_marginal_fi_std = utils.nanstd(
        1.0 / np.squeeze(data_pbs.dict_arrays["result_marginal_inv_fi"]["results"]), axis=-1
    )

    M_space = data_pbs.loaded_data["parameters_uniques"]["M"]
    sigmax_space = data_pbs.loaded_data["parameters_uniques"]["sigmax"]
    T_space = data_pbs.loaded_data["datasets_list"][0]["T_space"]

    print M_space
    print sigmax_space
    print T_space
    print result_all_precisions_mean.shape, result_em_fits_mean.shape, result_marginal_inv_fi_mean.shape

    dataio = DataIO.DataIO(
        output_folder=generator_module.pbs_submission_infos["simul_out_dir"] + "/outputs/",
        label="global_" + dataset_infos["save_output_filename"],
    )

    ## Load Experimental data
    data_simult = load_experimental_data.load_data_simult(
        data_dir=os.path.normpath(
            os.path.join(os.path.split(load_experimental_data.__file__)[0], "../../experimental_data/")
        )
    )
    memory_experimental = data_simult["precision_nitems_theo"]

    data_bays2009 = load_experimental_data.load_data_bays09(
        data_dir=os.path.normpath(
            os.path.join(os.path.split(load_experimental_data.__file__)[0], "../../experimental_data/")
        ),
        fit_mixture_model=True,
    )
    bays09_experimental_mixtures_mean = data_bays2009["em_fits_nitems_arrays"]["mean"][1:]
    # add interpolated points for 3 and 5 items
    bays3 = (bays09_experimental_mixtures_mean[:, 2] + bays09_experimental_mixtures_mean[:, 1]) / 2.0
    bays5 = (bays09_experimental_mixtures_mean[:, -1] + bays09_experimental_mixtures_mean[:, -2]) / 2.0
    bays09_experimental_mixtures_mean_compatible = np.c_[
        bays09_experimental_mixtures_mean[:, :2], bays3, bays09_experimental_mixtures_mean[:, 2], bays5
    ]

    # Boost non-targets
    bays09_experimental_mixtures_mean_compatible[1] *= 1.5
    bays09_experimental_mixtures_mean_compatible[2] /= 1.5
    bays09_experimental_mixtures_mean_compatible /= np.sum(bays09_experimental_mixtures_mean_compatible, axis=0)
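    # Rows are (target, nontarget, random) mixture weights; after the ad-hoc boost above, each
    # set size is renormalised so its weights still sum to 1.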

    # Force non target em fit mixture to be zero and not nan
    result_em_fits_mean[..., 0, 2] = 0
    result_em_fits_std[..., 0, 2] = 0
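    # (with a single item there are no non-targets, so the EM fit presumably leaves this
    # mixture weight as NaN)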

    # Compute some landscapes of fit!
    dist_diff_precision_margfi = np.sum(
        np.abs(result_all_precisions_mean * 2.0 - result_marginal_fi_mean[..., 0]) ** 2.0, axis=-1
    )
    dist_diff_precision_margfi_1item = (
        np.abs(result_all_precisions_mean[..., 0] * 2.0 - result_marginal_fi_mean[..., 0, 0]) ** 2.0
    )

    dist_diff_emkappa_margfi = np.sum(
        np.abs(result_em_fits_mean[..., 0] * 2.0 - result_marginal_fi_mean[..., 0]) ** 2.0, axis=-1
    )
    dist_ratio_emkappa_margfi = np.sum(
        np.abs((result_em_fits_mean[..., 0] * 2.0) / result_marginal_fi_mean[..., 0] - 1.0) ** 2.0, axis=-1
    )

    dist_diff_precision_experim = np.sum(np.abs(result_all_precisions_mean - memory_experimental) ** 2.0, axis=-1)
    dist_diff_precision_experim_1item = np.abs(result_all_precisions_mean[..., 0] - memory_experimental[0]) ** 2.0

    dist_diff_emkappa_experim = np.sum(np.abs(result_em_fits_mean[..., 0] - memory_experimental) ** 2.0, axis=-1)
    dist_diff_emkappa_experim_1item = np.abs(result_em_fits_mean[..., 0, 0] - memory_experimental[0]) ** 2.0

    dist_diff_margfi_experim_1item = np.abs(result_marginal_fi_mean[..., 0, 0] - memory_experimental[0]) ** 2.0

    dist_diff_emkappa_mixtures_bays09 = np.sum(
        np.sum((result_em_fits_mean[..., 1:4] - bays09_experimental_mixtures_mean_compatible.T) ** 2.0, axis=-1),
        axis=-1,
    )

    if plot_pcolor_fit_precision_to_fisherinfo:
        # Check fit between precision and fisher info
        utils.pcolor_2d_data(
            dist_diff_precision_margfi, log_scale=True, x=M_space, y=sigmax_space, xlabel="M", ylabel="sigmax"
        )

        if savefigs:
            dataio.save_current_figure("match_precision_margfi_log_pcolor_{label}_{unique_id}.pdf")

        # utils.pcolor_2d_data(dist_diff_precision_margfi, x=M_space, y=sigmax_space[2:], xlabel='M', ylabel='sigmax')
        # if savefigs:
        #    dataio.save_current_figure('match_precision_margfi_pcolor_{label}_{unique_id}.pdf')

        utils.pcolor_2d_data(
            dist_diff_precision_experim, x=M_space, y=sigmax_space, xlabel="M", ylabel="sigmax", log_scale=True
        )

        utils.pcolor_2d_data(
            dist_diff_emkappa_experim, x=M_space, y=sigmax_space, xlabel="M", ylabel="sigmax", log_scale=True
        )

        utils.pcolor_2d_data(
            dist_diff_precision_margfi
            * dist_diff_emkappa_margfi
            * dist_diff_precision_experim
            * dist_diff_emkappa_experim,
            x=M_space,
            y=sigmax_space,
            xlabel="M",
            ylabel="sigmax",
            log_scale=True,
        )

        utils.pcolor_2d_data(
            dist_diff_precision_margfi_1item, log_scale=True, x=M_space, y=sigmax_space, xlabel="M", ylabel="sigmax"
        )

        utils.pcolor_2d_data(
            dist_diff_precision_experim_1item, log_scale=True, x=M_space, y=sigmax_space, xlabel="M", ylabel="sigmax"
        )

        utils.pcolor_2d_data(
            dist_diff_emkappa_experim_1item, log_scale=True, x=M_space, y=sigmax_space, xlabel="M", ylabel="sigmax"
        )

        utils.pcolor_2d_data(
            dist_diff_margfi_experim_1item, log_scale=True, x=M_space, y=sigmax_space, xlabel="M", ylabel="sigmax"
        )

        utils.pcolor_2d_data(
            dist_diff_emkappa_mixtures_bays09, log_scale=False, x=M_space, y=sigmax_space, xlabel="M", ylabel="sigmax"
        )

    if plot_selected_memory_curves:
        selected_values = [[100, 0.8], [200, 0.27], [100, 0.1], [200, 0.8], [100, 0.17], [100, 0.08], [50, 0.21]]

        for current_values in selected_values:
            # Find the indices
            M_i = np.argmin(np.abs(current_values[0] - M_space))
            sigmax_i = np.argmin(np.abs(current_values[1] - sigmax_space))

            ax = utils.plot_mean_std_area(
                T_space, memory_experimental, np.zeros(T_space.size), linewidth=3, fmt="o-", markersize=8
            )

            ax = utils.plot_mean_std_area(
                T_space,
                result_all_precisions_mean[M_i, sigmax_i],
                result_all_precisions_std[M_i, sigmax_i],
                ax_handle=ax,
                linewidth=3,
                fmt="o-",
                markersize=8,
            )

            ax = utils.plot_mean_std_area(
                T_space,
                0.5 * result_marginal_fi_mean[..., 0][M_i, sigmax_i],
                0.5 * result_marginal_fi_std[..., 0][M_i, sigmax_i],
                ax_handle=ax,
                linewidth=3,
                fmt="o-",
                markersize=8,
            )

            ax = utils.plot_mean_std_area(
                T_space,
                result_em_fits_mean[..., 0][M_i, sigmax_i],
                result_em_fits_std[..., 0][M_i, sigmax_i],
                ax_handle=ax,
                xlabel="Number of items",
                ylabel="Inverse variance $[rad^{-2}]$",
                linewidth=3,
                fmt="o-",
                markersize=8,
            )

            # ax.set_title('M %d, sigmax %.2f' % (M_space[M_i], sigmax_space[sigmax_i]))
            plt.legend(["Experimental data", "Precision of samples", "Marginal Fisher Information", "Fitted kappa"])

            ax.set_xlim([0.9, 5.1])
            ax.set_xticks(range(1, 6))
            ax.set_xticklabels(range(1, 6))

            if savefigs:
                dataio.save_current_figure(
                    "memorycurves_M%dsigmax%.2f_{label}_{unique_id}.pdf" % (M_space[M_i], sigmax_space[sigmax_i])
                )

    def em_plot_paper(sigmax_i, M_i):
        f, ax = plt.subplots()

        # Right axis, mixture probabilities
        utils.plot_mean_std_area(
            T_space,
            result_em_fits_mean[..., 1][M_i, sigmax_i],
            result_em_fits_std[..., 1][M_i, sigmax_i],
            xlabel="Number of items",
            ylabel="Mixture probabilities",
            ax_handle=ax,
            linewidth=3,
            fmt="o-",
            markersize=5,
            label="Target",
        )
        utils.plot_mean_std_area(
            T_space,
            result_em_fits_mean[..., 2][M_i, sigmax_i],
            result_em_fits_std[..., 2][M_i, sigmax_i],
            xlabel="Number of items",
            ylabel="Mixture probabilities",
            ax_handle=ax,
            linewidth=3,
            fmt="o-",
            markersize=5,
            label="Nontarget",
        )
        utils.plot_mean_std_area(
            T_space,
            result_em_fits_mean[..., 3][M_i, sigmax_i],
            result_em_fits_std[..., 3][M_i, sigmax_i],
            xlabel="Number of items",
            ylabel="Mixture probabilities",
            ax_handle=ax,
            linewidth=3,
            fmt="o-",
            markersize=5,
            label="Random",
        )

        ax.legend(prop={"size": 15})

        ax.set_title("M %d, sigmax %.2f" % (M_space[M_i], sigmax_space[sigmax_i]))
        ax.set_xlim([1.0, 5.0])
        ax.set_ylim([0.0, 1.1])
        ax.set_xticks(range(1, 6))
        ax.set_xticklabels(range(1, 6))

        f.canvas.draw()

        if savefigs:
            dataio.save_current_figure(
                "memorycurves_emfits_paper_M%.2fsigmax%.2f_{label}_{unique_id}.pdf"
                % (M_space[M_i], sigmax_space[sigmax_i])
            )

    if plot_best_memory_curves:

        # Best mixtures fit
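        # argmin over axis 1: for each M (first axis), take the sigmax index whose
        # mixture proportions lie closest to the Bays 2009 data.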
        best_axis2_i_all = np.argmin(dist_diff_emkappa_mixtures_bays09, axis=1)
        for axis1_i, best_axis2_i in enumerate(best_axis2_i_all):
            em_plot_paper(best_axis2_i, axis1_i)

    all_args = data_pbs.loaded_data["args_list"]
    variables_to_save = [
        "result_all_precisions_mean",
        "result_em_fits_mean",
        "result_marginal_inv_fi_mean",
        "result_all_precisions_std",
        "result_em_fits_std",
        "result_marginal_inv_fi_std",
        "result_marginal_fi_mean",
        "result_marginal_fi_std",
        "M_space",
        "sigmax_space",
        "T_space",
        "all_args",
    ]

    if savedata:
        dataio.save_variables(variables_to_save, locals())

    # Make link to Dropbox
    dataio.make_link_output_to_dropbox(dropbox_current_experiment_folder="memory_curves")

    plt.show()

    return locals()
def plots_ratioMscaling(data_pbs, generator_module=None):
    '''
        Reload and plot precision/fits of a Mixed code.
    '''

    #### SETUP
    #
    savefigs = True
    savedata = True

    plots_pcolor_all = False
    plots_effect_M_target_precision = False
    plots_multiple_precisions = False

    plots_effect_M_target_kappa = False

    plots_subpopulations_effects = False

    plots_subpopulations_effects_kappa_fi = True
    compute_fisher_info_perratioconj = True
    caching_fisherinfo_filename = os.path.join(generator_module.pbs_submission_infos['simul_out_dir'], 'cache_fisherinfo.pickle')

    colormap = None  # or 'cubehelix'
    plt.rcParams['font.size'] = 16
    #
    #### /SETUP

    print "Order parameters: ", generator_module.dict_parameters_range.keys()

    result_all_precisions_mean = (utils.nanmean(data_pbs.dict_arrays['result_all_precisions']['results'], axis=-1))
    result_all_precisions_std = (utils.nanstd(data_pbs.dict_arrays['result_all_precisions']['results'], axis=-1))
    result_em_fits_mean = (utils.nanmean(data_pbs.dict_arrays['result_em_fits']['results'], axis=-1))
    result_em_fits_std = (utils.nanstd(data_pbs.dict_arrays['result_em_fits']['results'], axis=-1))

    all_args = data_pbs.loaded_data['args_list']

    result_em_fits_kappa = result_em_fits_mean[..., 0]

    M_space = data_pbs.loaded_data['parameters_uniques']['M'].astype(int)
    ratio_space = data_pbs.loaded_data['parameters_uniques']['ratio_conj']
    num_repetitions = generator_module.num_repetitions

    print M_space
    print ratio_space
    print result_all_precisions_mean.shape, result_em_fits_mean.shape

    dataio = DataIO.DataIO(output_folder=generator_module.pbs_submission_infos['simul_out_dir'] + '/outputs/', label='global_' + dataset_infos['save_output_filename'])

    target_precision = 100.
    dist_to_target_precision = (result_all_precisions_mean - target_precision)**2.
    best_dist_to_target_precision = np.argmin(dist_to_target_precision, axis=1)
    MAX_DISTANCE = 100.
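    # For each M, pick the ratio_conj whose mean precision is closest to
    # target_precision; rows where even the best ratio is more than MAX_DISTANCE
    # away (in squared error) get masked out below.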

    ratio_target_precision_given_M = np.ma.masked_where(dist_to_target_precision[np.arange(dist_to_target_precision.shape[0]), best_dist_to_target_precision] > MAX_DISTANCE, ratio_space[best_dist_to_target_precision])

    if plots_pcolor_all:
        # Check evolution of precision given M and ratio
        utils.pcolor_2d_data(result_all_precisions_mean, log_scale=True, x=M_space, y=ratio_space, xlabel='M', ylabel='ratio', xlabel_format="%d", title='precision wrt M / ratio')
        if savefigs:
            dataio.save_current_figure('precision_log_pcolor_{label}_{unique_id}.pdf')

        # See distance to target precision evolution
        utils.pcolor_2d_data(dist_to_target_precision, log_scale=True, x=M_space, y=ratio_space, xlabel='M', ylabel='ratio', xlabel_format="%d", title='Dist to target precision %d' % target_precision)
        if savefigs:
            dataio.save_current_figure('dist_targetprecision_log_pcolor_{label}_{unique_id}.pdf')


        # Show kappa
        utils.pcolor_2d_data(result_em_fits_kappa, log_scale=True, x=M_space, y=ratio_space, xlabel='M', ylabel='ratio', xlabel_format="%d", title='kappa wrt M / ratio')
        if savefigs:
            dataio.save_current_figure('kappa_log_pcolor_{label}_{unique_id}.pdf')

        utils.pcolor_2d_data((result_em_fits_kappa - 200)**2., log_scale=True, x=M_space, y=ratio_space, xlabel='M', ylabel='ratio', xlabel_format="%d", title='Dist to kappa 200')
        if savefigs:
            dataio.save_current_figure('dist_kappa_log_pcolor_{label}_{unique_id}.pdf')


    if plots_effect_M_target_precision:
        def plot_ratio_target_precision(ratio_target_precision_given_M, target_precision):
            f, ax = plt.subplots()
            ax.plot(M_space, ratio_target_precision_given_M)
            ax.set_xlabel('M')
            ax.set_ylabel('Optimal ratio')
            ax.set_title('Optimal Ratio for precision %d' % target_precision)

            if savefigs:
                dataio.save_current_figure('effect_ratio_M_targetprecision%d_{label}_{unique_id}.pdf' % target_precision)

        plot_ratio_target_precision(ratio_target_precision_given_M, target_precision)

        if plots_multiple_precisions:
            target_precisions = np.array([100, 200, 300, 500, 1000])
            for target_precision in target_precisions:
                dist_to_target_precision = (result_all_precisions_mean - target_precision)**2.
                best_dist_to_target_precision = np.argmin(dist_to_target_precision, axis=1)
                ratio_target_precision_given_M = np.ma.masked_where(dist_to_target_precision[np.arange(dist_to_target_precision.shape[0]), best_dist_to_target_precision] > MAX_DISTANCE, ratio_space[best_dist_to_target_precision])

                # replot
                plot_ratio_target_precision(ratio_target_precision_given_M, target_precision)

    if plots_effect_M_target_kappa:
        def plot_ratio_target_kappa(ratio_target_kappa_given_M, target_kappa):
            f, ax = plt.subplots()
            ax.plot(M_space, ratio_target_kappa_given_M)
            ax.set_xlabel('M')
            ax.set_ylabel('Optimal ratio')
            ax.set_title('Optimal Ratio for kappa %d' % target_kappa)

            if savefigs:
                dataio.save_current_figure('effect_ratio_M_targetkappa%d_{label}_{unique_id}.pdf' % target_kappa)

        target_kappas = np.array([100, 200, 300, 500, 1000, 3000])
        for target_kappa in target_kappas:
            dist_to_target_kappa = (result_em_fits_kappa - target_kappa)**2.
            best_dist_to_target_kappa = np.argmin(dist_to_target_kappa, axis=1)
            ratio_target_kappa_given_M = np.ma.masked_where(dist_to_target_kappa[np.arange(dist_to_target_kappa.shape[0]), best_dist_to_target_kappa] > MAX_DISTANCE, ratio_space[best_dist_to_target_kappa])

            # replot
            plot_ratio_target_kappa(ratio_target_kappa_given_M, target_kappa)

    if plots_subpopulations_effects:
        # result_all_precisions_mean
        for M_tot_selected_i, M_tot_selected in enumerate(M_space[::2]):

            M_conj_space = ((1.-ratio_space)*M_tot_selected).astype(int)
            M_feat_space = M_tot_selected - M_conj_space

            f, axes = plt.subplots(2, 2)
            axes[0, 0].plot(ratio_space, result_all_precisions_mean[2*M_tot_selected_i])
            axes[0, 0].set_xlabel('ratio')
            axes[0, 0].set_title('Measured precision')

            axes[1, 0].plot(ratio_space, M_conj_space**2*M_feat_space)
            axes[1, 0].set_xlabel('ratio')
            axes[1, 0].set_title('M_c**2*M_f')

            axes[0, 1].plot(ratio_space, M_conj_space**2.)
            axes[0, 1].set_xlabel('ratio')
            axes[0, 1].set_title('M_c**2')

            axes[1, 1].plot(ratio_space, M_feat_space)
            axes[1, 1].set_xlabel('ratio')
            axes[1, 1].set_title('M_f')

            f.suptitle('M_tot %d' % M_tot_selected, fontsize=15)
            f.set_tight_layout(True)

            if savefigs:
                dataio.save_current_figure('scaling_precision_subpop_Mtot%d_{label}_{unique_id}.pdf' % M_tot_selected)

            plt.close(f)

    if plots_subpopulations_effects_kappa_fi:
        # From cache
        if caching_fisherinfo_filename is not None:
            if os.path.exists(caching_fisherinfo_filename):
                # Got file, open it and try to use its contents
                try:
                    with open(caching_fisherinfo_filename, 'rb') as file_in:
                        # Load and assign values
                        cached_data = pickle.load(file_in)
                        result_fisherinfo_Mratio = cached_data['result_fisherinfo_Mratio']
                        compute_fisher_info_perratioconj = False

                except IOError:
                    print "Error while loading ", caching_fisherinfo_filename, "falling back to computing the Fisher Info"

        if compute_fisher_info_perratioconj:
            # We did not save the Fisher info, but we need it (e.g. if we want to fit the mixture model with a fixed kappa), so recompute it using the args_dicts

            result_fisherinfo_Mratio = np.empty((M_space.size, ratio_space.size))

            # Invert the all_args_i -> M, ratio_conj direction
            parameters_indirections = data_pbs.loaded_data['parameters_dataset_index']

            for M_i, M in enumerate(M_space):
                for ratio_conj_i, ratio_conj in enumerate(ratio_space):
                    # Get index of first dataset with the current ratio_conj (no need for the others, I think)
                    try:
                        arg_index = parameters_indirections[(M, ratio_conj)][0]

                        # Now using this dataset, reconstruct a RandomFactorialNetwork and compute the fisher info
                        curr_args = all_args[arg_index]

                        # curr_args['stimuli_generation'] = lambda T: np.linspace(-np.pi*0.6, np.pi*0.6, T)

                        (_, _, _, sampler) = launchers.init_everything(curr_args)

                        # Theo Fisher info
                        result_fisherinfo_Mratio[M_i, ratio_conj_i] = sampler.estimate_fisher_info_theocov()

                        # del curr_args['stimuli_generation']
                    except KeyError:
                        result_fisherinfo_Mratio[M_i, ratio_conj_i] = np.nan


            # Save everything to a file, for faster later plotting
            if caching_fisherinfo_filename is not None:
                try:
                    with open(caching_fisherinfo_filename, 'wb') as filecache_out:
                        data_cache = dict(result_fisherinfo_Mratio=result_fisherinfo_Mratio)
                        pickle.dump(data_cache, filecache_out, protocol=2)
                except IOError:
                    print "Error writing out to caching file ", caching_fisherinfo_filename

        # result_em_fits_kappa
        if False:

            for M_tot_selected_i, M_tot_selected in enumerate(M_space[::2]):

                M_conj_space = ((1.-ratio_space)*M_tot_selected).astype(int)
                M_feat_space = M_tot_selected - M_conj_space

                f, axes = plt.subplots(2, 2)
                axes[0, 0].plot(ratio_space, result_em_fits_kappa[2*M_tot_selected_i])
                axes[0, 0].set_xlabel('ratio')
                axes[0, 0].set_title('Fitted kappa')

                axes[1, 0].plot(ratio_space, utils.stddev_to_kappa(1./result_fisherinfo_Mratio[2*M_tot_selected_i]**0.5))
                axes[1, 0].set_xlabel('ratio')
                axes[1, 0].set_title('kappa_FI_mixed')

                f.suptitle('M_tot %d' % M_tot_selected, fontsize=15)
                f.set_tight_layout(True)

                if savefigs:
                    dataio.save_current_figure('scaling_kappa_subpop_Mtot%d_{label}_{unique_id}.pdf' % M_tot_selected)

                plt.close(f)

        utils.pcolor_2d_data((result_fisherinfo_Mratio - 2000)**2., log_scale=True, x=M_space, y=ratio_space, xlabel='M', ylabel='ratio', xlabel_format="%d", title='Dist to Fisher info 2000')
        if savefigs:
            dataio.save_current_figure('dist2000_fi_log_pcolor_{label}_{unique_id}.pdf')




    all_args = data_pbs.loaded_data['args_list']
    variables_to_save = []

    if savedata:
        dataio.save_variables_default(locals(), variables_to_save)

        dataio.make_link_output_to_dropbox(dropbox_current_experiment_folder='ratio_scaling_M')

    plt.show()

    return locals()
def plots_fisherinfo_singleitem(data_pbs, generator_module=None):
    '''
        Reload the Fisher information estimates for a single item and compare the different metrics (precision/kappa, theoretical Fisher information, curvature) with a bar plot.
    '''

    #### SETUP
    #
    savefigs = True
    savedata = True

    plot_metric_comparison = True

    # caching_bootstrap_filename = None
    # caching_bootstrap_filename = os.path.join(generator_module.pbs_submission_infos['simul_out_dir'], 'outputs', 'cache_bootstrap_misbinding_mixed.pickle')

    plt.rcParams['font.size'] = 16
    #
    #### /SETUP

    print "Order parameters: ", generator_module.dict_parameters_range.keys()

    result_FI_rc_curv_mult = np.squeeze(data_pbs.dict_arrays['result_FI_rc_curv_mult']['results'])
    result_FI_rc_curv_all = np.squeeze(data_pbs.dict_arrays['result_FI_rc_curv_all']['results'])
    result_FI_rc_precision_mult = np.squeeze(data_pbs.dict_arrays['result_FI_rc_precision_mult']['results'])
    result_FI_rc_theo_mult = np.squeeze(data_pbs.dict_arrays['result_FI_rc_theo_mult']['results'])
    result_FI_rc_truevar_mult = np.squeeze(data_pbs.dict_arrays['result_FI_rc_truevar_mult']['results'])
    result_FI_rc_samples_mult = np.squeeze(data_pbs.dict_arrays['result_FI_rc_samples_mult']['results'])
    result_FI_rc_samples_all = np.squeeze(data_pbs.dict_arrays['result_FI_rc_samples_all']['results'])

    print result_FI_rc_curv_mult.shape
    print result_FI_rc_curv_all.shape

    dataio = DataIO(output_folder=generator_module.pbs_submission_infos['simul_out_dir'] + '/outputs/', label='global_' + dataset_infos['save_output_filename'])

    if plot_metric_comparison:
        values_bars = np.array([utils.nanmean(result_FI_rc_precision_mult), utils.nanmean(result_FI_rc_theo_mult[0]), utils.nanmean(result_FI_rc_theo_mult[1]), utils.nanmean(result_FI_rc_curv_all)])
        values_bars_std = np.array([utils.nanstd(result_FI_rc_precision_mult), utils.nanstd(result_FI_rc_theo_mult[0]), utils.nanstd(result_FI_rc_theo_mult[1]), utils.nanstd(result_FI_rc_curv_all)])

        set_colormap = plt.cm.cubehelix
        color_gen = [set_colormap((i+0.1)/(float(np.max((5, len(values_bars)))+0.1))) for i in xrange(np.max((5, len(values_bars))))][::-1]

        bars_indices = np.arange(values_bars.size)
        width = 0.7

        ## Plot all as bars
        f, ax = plt.subplots(figsize=(10,6))

        for bar_i in xrange(values_bars.size):
            plt.bar(bars_indices[bar_i], values_bars[bar_i], width=width, color=color_gen[bar_i], zorder=2)
            plt.errorbar(bars_indices[bar_i] + width/2., values_bars[bar_i], yerr=values_bars_std[bar_i], ecolor='k', capsize=20, capthick=2, linewidth=2, zorder=3)

        # Add the precision bar times 2
        plt.bar(bars_indices[0], 2*values_bars[0], width=width, color=color_gen[0], alpha=0.5, hatch='/', linestyle='dashed', zorder=1)
        plt.errorbar(bars_indices[0] + width/2., 2*values_bars[0], yerr=2*values_bars_std[0], ecolor='k', alpha=0.3, capsize=20, capthick=2, linewidth=2, zorder=3, linestyle='--', fmt=None)
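        # The doubled, semi-transparent hatched bar replots the first metric at 2x
        # its value, presumably for comparison on the Fisher-information scale
        # (the FI ~ 2 * precision convention used elsewhere in these scripts).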

        plt.xticks(bars_indices + width/2., ['Kappa', 'Fisher Information', 'Fisher Information\n Large N', 'Curvature'], rotation=0)
        plt.xlim((-0.2, bars_indices.size))
        f.canvas.draw()
        plt.tight_layout()


        if savefigs:
            dataio.save_current_figure('bar_fisherinfo_singleitem_metric_comparison_{label}_{unique_id}.pdf')


    all_args = data_pbs.loaded_data['args_list']
    variables_to_save = ['nb_repetitions']

    if savedata:
        dataio.save_variables_default(locals(), variables_to_save)

        dataio.make_link_output_to_dropbox(dropbox_current_experiment_folder='fisherinfo_singleitem')


    plt.show()


    return locals()
def plots_memmixtcurves_fig6fig13(self,
                                  num_repetitions=1,
                                  use_cache=True,
                                  layout='vertical',
                                  size=6
                                  ):
    '''
        Plots the memory fidelity for all T and the mixture proportions for all T
    '''

    if self.result_em_fits_stats is None or not use_cache:
        search_progress = progress.Progress(self.fit_exp.T_space.size*num_repetitions)

        # kappa, mixt_target, mixt_nontarget, mixt_random, ll
        result_em_fits = np.nan*np.ones((self.fit_exp.T_space.size, 5, num_repetitions))

        for T_i, T in enumerate(self.fit_exp.T_space):
            self.fit_exp.setup_experimental_stimuli_T(T)

            for repet_i in xrange(num_repetitions):
                print "%.2f%%, %s left - %s" % (search_progress.percentage(), search_progress.time_remaining_str(), search_progress.eta_str())
                print "Fit for T=%d, %d/%d" % (T, repet_i+1, num_repetitions)

                if 'samples' in self.fit_exp.get_names_stored_responses() and repet_i < 1:
                    self.fit_exp.restore_responses('samples')
                else:
                    self.fit_exp.sampler.force_sampling_round()
                    self.fit_exp.store_responses('samples')

                # Fit mixture model
                curr_params_fit = self.fit_exp.sampler.fit_mixture_model(use_all_targets=False)
                result_em_fits[T_i, :, repet_i] = [curr_params_fit[key]
                    for key in ('kappa', 'mixt_target',
                                'mixt_nontargets_sum', 'mixt_random',
                                'train_LL')]

                search_progress.increment()

        # Get stats of EM Fits
        self.result_em_fits_stats = dict(
            mean=utils.nanmean(result_em_fits, axis=-1),
            std=utils.nanstd(result_em_fits, axis=-1)
        )

    # Do the plots
    if self.do_memcurves_fig6 and self.do_mixtcurves_fig13:
        if layout == 'vertical':
            f, axes = plt.subplots(nrows=2, figsize=(size, size*2))
        else:
            f, axes = plt.subplots(ncols=2, figsize=(size*2, size))
    else:
        f, ax = plt.subplots(figsize=(size, size))
        axes = [ax]
    ax_i = 0

    if self.do_memcurves_fig6:
        self.__plot_memcurves(self.result_em_fits_stats,
                              suptitle_text='Memory fidelity',
                              ax=axes[ax_i]
                              )
        ax_i += 1

    if self.do_mixtcurves_fig13:
        self.__plot_mixtcurves(self.result_em_fits_stats,
                               suptitle_text='Mixture proportions',
                               ax=axes[ax_i]
                               )
        ax_i += 1

    return axes
def plots_memory_curves(data_pbs, generator_module=None):
    '''
        Reload and plot memory curves of a conjunctive code.
        Can use the Marginal Fisher Information and the fitted Mixture Model as well.
    '''

    #### SETUP
    #
    savefigs = True
    savedata = True

    plot_pcolor_fit_precision_to_fisherinfo = True
    plot_selected_memory_curves = False
    plot_best_memory_curves = True

    colormap = None  # or 'cubehelix'
    plt.rcParams['font.size'] = 16
    #
    #### /SETUP

    print "Order parameters: ", generator_module.dict_parameters_range.keys()

    result_all_precisions_mean = utils.nanmean(np.squeeze(data_pbs.dict_arrays['result_all_precisions']['results']), axis=-1)
    result_all_precisions_std = utils.nanstd(np.squeeze(data_pbs.dict_arrays['result_all_precisions']['results']), axis=-1)
    result_em_fits_mean = utils.nanmean(np.squeeze(data_pbs.dict_arrays['result_em_fits']['results']), axis=-1)
    result_em_fits_std = utils.nanstd(np.squeeze(data_pbs.dict_arrays['result_em_fits']['results']), axis=-1)
    result_marginal_inv_fi_mean = utils.nanmean(np.squeeze(data_pbs.dict_arrays['result_marginal_inv_fi']['results']), axis=-1)
    result_marginal_inv_fi_std = utils.nanstd(np.squeeze(data_pbs.dict_arrays['result_marginal_inv_fi']['results']), axis=-1)
    result_marginal_fi_mean = utils.nanmean(1./np.squeeze(data_pbs.dict_arrays['result_marginal_inv_fi']['results']), axis=-1)
    result_marginal_fi_std = utils.nanstd(1./np.squeeze(data_pbs.dict_arrays['result_marginal_inv_fi']['results']), axis=-1)
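    # Marginal Fisher information is recovered as the reciprocal of the stored
    # inverse-FI samples; mean and std are taken after the inversion.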

    M_space = data_pbs.loaded_data['parameters_uniques']['M']
    sigmax_space = data_pbs.loaded_data['parameters_uniques']['sigmax']
    T_space = data_pbs.loaded_data['datasets_list'][0]['T_space']

    print M_space
    print sigmax_space
    print T_space
    print result_all_precisions_mean.shape, result_em_fits_mean.shape, result_marginal_inv_fi_mean.shape

    dataio = DataIO.DataIO(output_folder=generator_module.pbs_submission_infos['simul_out_dir'] + '/outputs/', label='global_' + dataset_infos['save_output_filename'])

    ## Load Experimental data
    experim_datadir = os.environ.get('WORKDIR_DROP', os.path.split(load_experimental_data.__file__)[0])

    data_simult = load_experimental_data.load_data_simult(data_dir=os.path.normpath(os.path.join(experim_datadir, '../../experimental_data/')), fit_mixture_model=True)
    gorgo11_experimental_precision = data_simult['precision_nitems_theo']
    gorgo11_experimental_kappa = np.array([data_s['kappa'] for _, data_s in data_simult['em_fits_nitems']['mean'].items()])
    gorgo11_experimental_emfits_mean = np.array([[data[key] for _, data in data_simult['em_fits_nitems']['mean'].items()] for key in ['kappa', 'mixt_target', 'mixt_nontargets', 'mixt_random']])
    gorgo11_experimental_emfits_std = np.array([[data[key] for _, data in data_simult['em_fits_nitems']['std'].items()] for key in ['kappa', 'mixt_target', 'mixt_nontargets', 'mixt_random']])
    gorgo11_experimental_emfits_sem = gorgo11_experimental_emfits_std/np.sqrt(np.unique(data_simult['subject']).size)


    data_bays2009 = load_experimental_data.load_data_bays09(data_dir=os.path.normpath(os.path.join(experim_datadir, '../../experimental_data/')), fit_mixture_model=True)
    # add interpolated points for 3 and 5 items
    emfit_mean_intpfct = spint.interp1d(np.unique(data_bays2009['n_items']), data_bays2009['em_fits_nitems_arrays']['mean'])
    bays09_experimental_mixtures_mean_compatible = emfit_mean_intpfct(np.arange(1, 6))
    emfit_std_intpfct = spint.interp1d(np.unique(data_bays2009['n_items']), data_bays2009['em_fits_nitems_arrays']['std'])
    bays09_experimental_mixtures_std_compatible = emfit_std_intpfct(np.arange(1, 6))

    # Boost non-targets
    # bays09_experimental_mixtures_mean_compatible[1] *= 1.5
    # bays09_experimental_mixtures_mean_compatible[2] /= 1.5
    # bays09_experimental_mixtures_mean_compatible /= np.sum(bays09_experimental_mixtures_mean_compatible, axis=0)

    # Force non target em fit mixture to be zero and not nan
    result_em_fits_mean[..., 0, 2] = 0
    result_em_fits_std[..., 0, 2] = 0

    # Compute some landscapes of fit!
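    # Squared-error landscapes over the (M, sigmax) grid, summed over set sizes
    # where several T are compared; precision and fitted kappa are doubled when
    # compared against the marginal Fisher information (the FI ~ 2 * precision
    # convention used in these scripts).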
    dist_diff_precision_margfi = np.sum(np.abs(result_all_precisions_mean*2. - result_marginal_fi_mean[..., 0])**2., axis=-1)
    dist_ratio_precision_margfi = np.sum(np.abs((result_all_precisions_mean*2.)/result_marginal_fi_mean[..., 0] - 1.0)**2., axis=-1)
    dist_diff_emkappa_margfi = np.sum(np.abs(result_em_fits_mean[..., 0]*2. - result_marginal_fi_mean[..., 0])**2., axis=-1)
    dist_ratio_emkappa_margfi = np.sum(np.abs((result_em_fits_mean[..., 0]*2.)/result_marginal_fi_mean[..., 0] - 1.0)**2., axis=-1)

    dist_diff_precision_experim = np.sum(np.abs(result_all_precisions_mean - gorgo11_experimental_precision)**2., axis=-1)
    dist_diff_emkappa_experim = np.sum(np.abs(result_em_fits_mean[..., 0] - gorgo11_experimental_kappa)**2., axis=-1)

    dist_diff_precision_experim_1item = np.abs(result_all_precisions_mean[..., 0] - gorgo11_experimental_precision[0])**2.
    dist_diff_precision_experim_2item = np.abs(result_all_precisions_mean[..., 1] - gorgo11_experimental_precision[1])**2.
    dist_diff_precision_margfi_1item = np.abs(result_all_precisions_mean[..., 0]*2. - result_marginal_fi_mean[..., 0, 0])**2.
    dist_diff_emkappa_experim_1item = np.abs(result_em_fits_mean[..., 0, 0] - gorgo11_experimental_kappa[0])**2.
    dist_diff_margfi_experim_1item = np.abs(result_marginal_fi_mean[..., 0, 0] - gorgo11_experimental_precision[0])**2.

    dist_diff_emkappa_mixtures_bays09 = np.sum(np.sum((result_em_fits_mean[..., 1:4] - bays09_experimental_mixtures_mean_compatible[1:].T)**2., axis=-1), axis=-1)

    if plot_pcolor_fit_precision_to_fisherinfo:
        # Check fit between precision and fisher info
        utils.pcolor_2d_data(dist_diff_precision_margfi, log_scale=True, x=M_space, y=sigmax_space, xlabel='M', ylabel='sigmax')

        if savefigs:
            dataio.save_current_figure('match_precision_margfi_log_pcolor_{label}_{unique_id}.pdf')

        # utils.pcolor_2d_data(dist_diff_precision_margfi, x=M_space, y=sigmax_space[2:], xlabel='M', ylabel='sigmax')
        # if savefigs:
        #    dataio.save_current_figure('match_precision_margfi_pcolor_{label}_{unique_id}.pdf')

        utils.pcolor_2d_data(dist_ratio_precision_margfi, x=M_space, y=sigmax_space, xlabel='M', ylabel='sigmax', log_scale=True)
        if savefigs:
            dataio.save_current_figure('match_ratio_precision_margfi_log_pcolor_{label}_{unique_id}.pdf')

        utils.pcolor_2d_data(dist_diff_emkappa_margfi, x=M_space, y=sigmax_space, xlabel='M', ylabel='sigmax', log_scale=True)
        if savefigs:
            dataio.save_current_figure('match_diff_emkappa_margfi_log_pcolor_{label}_{unique_id}.pdf')

        utils.pcolor_2d_data(dist_ratio_emkappa_margfi, x=M_space, y=sigmax_space, xlabel='M', ylabel='sigmax', log_scale=True)
        if savefigs:
            dataio.save_current_figure('match_ratio_emkappa_margfi_log_pcolor_{label}_{unique_id}.pdf')


        utils.pcolor_2d_data(dist_diff_precision_experim, x=M_space, y=sigmax_space, xlabel='M', ylabel='sigmax', log_scale=True)
        if savefigs:
            dataio.save_current_figure('match_diff_precision_experim_log_pcolor_{label}_{unique_id}.pdf')


        utils.pcolor_2d_data(dist_diff_emkappa_experim, x=M_space, y=sigmax_space, xlabel='M', ylabel='sigmax', log_scale=True)
        if savefigs:
            dataio.save_current_figure('match_diff_emkappa_experim_log_pcolor_{label}_{unique_id}.pdf')

        utils.pcolor_2d_data(dist_diff_precision_margfi*dist_diff_emkappa_margfi*dist_diff_precision_experim*dist_diff_emkappa_experim, x=M_space, y=sigmax_space, xlabel='M', ylabel='sigmax', log_scale=True)
        if savefigs:
            dataio.save_current_figure('match_bigmultiplication_log_pcolor_{label}_{unique_id}.pdf')

        utils.pcolor_2d_data(dist_diff_precision_margfi_1item, log_scale=True, x=M_space, y=sigmax_space, xlabel='M', ylabel='sigmax')
        if savefigs:
            dataio.save_current_figure('match_diff_precision_margfi_1item_log_pcolor_{label}_{unique_id}.pdf')


        utils.pcolor_2d_data(dist_diff_precision_experim_1item, log_scale=True, x=M_space, y=sigmax_space, xlabel='M', ylabel='sigmax')
        if savefigs:
            dataio.save_current_figure('match_diff_precision_experim_1item_log_pcolor_{label}_{unique_id}.pdf')


        utils.pcolor_2d_data(dist_diff_emkappa_experim_1item, log_scale=True, x=M_space, y=sigmax_space, xlabel='M', ylabel='sigmax')
        if savefigs:
            dataio.save_current_figure('match_diff_emkappa_experim_1item_log_pcolor_{label}_{unique_id}.pdf')


        utils.pcolor_2d_data(dist_diff_margfi_experim_1item, log_scale=True, x=M_space, y=sigmax_space, xlabel='M', ylabel='sigmax')
        if savefigs:
            dataio.save_current_figure('match_diff_margfi_experim_1item_log_pcolor_{label}_{unique_id}.pdf')

        utils.pcolor_2d_data(dist_diff_emkappa_mixtures_bays09, log_scale=True, x=M_space, y=sigmax_space, xlabel='M', ylabel='sigmax')
        if savefigs:
            dataio.save_current_figure('match_diff_mixtures_experbays09_pcolor_{label}_{unique_id}.pdf')


    # Macro plot
    def mem_plot_precision(sigmax_i, M_i):
        ax = utils.plot_mean_std_area(T_space, gorgo11_experimental_precision, np.zeros(T_space.size), linewidth=3, fmt='o-', markersize=8, label='Experimental data')

        ax = utils.plot_mean_std_area(T_space, result_all_precisions_mean[M_i, sigmax_i], result_all_precisions_std[M_i, sigmax_i], ax_handle=ax, linewidth=3, fmt='o-', markersize=8, label='Precision of samples')

        ax = utils.plot_mean_std_area(T_space, 0.5*result_marginal_fi_mean[..., 0][M_i, sigmax_i], 0.5*result_marginal_fi_std[..., 0][M_i, sigmax_i], ax_handle=ax, linewidth=3, fmt='o-', markersize=8, label='Marginal Fisher Information')

        # ax = utils.plot_mean_std_area(T_space, result_em_fits_mean[..., 0][M_i, sigmax_i], result_em_fits_std[..., 0][M_i, sigmax_i], ax_handle=ax, xlabel='Number of items', ylabel="Inverse variance $[rad^{-2}]$", linewidth=3, fmt='o-', markersize=8, label='Fitted kappa')

        ax.set_title('M %d, sigmax %.2f' % (M_space[M_i], sigmax_space[sigmax_i]))
        ax.legend()
        ax.set_xlim([0.9, 5.1])
        ax.set_xticks(range(1, 6))
        ax.set_xticklabels(range(1, 6))

        ax.get_figure().canvas.draw()

        if savefigs:
            dataio.save_current_figure('memorycurves_precision_M%dsigmax%.2f_{label}_{unique_id}.pdf' % (M_space[M_i], sigmax_space[sigmax_i]))


    def mem_plot_kappa(sigmax_i, M_i, experim_data_mean, experim_data_std=None):
        ax = utils.plot_mean_std_area(T_space, experim_data_mean, experim_data_std, linewidth=3, fmt='o-', markersize=8, label='Experimental data')

        ax = utils.plot_mean_std_area(T_space, result_em_fits_mean[..., 0][M_i, sigmax_i], result_em_fits_std[..., 0][M_i, sigmax_i], xlabel='Number of items', ylabel="Inverse variance $[rad^{-2}]$", ax_handle=ax, linewidth=3, fmt='o-', markersize=8, label='Fitted kappa')

        # ax = utils.plot_mean_std_area(T_space, 0.5*result_marginal_fi_mean[..., 0][M_i, sigmax_i], 0.5*result_marginal_fi_std[..., 0][M_i, sigmax_i], ax_handle=ax, linewidth=3, fmt='o-', markersize=8, label='Marginal Fisher Information')

        ax.set_title('M %d, sigmax %.2f' % (M_space[M_i], sigmax_space[sigmax_i]))
        ax.legend()
        ax.set_xlim([0.9, 5.1])
        ax.set_xticks(range(1, 6))
        ax.set_xticklabels(range(1, 6))

        ax.get_figure().canvas.draw()

        if savefigs:
            dataio.save_current_figure('memorycurves_kappa_M%dsigmax%.2f_{label}_{unique_id}.pdf' % (M_space[M_i], sigmax_space[sigmax_i]))

    def em_plot_paper(sigmax_i, M_i):
        f, ax = plt.subplots()

        # Right axis, mixture probabilities
        utils.plot_mean_std_area(T_space, result_em_fits_mean[..., 1][M_i, sigmax_i], result_em_fits_std[..., 1][M_i, sigmax_i], xlabel='Number of items', ylabel="Mixture probabilities", ax_handle=ax, linewidth=3, fmt='o-', markersize=5, label='Target')
        utils.plot_mean_std_area(T_space, result_em_fits_mean[..., 2][M_i, sigmax_i], result_em_fits_std[..., 2][M_i, sigmax_i], xlabel='Number of items', ylabel="Mixture probabilities", ax_handle=ax, linewidth=3, fmt='o-', markersize=5, label='Nontarget')
        utils.plot_mean_std_area(T_space, result_em_fits_mean[..., 3][M_i, sigmax_i], result_em_fits_std[..., 3][M_i, sigmax_i], xlabel='Number of items', ylabel="Mixture probabilities", ax_handle=ax, linewidth=3, fmt='o-', markersize=5, label='Random')

        ax.legend(prop={'size':15})

        ax.set_title('M %d, sigmax %.2f' % (M_space[M_i], sigmax_space[sigmax_i]))
        ax.set_xlim([1.0, 5.0])
        ax.set_ylim([0.0, 1.1])
        ax.set_xticks(range(1, 6))
        ax.set_xticklabels(range(1, 6))

        f.canvas.draw()

        if savefigs:
            dataio.save_current_figure('memorycurves_emfits_paper_M%.2fsigmax%.2f_{label}_{unique_id}.pdf' % (M_space[M_i], sigmax_space[sigmax_i]))


    if plot_selected_memory_curves:
        selected_values = [[100, 0.1], [196, 0.28], [64, 0.05]]

        for current_values in selected_values:
            # Find the indices
            M_i         = np.argmin(np.abs(current_values[0] - M_space))
            sigmax_i    = np.argmin(np.abs(current_values[1] - sigmax_space))

            mem_plot_precision(sigmax_i, M_i)

            mem_plot_kappa(sigmax_i, M_i, gorgo11_experimental_emfits_mean[0], gorgo11_experimental_emfits_std[0])


    if plot_best_memory_curves:
        # Best precision fit
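        # argmin over axis 1: for each M (first axis), take the sigmax index whose
        # precision curve best matches the experimental data.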
        best_axis2_i_all = np.argmin(dist_diff_precision_experim, axis=1)

        for axis1_i, best_axis2_i in enumerate(best_axis2_i_all):
            mem_plot_precision(best_axis2_i, axis1_i)

        # Best kappa fit
        best_axis2_i_all = np.argmin(dist_diff_emkappa_experim, axis=1)

        for axis1_i, best_axis2_i in enumerate(best_axis2_i_all):
            mem_plot_kappa(best_axis2_i, axis1_i, gorgo11_experimental_emfits_mean[0], gorgo11_experimental_emfits_std[0])

            mem_plot_precision(best_axis2_i, axis1_i)

        # Best mixtures fit for Bays09
        best_axis2_i_all = np.argmin(dist_diff_emkappa_mixtures_bays09, axis=1)

        for axis1_i, best_axis2_i in enumerate(best_axis2_i_all):
            em_plot_paper(best_axis2_i, axis1_i)



    all_args = data_pbs.loaded_data['args_list']
    variables_to_save = ['gorgo11_experimental_precision', 'gorgo11_experimental_kappa']

    if savedata:
        dataio.save_variables_default(locals(), variables_to_save)

        dataio.make_link_output_to_dropbox(dropbox_current_experiment_folder='memory_curves')

    plt.show()

    return locals()
def plots_ratioMscaling(data_pbs, generator_module=None):
    '''
        Reload and plot precision/fits of a Mixed code.
    '''

    #### SETUP
    #
    savefigs = True
    savedata = True

    plots_pcolor_all = False
    plots_effect_M_target_kappa = False

    plots_kappa_fi_comparison = False
    plots_multiple_fisherinfo = False
    specific_plot_effect_R = True
    specific_plot_effect_ratio_M = False

    convert_M_realsizes = False

    plots_pcolor_realsizes_Msubs = True
    plots_pcolor_realsizes_Mtot = True


    colormap = None  # or 'cubehelix'
    plt.rcParams['font.size'] = 16
    # interpolation_method = 'linear'
    interpolation_method = 'nearest'
    #
    #### /SETUP

    print "Order parameters: ", generator_module.dict_parameters_range.keys()

    result_all_precisions_mean = (utils.nanmean(data_pbs.dict_arrays['result_all_precisions']['results'], axis=-1))
    result_all_precisions_std = (utils.nanstd(data_pbs.dict_arrays['result_all_precisions']['results'], axis=-1))
    result_em_fits_mean = (utils.nanmean(data_pbs.dict_arrays['result_em_fits']['results'], axis=-1))
    result_em_fits_std = (utils.nanstd(data_pbs.dict_arrays['result_em_fits']['results'], axis=-1))
    result_fisherinfo_mean = (utils.nanmean(data_pbs.dict_arrays['result_fisher_info']['results'], axis=-1))
    result_fisherinfo_std = (utils.nanstd(data_pbs.dict_arrays['result_fisher_info']['results'], axis=-1))

    all_args = data_pbs.loaded_data['args_list']

    result_em_fits_kappa = result_em_fits_mean[..., 0]
    result_em_fits_target = result_em_fits_mean[..., 1]
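    # Mask kappa estimates where the fitted target proportion is below 0.8,
    # presumably to discard unreliable EM fits.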
    result_em_fits_kappa_valid = np.ma.masked_where(result_em_fits_target < 0.8, result_em_fits_kappa)

    # flat versions
    result_parameters_flat = np.array(data_pbs.dict_arrays['result_all_precisions']['parameters_flat'])
    result_all_precisions_mean_flat = np.mean(np.array(data_pbs.dict_arrays['result_all_precisions']['results_flat']), axis=-1)
    result_em_fits_mean_flat = np.mean(np.array(data_pbs.dict_arrays['result_em_fits']['results_flat']), axis=-1)
    result_fisherinfor_mean_flat = np.mean(np.array(data_pbs.dict_arrays['result_fisher_info']['results_flat']), axis=-1)
    result_em_fits_kappa_flat = result_em_fits_mean_flat[..., 0]
    result_em_fits_target_flat = result_em_fits_mean_flat[..., 1]
    result_em_fits_kappa_valid_flat = np.ma.masked_where(result_em_fits_target_flat < 0.8, result_em_fits_kappa_flat)



    M_space = data_pbs.loaded_data['parameters_uniques']['M'].astype(int)
    ratio_space = data_pbs.loaded_data['parameters_uniques']['ratio_conj']
    R_space = data_pbs.loaded_data['parameters_uniques']['R'].astype(int)
    num_repetitions = generator_module.num_repetitions
    T = generator_module.T

    print M_space
    print ratio_space
    print R_space
    print result_all_precisions_mean.shape, result_em_fits_mean.shape

    dataio = DataIO.DataIO(output_folder=generator_module.pbs_submission_infos['simul_out_dir'] + '/outputs/', label='global_' + dataset_infos['save_output_filename'])

    MAX_DISTANCE = 100.

    if convert_M_realsizes:
        # Currently M*ratio_conj gives the conjunctive subpopulation size, but
        # only floor(M_conj**(1/R))**R conjunctive units (and floor(M_feat/R)*R
        # feature units) are really used. So we should convert to M_conj_real
        # and M_feat_real instead of M and ratio.
        result_parameters_flat_subM_converted = []
        result_parameters_flat_Mtot_converted = []

        for params in result_parameters_flat:
            M = params[0]; ratio_conj = params[1]; R = int(params[2])

            M_conj_prior = int(M*ratio_conj)
            M_conj_true = int(np.floor(M_conj_prior**(1./R))**R)
            M_feat_true = int(np.floor((M-M_conj_prior)/R)*R)
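            # e.g. M=100, ratio_conj=0.5, R=2: M_conj_prior=50,
            # M_conj_true=floor(sqrt(50))**2=49, M_feat_true=floor(50/2)*2=50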

            # result_parameters_flat_subM_converted contains (M_conj, M_feat, R)
            result_parameters_flat_subM_converted.append(np.array([M_conj_true, M_feat_true, R]))
            # result_parameters_flat_Mtot_converted contains (M_tot, ratio_conj, R)
            result_parameters_flat_Mtot_converted.append(np.array([float(M_conj_true+M_feat_true), float(M_conj_true)/float(M_conj_true+M_feat_true), R]))

        result_parameters_flat_subM_converted = np.array(result_parameters_flat_subM_converted)
        result_parameters_flat_Mtot_converted = np.array(result_parameters_flat_Mtot_converted)

    if plots_pcolor_all:
        if convert_M_realsizes:
            def plot_interp(points, data, currR_indices, title='', points_label='', xlabel='', ylabel=''):
                utils.contourf_interpolate_data_interactive_maxvalue(points[currR_indices][..., :2], data[currR_indices], xlabel=xlabel, ylabel=ylabel, title='%s, R=%d' % (title, R), interpolation_numpoints=200, interpolation_method=interpolation_method, log_scale=False)

                if savefigs:
                    dataio.save_current_figure('pcolortrueM%s_%s_R%d_log_%s_{label}_{unique_id}.pdf' % (points_label, title, R, interpolation_method))

            all_datas = [dict(name='precision', data=result_all_precisions_mean_flat), dict(name='kappa', data=result_em_fits_kappa_flat), dict(name='kappavalid', data=result_em_fits_kappa_valid_flat), dict(name='target', data=result_em_fits_target_flat), dict(name='fisherinfo', data=result_fisherinfor_mean_flat)]
            all_points = []
            if plots_pcolor_realsizes_Msubs:
                all_points.append(dict(name='sub', data=result_parameters_flat_subM_converted, xlabel='M_conj', ylabel='M_feat'))
            if plots_pcolor_realsizes_Mtot:
                all_points.append(dict(name='tot', data=result_parameters_flat_Mtot_converted, xlabel='Mtot', ylabel='ratio_conj'))

            for curr_points in all_points:
                for curr_data in all_datas:
                    for R_i, R in enumerate(R_space):
                        currR_indices = curr_points['data'][:, 2] == R

                        plot_interp(curr_points['data'], curr_data['data'], currR_indices, title=curr_data['name'], points_label=curr_points['name'], xlabel=curr_points['xlabel'], ylabel=curr_points['ylabel'])


        else:
            # Do one pcolor for M and ratio per R
            for R_i, R in enumerate(R_space):
                # Check evolution of precision given M and ratio
                utils.pcolor_2d_data(result_all_precisions_mean[..., R_i], log_scale=True, x=M_space, y=ratio_space, xlabel='M', ylabel='ratio', xlabel_format="%d", title='precision, R=%d' % R)
                if savefigs:
                    dataio.save_current_figure('pcolor_precision_R%d_log_{label}_{unique_id}.pdf' % R)

                # Show kappa
                try:
                    utils.pcolor_2d_data(result_em_fits_kappa_valid[..., R_i], log_scale=True, x=M_space, y=ratio_space, xlabel='M', ylabel='ratio', xlabel_format="%d", title='kappa, R=%d' % R)
                    if savefigs:
                        dataio.save_current_figure('pcolor_kappa_R%d_log_{label}_{unique_id}.pdf' % R)
                except ValueError:
                    pass

                # Show probability on target
                utils.pcolor_2d_data(result_em_fits_target[..., R_i], log_scale=False, x=M_space, y=ratio_space, xlabel='M', ylabel='ratio', xlabel_format="%d", title='target, R=%d' % R)
                if savefigs:
                    dataio.save_current_figure('pcolor_target_R%d_{label}_{unique_id}.pdf' % R)

                # # Show Fisher info
                utils.pcolor_2d_data(result_fisherinfo_mean[..., R_i], log_scale=True, x=M_space, y=ratio_space, xlabel='M', ylabel='ratio', xlabel_format="%d", title='fisher info, R=%d' % R)
                if savefigs:
                    dataio.save_current_figure('pcolor_fisherinfo_R%d_log_{label}_{unique_id}.pdf' % R)

                plt.close('all')

    if plots_effect_M_target_kappa:
        def plot_ratio_target_kappa(ratio_target_kappa_given_M, target_kappa, R):
            f, ax = plt.subplots()
            ax.plot(M_space, ratio_target_kappa_given_M)
            ax.set_xlabel('M')
            ax.set_ylabel('Optimal ratio')
            ax.set_title('Optimal Ratio for kappa %d, R=%d' % (target_kappa, R))

            if savefigs:
                dataio.save_current_figure('optratio_M_targetkappa%d_R%d_{label}_{unique_id}.pdf' % (target_kappa, R))

        target_kappas = np.array([100, 200, 300, 500, 1000, 3000])
        for R_i, R in enumerate(R_space):
            for target_kappa in target_kappas:
                dist_to_target_kappa = (result_em_fits_kappa[..., R_i] - target_kappa)**2.
                best_dist_to_target_kappa = np.argmin(dist_to_target_kappa, axis=1)
                ratio_target_kappa_given_M = np.ma.masked_where(dist_to_target_kappa[np.arange(dist_to_target_kappa.shape[0]), best_dist_to_target_kappa] > MAX_DISTANCE, ratio_space[best_dist_to_target_kappa])

                # replot
                plot_ratio_target_kappa(ratio_target_kappa_given_M, target_kappa, R)

            plt.close('all')


    if plots_kappa_fi_comparison:

        # result_em_fits_kappa and fisher info
        if True:
            for R_i, R in enumerate(R_space):
                for M_tot_selected_i, M_tot_selected in enumerate(M_space[::2]):

                    # M_conj_space = ((1.-ratio_space)*M_tot_selected).astype(int)
                    # M_feat_space = M_tot_selected - M_conj_space

                    f, axes = plt.subplots(2, 1)
                    axes[0].plot(ratio_space, result_em_fits_kappa[2*M_tot_selected_i, ..., R_i])
                    axes[0].set_xlabel('ratio')
                    axes[0].set_title('Fitted kappa')

                    axes[1].plot(ratio_space, utils.stddev_to_kappa(1./result_fisherinfo_mean[2*M_tot_selected_i, ..., R_i]**0.5))
                    axes[1].set_xlabel('ratio')
                    axes[1].set_title('kappa_FI')

                    f.suptitle('M_tot %d' % M_tot_selected, fontsize=15)
                    f.set_tight_layout(True)

                    if savefigs:
                        dataio.save_current_figure('comparison_kappa_fisher_R%d_M%d_{label}_{unique_id}.pdf' % (R, M_tot_selected))

                    plt.close(f)

        if plots_multiple_fisherinfo:
            target_fisherinfos = np.array([100, 200, 300, 500, 1000])
            for R_i, R in enumerate(R_space):
                for target_fisherinfo in target_fisherinfos:
                    dist_to_target_fisherinfo = (result_fisherinfo_mean[..., R_i] - target_fisherinfo)**2.

                    utils.pcolor_2d_data(dist_to_target_fisherinfo, log_scale=True, x=M_space, y=ratio_space, xlabel='M', ylabel='ratio', xlabel_format="%d", title='Fisher info, R=%d' % R)
                    if savefigs:
                        dataio.save_current_figure('pcolor_distfi%d_R%d_log_{label}_{unique_id}.pdf' % (target_fisherinfo, R))

                plt.close('all')

    if specific_plot_effect_R:
        M_target = 356
        if convert_M_realsizes:
            M_tot_target = M_target
            delta_around_target = 30

            filter_points_totM_indices = np.abs(result_parameters_flat_Mtot_converted[:, 0] - M_tot_target) < delta_around_target

            # first check landscape
            utils.contourf_interpolate_data_interactive_maxvalue(result_parameters_flat_Mtot_converted[filter_points_totM_indices][..., 1:], result_em_fits_kappa_flat[filter_points_totM_indices], xlabel='ratio_conj', ylabel='R', title='kappa, M_target=%d +- %d' % (M_tot_target, delta_around_target), interpolation_numpoints=200, interpolation_method=interpolation_method, log_scale=False, show_slider=False)

            if savefigs:
                dataio.save_current_figure('specific_pcolortrueMtot_kappa_M%d_log_%s_{label}_{unique_id}.pdf' % (M_tot_target, interpolation_method))

            # Then plot distance to specific kappa
            # target_kappa = 1.2e3
            target_kappa = 580
            dist_target_kappa_flat = np.abs(result_em_fits_kappa_flat - target_kappa)
            mask_greater_than = 5e3

            utils.contourf_interpolate_data_interactive_maxvalue(result_parameters_flat_Mtot_converted[filter_points_totM_indices][..., 1:], dist_target_kappa_flat[filter_points_totM_indices], xlabel='ratio_conj', ylabel='R', title='dist kappa %d, M_target=%d +- %d' % (target_kappa, M_tot_target, delta_around_target), interpolation_numpoints=200, interpolation_method=interpolation_method, log_scale=False, mask_greater_than=mask_greater_than, mask_smaller_than=0)

            if savefigs:
                dataio.save_current_figure('specific_pcolortrueMtot_distkappa%d_M%d_log_%s_{label}_{unique_id}.pdf' % (target_kappa, M_tot_target, interpolation_method))

        else:
            # Choose a M, find which ratio gives best fit to a given kappa
            M_target_i = np.argmin(np.abs(M_space - M_target))

            utils.pcolor_2d_data(result_em_fits_kappa[M_target_i], log_scale=True, x=ratio_space, y=R_space, xlabel='ratio', ylabel='R', ylabel_format="%d", title='Kappa, M %d' % (M_target))
            plt.gcf().set_tight_layout(True)
            plt.gcf().canvas.draw()
            if savefigs:
                dataio.save_current_figure('specific_Reffect_pcolor_kappa_M%dT%d_log_{label}_{unique_id}.pdf' % (M_target, T))
            # target_kappa = np.ma.mean(result_em_fits_kappa_valid[M_target_i])
            # target_kappa = 5*1e3
            # target_kappa = 1.2e3
            target_kappa = 580

            # dist_target_kappa = np.abs(result_em_fits_kappa_valid[M_target_i] - target_kappa)
            dist_target_kappa = result_em_fits_kappa[M_target_i]/target_kappa
            dist_target_kappa[dist_target_kappa > 2.0] = 2.0
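            # Plotted as the ratio kappa/target clipped at 2, rather than a squared
            # distance, so over- and undershooting can both be read off the
            # diverging RdBu colormap below.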
            # dist_target_kappa[dist_target_kappa < 0.5] = 0.5

            utils.pcolor_2d_data(dist_target_kappa, log_scale=False, x=ratio_space, y=R_space, xlabel='ratio', ylabel='R', ylabel_format="%d", title='Ratio kappa/%.2f, M %d' % (target_kappa, M_target), cmap='RdBu_r')
            plt.gcf().set_tight_layout(True)
            plt.gcf().canvas.draw()
            if savefigs:
                dataio.save_current_figure('specific_Reffect_pcolor_distkappa%d_M%dT%d_log_{label}_{unique_id}.pdf' % (target_kappa, M_target, T))

            # Plot the probability of being on-target
            utils.pcolor_2d_data(result_em_fits_target[M_target_i], log_scale=False, x=ratio_space, y=R_space, xlabel='ratio', ylabel='R', ylabel_format="%d", title='target mixture proportion, M %d' % (M_target), vmin=0.0, vmax=1.0)  #cmap='RdBu_r'
            plt.gcf().set_tight_layout(True)
            plt.gcf().canvas.draw()
            if savefigs:
                dataio.save_current_figure('specific_Reffect_pcolor_target_M%dT%d_{label}_{unique_id}.pdf' % (M_target, T))



    if specific_plot_effect_ratio_M:
        # try to do the same plots as in ratio_scaling_M but with the current data
        R_target = 2
        interpolation_method = 'cubic'
        mask_greater_than = 1e3

        if convert_M_realsizes:
            # filter_points_targetR_indices = np.abs(result_parameters_flat_Mtot_converted[:, -1] - R_target) == 0
            filter_points_targetR_indices = np.abs(result_parameters_flat_subM_converted[:, -1] - R_target) == 0

            # utils.contourf_interpolate_data_interactive_maxvalue(result_parameters_flat_Mtot_converted[filter_points_targetR_indices][..., :-1], result_em_fits_kappa_flat[filter_points_targetR_indices], xlabel='M', ylabel='ratio_conj', title='kappa, R=%d' % (R_target), interpolation_numpoints=200, interpolation_method=interpolation_method, log_scale=False)
            utils.contourf_interpolate_data_interactive_maxvalue(result_parameters_flat_subM_converted[filter_points_targetR_indices][..., :-1], result_em_fits_kappa_flat[filter_points_targetR_indices], xlabel='M_conj', ylabel='M_feat', title='kappa, R=%d' % (R_target), interpolation_numpoints=200, interpolation_method=interpolation_method, log_scale=False, show_slider=False)

            if savefigs:
                dataio.save_current_figure('specific_ratioM_pcolorsubM_kappa_R%d_log_%s_{label}_{unique_id}.pdf' % (R_target, interpolation_method))

            # Then plot distance to specific kappa
            target_kappa = 580
            # target_kappa = 2000
            # dist_target_kappa_flat = np.abs(result_em_fits_kappa_flat - target_kappa)
            dist_target_kappa_flat = result_em_fits_kappa_flat/target_kappa
            dist_target_kappa_flat[dist_target_kappa_flat > 1.45] = 1.45
            dist_target_kappa_flat[dist_target_kappa_flat < 0.5] = 0.5

            # utils.contourf_interpolate_data_interactive_maxvalue(result_parameters_flat_Mtot_converted[filter_points_targetR_indices][..., :-1], dist_target_kappa_flat[filter_points_targetR_indices], xlabel='M', ylabel='ratio_conj', title='kappa, R=%d' % (R_target), interpolation_numpoints=200, interpolation_method=interpolation_method, log_scale=False, mask_smaller_than=0, show_slider=False, mask_greater_than =mask_greater_than)

            # utils.contourf_interpolate_data_interactive_maxvalue(result_parameters_flat[filter_points_targetR_indices][..., :-1], dist_target_kappa_flat[filter_points_targetR_indices], xlabel='M', ylabel='ratio_conj', title='kappa, R=%d' % (R_target), interpolation_numpoints=200, interpolation_method=interpolation_method, log_scale=False, mask_smaller_than=0, show_slider=False, mask_greater_than =mask_greater_than)

            utils.contourf_interpolate_data_interactive_maxvalue(result_parameters_flat_subM_converted[filter_points_targetR_indices][..., :-1], dist_target_kappa_flat[filter_points_targetR_indices], xlabel='M_conj', ylabel='M_feat', title='Kappa dist %.2f, R=%d' % (target_kappa, R_target), interpolation_numpoints=200, interpolation_method=interpolation_method, log_scale=False, mask_greater_than=mask_greater_than, mask_smaller_than=0, show_slider=False)

            if savefigs:
                dataio.save_current_figure('specific_ratioM_pcolorsubM_distkappa%d_R%d_log_%s_{label}_{unique_id}.pdf' % (target_kappa, R_target, interpolation_method))


            ## Scatter plot, works better...

            f, ax = plt.subplots()
            ss = ax.scatter(result_parameters_flat_Mtot_converted[filter_points_targetR_indices][..., 0], result_parameters_flat_Mtot_converted[filter_points_targetR_indices][..., 1], 500, c=(dist_target_kappa_flat[filter_points_targetR_indices]),
                norm=matplotlib.colors.LogNorm())
            plt.colorbar(ss)
            ax.set_xlabel('M')
            ax.set_ylabel('ratio')
            ax.set_xlim((result_parameters_flat_Mtot_converted[filter_points_targetR_indices][..., 0].min()*0.8, result_parameters_flat_Mtot_converted[filter_points_targetR_indices][..., 0].max()*1.03))
            ax.set_ylim((-0.05, result_parameters_flat_Mtot_converted[filter_points_targetR_indices][..., 1].max()*1.05))
            ax.set_title('Kappa dist %.2f, R=%d' % (target_kappa, R_target))

            if savefigs:
                dataio.save_current_figure('specific_ratioM_scattertotM_distkappa%d_R%d_log_%s_{label}_{unique_id}.pdf' % (target_kappa, R_target, interpolation_method))


            ## Spline interpolation
            distkappa_spline_int_params = spint.bisplrep(result_parameters_flat_Mtot_converted[filter_points_targetR_indices][..., 0], result_parameters_flat_Mtot_converted[filter_points_targetR_indices][..., 1], np.log(dist_target_kappa_flat[filter_points_targetR_indices]), kx=3, ky=3, s=1)
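            # Smooth the (M_tot, ratio) -> distance surface with a bivariate cubic
            # spline fitted to log(dist); it is exponentiated again when evaluated
            # on the regular grid below.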
            if True:
                M_interp_space = np.linspace(100, 740, 100)
                ratio_interp_space = np.linspace(0.0, 1.0, 100)
                # utils.pcolor_2d_data(spint.bisplev(M_interp_space, ratio_interp_space, distkappa_spline_int_params), y=ratio_interp_space, x=M_interp_space, ylabel='ratio', xlabel='M', xlabel_format="%d", title='Kappa dist %.2f, R %d' % (target_kappa, R_target), ticks_interpolate=11)
                utils.pcolor_2d_data(np.exp(spint.bisplev(M_interp_space, ratio_interp_space, distkappa_spline_int_params)), y=ratio_interp_space, x=M_interp_space, ylabel='ratio conjunctivity', xlabel='M', xlabel_format="%d", title='Ratio kappa/%.2f, R %d' % (target_kappa, R_target), ticks_interpolate=11, log_scale=False, cmap='RdBu_r')
                # plt.scatter(result_parameters_flat_Mtot_converted[filter_points_targetR_indices][..., 0], result_parameters_flat_Mtot_converted[filter_points_targetR_indices][..., 1], marker='o', c='b', s=5)
                # plt.scatter(np.argmin(np.abs(M_interp_space[:, np.newaxis] - result_parameters_flat_Mtot_converted[filter_points_targetR_indices][..., 0]), axis=0), np.argmin(np.abs(ratio_interp_space[:, np.newaxis] - result_parameters_flat_Mtot_converted[filter_points_targetR_indices][..., 1]), axis=0), marker='o', c='b', s=5)
            else:
                M_interp_space = M_space
                ratio_interp_space = ratio_space
                utils.pcolor_2d_data(spint.bisplev(M_interp_space, ratio_interp_space, distkappa_spline_int_params), y=ratio_interp_space, x=M_interp_space, ylabel='ratio', xlabel='M', xlabel_format="%d", title='Kappa dist %.2f, R %d' % (target_kappa, R_target))


            plt.gcf().set_tight_layout(True)
            plt.gcf().canvas.draw()
            if savefigs:
                dataio.save_current_figure('specific_ratioM_pcolorsplinetotM_distkappa%d_R%d_log_%s_{label}_{unique_id}.pdf' % (target_kappa, R_target, interpolation_method))

            ### Distance to Fisher Info

            target_fi = 2*target_kappa
            # dist_target_fi_flat = np.abs(result_fisherinfor_mean_flat - target_fi)

            # Use the ratio of the Fisher info to its target value rather than the absolute distance, clipped for plotting
            dist_target_fi_flat = result_fisherinfor_mean_flat/target_fi
            dist_target_fi_flat[dist_target_fi_flat > 1.45] = 1.45
            dist_target_fi_flat[dist_target_fi_flat < 0.5] = 0.5

            # utils.contourf_interpolate_data_interactive_maxvalue(result_parameters_flat_Mtot_converted[filter_points_targetR_indices][..., :-1], dist_target_kappa_flat[filter_points_targetR_indices], xlabel='M', ylabel='ratio_conj', title='kappa, R=%d' % (R_target), interpolation_numpoints=200, interpolation_method=interpolation_method, log_scale=False)
            utils.contourf_interpolate_data_interactive_maxvalue(result_parameters_flat_subM_converted[filter_points_targetR_indices][..., :-1], dist_target_fi_flat[filter_points_targetR_indices], xlabel='M_conj', ylabel='M_feat', title='FI dist %.2f, R=%d' % (target_fi, R_target), interpolation_numpoints=200, interpolation_method=interpolation_method, log_scale=False, mask_greater_than=mask_greater_than, show_slider=False)

            if savefigs:
                dataio.save_current_figure('specific_ratioM_pcolorsubM_distfi%d_R%d_log_%s_{label}_{unique_id}.pdf' % (target_fi, R_target, interpolation_method))


            distFI_spline_int_params = spint.bisplrep(result_parameters_flat_Mtot_converted[filter_points_targetR_indices][..., 0], result_parameters_flat_Mtot_converted[filter_points_targetR_indices][..., 1], np.log(dist_target_fi_flat[filter_points_targetR_indices]), kx=3, ky=3, s=1)

            M_interp_space = np.linspace(100, 740, 100)
            ratio_interp_space = np.linspace(0.0, 1.0, 100)
            # utils.pcolor_2d_data(spint.bisplev(M_interp_space, ratio_interp_space, spline_int_params), y=ratio_interp_space, x=M_interp_space, ylabel='ratio', xlabel='M', xlabel_format="%d", title='Kappa dist %.2f, R %d' % (target_kappa, R_target), ticks_interpolate=11)
            utils.pcolor_2d_data(np.exp(spint.bisplev(M_interp_space, ratio_interp_space, distFI_spline_int_params)), y=ratio_interp_space, x=M_interp_space, ylabel='ratio conjunctivity', xlabel='M', xlabel_format="%d", title='Ratio FI/%.2f, R %d' % (target_fi, R_target), ticks_interpolate=11, log_scale=False, cmap='RdBu_r')
            plt.gcf().set_tight_layout(True)
            plt.gcf().canvas.draw()
            if savefigs:
                dataio.save_current_figure('specific_ratioM_pcolorsplinetotM_distfi%d_R%d_log_%s_{label}_{unique_id}.pdf' % (target_fi, R_target, interpolation_method))



        else:
            R_target_i = np.argmin(np.abs(R_space - R_target))

            utils.pcolor_2d_data(result_em_fits_kappa_valid[..., R_target_i], log_scale=True, y=ratio_space, x=M_space, ylabel='ratio', xlabel='M', xlabel_format="%d", title='Kappa, R %d' % (R_target))
            if savefigs:
                dataio.save_current_figure('specific_ratioM_pcolor_kappa_R%d_log_{label}_{unique_id}.pdf' % (R_target))
            # target_kappa = np.ma.mean(result_em_fits_kappa_valid[R_target_i])
            # target_kappa = 5*1e3
            target_kappa = 580

            dist_target_kappa = np.ma.masked_greater(np.abs(result_em_fits_kappa_valid[..., R_target_i] - target_kappa), mask_greater_than*5)

            utils.pcolor_2d_data(dist_target_kappa, log_scale=True, y=ratio_space, x=M_space, ylabel='ratio', xlabel='M', xlabel_format="%d", title='Kappa dist %.2f, R %d' % (target_kappa, R_target))
            if savefigs:
                dataio.save_current_figure('specific_ratioM_pcolor_distkappa%d_R%d_log_{label}_{unique_id}.pdf' % (target_kappa, R_target))






    all_args = data_pbs.loaded_data['args_list']
    variables_to_save = []

    if savedata:
        dataio.save_variables_default(locals(), variables_to_save)

        dataio.make_link_output_to_dropbox(dropbox_current_experiment_folder='higher_dimensions_R')

    plt.show()

    return locals()
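# The spline-interpolation step above leans on scipy.interpolate's bisplrep/bisplev pair. Below is
# a minimal, self-contained sketch of that pattern, under the assumption that the goal is simply to
# smooth scattered (M, ratio) -> log-distance samples onto a dense grid. The synthetic data and the
# _demo_* name are hypothetical; only the scipy calls mirror the code above.
def _demo_spline_smooth_kappa_distance():
    import numpy as np
    import scipy.interpolate as spint

    # Fake scattered samples over the (M, ratio) plane, standing in for dist_target_kappa_flat
    rng = np.random.RandomState(0)
    M_samples = rng.uniform(100, 740, size=200)
    ratio_samples = rng.uniform(0.0, 1.0, size=200)
    log_dist_samples = np.log(1.0 + (M_samples / 740.0) * ratio_samples)

    # Fit a smoothing bicubic spline to the scattered log-distances
    tck = spint.bisplrep(M_samples, ratio_samples, log_dist_samples, kx=3, ky=3, s=1)

    # Evaluate on a regular grid and undo the log, as done before the pcolor_2d_data calls
    M_grid = np.linspace(100, 740, 100)
    ratio_grid = np.linspace(0.0, 1.0, 100)
    smoothed_landscape = np.exp(spint.bisplev(M_grid, ratio_grid, tck))

    return M_grid, ratio_grid, smoothed_landscape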
def plots_specific_stimuli_mixed(data_pbs, generator_module=None):
    '''
        Reload and plot behaviour of mixed population code on specific Stimuli
        of 3 items.
    '''

    #### SETUP
    #
    savefigs = True
    savedata = True

    plot_per_min_dist_all = False
    specific_plots_paper = False
    plots_emfit_allitems = False
    plot_min_distance_effect = True

    compute_bootstraps = False

    should_fit_allitems_model = True
    # caching_emfit_filename = None
    mixturemodel_to_use = 'allitems_uniquekappa'
    # caching_emfit_filename = os.path.join(generator_module.pbs_submission_infos['simul_out_dir'], 'cache_emfitallitems_uniquekappa.pickle')
    # mixturemodel_to_use = 'allitems_fikappa'

    caching_emfit_filename = os.path.join(generator_module.pbs_submission_infos['simul_out_dir'], 'cache_emfit%s.pickle' % mixturemodel_to_use)

    compute_fisher_info_perratioconj = True
    caching_fisherinfo_filename = os.path.join(generator_module.pbs_submission_infos['simul_out_dir'], 'cache_fisherinfo.pickle')

    colormap = None  # or 'cubehelix'
    plt.rcParams['font.size'] = 16
    #
    #### /SETUP

    print "Order parameters: ", generator_module.dict_parameters_range.keys()

    all_args = data_pbs.loaded_data['args_list']
    result_all_precisions_mean = utils.nanmean(np.squeeze(data_pbs.dict_arrays['result_all_precisions']['results']), axis=-1)
    result_all_precisions_std = utils.nanstd(np.squeeze(data_pbs.dict_arrays['result_all_precisions']['results']), axis=-1)
    result_em_fits_mean = utils.nanmean(np.squeeze(data_pbs.dict_arrays['result_em_fits']['results']), axis=-1)
    result_em_fits_std = utils.nanstd(np.squeeze(data_pbs.dict_arrays['result_em_fits']['results']), axis=-1)
    result_em_kappastddev_mean = utils.nanmean(utils.kappa_to_stddev(np.squeeze(data_pbs.dict_arrays['result_em_fits']['results'])[..., 0, :]), axis=-1)
    result_em_kappastddev_std = utils.nanstd(utils.kappa_to_stddev(np.squeeze(data_pbs.dict_arrays['result_em_fits']['results'])[..., 0, :]), axis=-1)
    result_responses_all = np.squeeze(data_pbs.dict_arrays['result_responses']['results'])
    result_target_all = np.squeeze(data_pbs.dict_arrays['result_target']['results'])
    result_nontargets_all = np.squeeze(data_pbs.dict_arrays['result_nontargets']['results'])

    nb_repetitions = result_responses_all.shape[-1]
    K = result_nontargets_all.shape[-2]
    N = result_responses_all.shape[-2]

    enforce_min_distance_space = data_pbs.loaded_data['parameters_uniques']['enforce_min_distance']
    sigmax_space = data_pbs.loaded_data['parameters_uniques']['sigmax']
    ratio_space = data_pbs.loaded_data['datasets_list'][0]['ratio_space']

    print enforce_min_distance_space
    print sigmax_space
    print ratio_space
    print result_all_precisions_mean.shape, result_em_fits_mean.shape
    print result_responses_all.shape

    dataio = DataIO(output_folder=generator_module.pbs_submission_infos['simul_out_dir'] + '/outputs/', label='global_' + dataset_infos['save_output_filename'])

    # Reload cached emfitallitems
    if caching_emfit_filename is not None:
        if os.path.exists(caching_emfit_filename):
            # Got file, open it and try to use its contents
            try:
                with open(caching_emfit_filename, 'r') as file_in:
                    # Load and assign values
                    print "Reloader EM fits from cache", caching_emfit_filename
                    cached_data = pickle.load(file_in)
                    result_emfitallitems = cached_data['result_emfitallitems']
                    mixturemodel_used = cached_data.get('mixturemodel_used', '')

                    if mixturemodel_used != mixturemodel_to_use:
                        print "warning, reloaded model used a different mixture model class"
                    should_fit_allitems_model = False

            except IOError:
                print "Error while loading ", caching_emfit_filename, "falling back to computing the EM fits"


    # Load the Fisher Info from cache if exists. If not, compute it.
    if caching_fisherinfo_filename is not None:
        if os.path.exists(caching_fisherinfo_filename):
            # Got file, open it and try to use its contents
            try:
                with open(caching_fisherinfo_filename, 'r') as file_in:
                    # Load and assign values
                    cached_data = pickle.load(file_in)
                    result_fisherinfo_mindist_sigmax_ratio = cached_data['result_fisherinfo_mindist_sigmax_ratio']
                    compute_fisher_info_perratioconj = False

            except IOError:
                print "Error while loading ", caching_fisherinfo_filename, "falling back to computing the Fisher Info"

    if compute_fisher_info_perratioconj:
        # We did not save the Fisher info, but we need it to fit the mixture model with fixed kappa, so recompute it from the stored args_dicts

        result_fisherinfo_mindist_sigmax_ratio = np.empty((enforce_min_distance_space.size, sigmax_space.size, ratio_space.size))

        # Invert the all_args_i -> min_dist, sigmax indexing
        parameters_indirections = data_pbs.loaded_data['parameters_dataset_index']

        # min_dist_i, sigmax_level_i, ratio_i
        for min_dist_i, min_dist in enumerate(enforce_min_distance_space):
            for sigmax_i, sigmax in enumerate(sigmax_space):
                # Get index of first dataset with the current (min_dist, sigmax) (no need for the others, I think)
                arg_index = parameters_indirections[(min_dist, sigmax)][0]

                # Now using this dataset, reconstruct a RandomFactorialNetwork and compute the fisher info
                curr_args = all_args[arg_index]

                for ratio_conj_i, ratio_conj in enumerate(ratio_space):
                    # Update param
                    curr_args['ratio_conj'] = ratio_conj
                    # curr_args['stimuli_generation'] = 'specific_stimuli'

                    (_, _, _, sampler) = launchers.init_everything(curr_args)

                    # Theo Fisher info
                    result_fisherinfo_mindist_sigmax_ratio[min_dist_i, sigmax_i, ratio_conj_i] = sampler.estimate_fisher_info_theocov()

                    print "Min dist: %.2f, Sigmax: %.2f, Ratio: %.2f: %.3f" % (min_dist, sigmax, ratio_conj, result_fisherinfo_mindist_sigmax_ratio[min_dist_i, sigmax_i, ratio_conj_i])


        # Save everything to a file, for faster later plotting
        if caching_fisherinfo_filename is not None:
            try:
                with open(caching_fisherinfo_filename, 'w') as filecache_out:
                    data_cache = dict(result_fisherinfo_mindist_sigmax_ratio=result_fisherinfo_mindist_sigmax_ratio)
                    pickle.dump(data_cache, filecache_out, protocol=2)
            except IOError:
                print "Error writing out to caching file ", caching_fisherinfo_filename


    if plot_per_min_dist_all:
        # Do one plot per min distance.
        for min_dist_i, min_dist in enumerate(enforce_min_distance_space):
            # Show precision
            utils.pcolor_2d_data(result_all_precisions_mean[min_dist_i].T, x=ratio_space, y=sigmax_space, xlabel='ratio', ylabel='sigma_x', title='Precision, min_dist=%.3f' % min_dist)
            if savefigs:
                dataio.save_current_figure('precision_permindist_mindist%.2f_ratiosigmax_{label}_{unique_id}.pdf' % min_dist)

            # Show log precision
            utils.pcolor_2d_data(result_all_precisions_mean[min_dist_i].T, x=ratio_space, y=sigmax_space, xlabel='ratio', ylabel='sigma_x', title='Precision, min_dist=%.3f' % min_dist, log_scale=True)
            if savefigs:
                dataio.save_current_figure('logprecision_permindist_mindist%.2f_ratiosigmax_{label}_{unique_id}.pdf' % min_dist)


            # Plot estimated model precision
            utils.pcolor_2d_data(result_em_fits_mean[min_dist_i, ..., 0].T, x=ratio_space, y=sigmax_space, xlabel='ratio', ylabel='sigma_x', title='EM precision, min_dist=%.3f' % min_dist, log_scale=False)
            if savefigs:
                dataio.save_current_figure('logemprecision_permindist_mindist%.2f_ratiosigmax_{label}_{unique_id}.pdf' % min_dist)

            # Plot estimated Target, nontarget and random mixture components, in multiple subplots
            _, axes = plt.subplots(1, 3, figsize=(18, 6))
            plt.subplots_adjust(left=0.05, right=0.97, wspace = 0.3, bottom=0.15)
            utils.pcolor_2d_data(result_em_fits_mean[min_dist_i, ..., 1].T, x=ratio_space, y=sigmax_space, xlabel='ratio', ylabel='sigma_x', title='Target, min_dist=%.3f' % min_dist, log_scale=False, ax_handle=axes[0], ticks_interpolate=5)
            utils.pcolor_2d_data(result_em_fits_mean[min_dist_i, ..., 2].T, x=ratio_space, y=sigmax_space, xlabel='ratio', ylabel='sigma_x', title='Nontarget, min_dist=%.3f' % min_dist, log_scale=False, ax_handle=axes[1], ticks_interpolate=5)
            utils.pcolor_2d_data(result_em_fits_mean[min_dist_i, ..., 3].T, x=ratio_space, y=sigmax_space, xlabel='ratio', ylabel='sigma_x', title='Random, min_dist=%.3f' % min_dist, log_scale=False, ax_handle=axes[2], ticks_interpolate=5)

            if savefigs:
                dataio.save_current_figure('em_mixtureprobs_permindist_mindist%.2f_ratiosigmax_{label}_{unique_id}.pdf' % min_dist)

            # Plot Log-likelihood of Mixture model, sanity check
            utils.pcolor_2d_data(result_em_fits_mean[min_dist_i, ..., -1].T, x=ratio_space, y=sigmax_space, xlabel='ratio', ylabel='sigma_x', title='EM loglik, min_dist=%.3f' % min_dist, log_scale=False)
            if savefigs:
                dataio.save_current_figure('em_loglik_permindist_mindist%.2f_ratiosigmax_{label}_{unique_id}.pdf' % min_dist)

    if specific_plots_paper:
        # We need to choose 3 levels of min_distances
        target_sigmax = 0.25
        target_mindist_low = 0.15
        target_mindist_medium = 0.36
        target_mindist_high = 1.5

        sigmax_level_i = np.argmin(np.abs(sigmax_space - target_sigmax))
        min_dist_level_low_i = np.argmin(np.abs(enforce_min_distance_space - target_mindist_low))
        min_dist_level_medium_i = np.argmin(np.abs(enforce_min_distance_space - target_mindist_medium))
        min_dist_level_high_i = np.argmin(np.abs(enforce_min_distance_space - target_mindist_high))

        ## Do for each distance
        # for min_dist_i in [min_dist_level_low_i, min_dist_level_medium_i, min_dist_level_high_i]:
        for min_dist_i in xrange(enforce_min_distance_space.size):
            # Plot precision
            if False:
                utils.plot_mean_std_area(ratio_space, result_all_precisions_mean[min_dist_i, sigmax_level_i], result_all_precisions_std[min_dist_i, sigmax_level_i]) #, xlabel='Ratio conjunctivity', ylabel='Precision of recall')
                # plt.title('Min distance %.3f' % enforce_min_distance_space[min_dist_i])
                plt.ylim([0, np.max(result_all_precisions_mean[min_dist_i, sigmax_level_i] + result_all_precisions_std[min_dist_i, sigmax_level_i])])

                if savefigs:
                    dataio.save_current_figure('mindist%.2f_precisionrecall_forpaper_{label}_{unique_id}.pdf' % enforce_min_distance_space[min_dist_i])

            # Plot kappa fitted
            ax_handle = utils.plot_mean_std_area(ratio_space, result_em_fits_mean[min_dist_i, sigmax_level_i, :, 0], result_em_fits_std[min_dist_i, sigmax_level_i, :, 0]) #, xlabel='Ratio conjunctivity', ylabel='Fitted kappa')
            # Add distance between items in kappa units
            dist_items_kappa = utils.stddev_to_kappa(enforce_min_distance_space[min_dist_i])
            ax_handle.plot(ratio_space, dist_items_kappa*np.ones(ratio_space.size), 'k--', linewidth=3)
            plt.ylim([-0.1, np.max((np.max(result_em_fits_mean[min_dist_i, sigmax_level_i, :, 0] + result_em_fits_std[min_dist_i, sigmax_level_i, :, 0]), 1.1*dist_items_kappa))])
            # plt.title('Min distance %.3f' % enforce_min_distance_space[min_dist_i])
            if savefigs:
                dataio.save_current_figure('mindist%.2f_emkappa_forpaper_{label}_{unique_id}.pdf' % enforce_min_distance_space[min_dist_i])

            # Plot kappa-stddev fitted. Easier to visualize
            ax_handle = utils.plot_mean_std_area(ratio_space, result_em_kappastddev_mean[min_dist_i, sigmax_level_i], result_em_kappastddev_std[min_dist_i, sigmax_level_i]) #, xlabel='Ratio conjunctivity', ylabel='Fitted kappa_stddev')
            # Add distance between items in std dev units
            dist_items_std = enforce_min_distance_space[min_dist_i]
            ax_handle.plot(ratio_space, dist_items_std*np.ones(ratio_space.size), 'k--', linewidth=3)
            # plt.title('Min distance %.3f' % enforce_min_distance_space[min_dist_i])
            plt.ylim([0, 1.1*np.max((np.max(result_em_kappastddev_mean[min_dist_i, sigmax_level_i] + result_em_kappastddev_std[min_dist_i, sigmax_level_i]), dist_items_std))])
            if savefigs:
                dataio.save_current_figure('mindist%.2f_emkappastddev_forpaper_{label}_{unique_id}.pdf' % enforce_min_distance_space[min_dist_i])


            if False:
                # Plot LLH
                utils.plot_mean_std_area(ratio_space, result_em_fits_mean[min_dist_i, sigmax_level_i, :, -1], result_em_fits_std[min_dist_i, sigmax_level_i, :, -1]) #, xlabel='Ratio conjunctivity', ylabel='Loglikelihood of Mixture model fit')
                # plt.title('Min distance %.3f' % enforce_min_distance_space[min_dist_i])
                if savefigs:
                    dataio.save_current_figure('mindist%.2f_emllh_forpaper_{label}_{unique_id}.pdf' % enforce_min_distance_space[min_dist_i])

                # Plot mixture parameters, std
                utils.plot_multiple_mean_std_area(ratio_space, result_em_fits_mean[min_dist_i, sigmax_level_i, :, 1:4].T, result_em_fits_std[min_dist_i, sigmax_level_i, :, 1:4].T)
                plt.ylim([0.0, 1.1])
                # plt.title('Min distance %.3f' % enforce_min_distance_space[min_dist_i])
                # plt.legend("Target", "Non-target", "Random")
                if savefigs:
                    dataio.save_current_figure('mindist%.2f_emprobs_forpaper_{label}_{unique_id}.pdf' % enforce_min_distance_space[min_dist_i])

                # Mixture parameters, SEM
                utils.plot_multiple_mean_std_area(ratio_space, result_em_fits_mean[min_dist_i, sigmax_level_i, :, 1:4].T, result_em_fits_std[min_dist_i, sigmax_level_i, :, 1:4].T/np.sqrt(nb_repetitions))
                plt.ylim([0.0, 1.1])
                # plt.title('Min distance %.3f' % enforce_min_distance_space[min_dist_i])
                # plt.legend("Target", "Non-target", "Random")
                if savefigs:
                    dataio.save_current_figure('mindist%.2f_emprobs_forpaper_sem_{label}_{unique_id}.pdf' % enforce_min_distance_space[min_dist_i])

    if plots_emfit_allitems:
        # We need to choose 3 levels of min_distances
        target_sigmax = 0.25
        target_mindist_low = 0.15
        target_mindist_medium = 0.36
        target_mindist_high = 1.5

        sigmax_level_i = np.argmin(np.abs(sigmax_space - target_sigmax))
        min_dist_level_low_i = np.argmin(np.abs(enforce_min_distance_space - target_mindist_low))
        min_dist_level_medium_i = np.argmin(np.abs(enforce_min_distance_space - target_mindist_medium))
        min_dist_level_high_i = np.argmin(np.abs(enforce_min_distance_space - target_mindist_high))

        min_dist_i_plotting_space = np.array([min_dist_level_low_i, min_dist_level_medium_i, min_dist_level_high_i])

        if should_fit_allitems_model:

            # kappa, mixt_target, mixt_nontargets (K), mixt_random, LL, bic
            # result_emfitallitems = np.empty((min_dist_i_plotting_space.size, ratio_space.size, 2*K+5))*np.nan
            result_emfitallitems = np.empty((enforce_min_distance_space.size, ratio_space.size, K+5))*np.nan

            ## Do for each distance
            # for min_dist_plotting_i, min_dist_i in enumerate(min_dist_i_plotting_space):
            for min_dist_i in xrange(enforce_min_distance_space.size):
                # Fit the mixture model
                for ratio_i, ratio in enumerate(ratio_space):
                    print "Refitting EM all items. Ratio:", ratio, "Dist:", enforce_min_distance_space[min_dist_i]

                    if mixturemodel_to_use == 'allitems_uniquekappa':
                        em_fit = em_circularmixture_allitems_uniquekappa.fit(
                            result_responses_all[min_dist_i, sigmax_level_i, ratio_i].flatten(),
                            result_target_all[min_dist_i, sigmax_level_i, ratio_i].flatten(),
                            result_nontargets_all[min_dist_i, sigmax_level_i, ratio_i].transpose((0, 2, 1)).reshape((N*nb_repetitions, K)))
                    elif mixturemodel_to_use == 'allitems_fikappa':
                        em_fit = em_circularmixture_allitems_kappafi.fit(result_responses_all[min_dist_i, sigmax_level_i, ratio_i].flatten(),
                            result_target_all[min_dist_i, sigmax_level_i, ratio_i].flatten(),
                            result_nontargets_all[min_dist_i, sigmax_level_i, ratio_i].transpose((0, 2, 1)).reshape((N*nb_repetitions, K)),
                            kappa=result_fisherinfo_mindist_sigmax_ratio[min_dist_i, sigmax_level_i, ratio_i])
                    else:
                        raise ValueError("Wrong mixturemodel_to_use, %s" % mixturemodel_to_use)

                    result_emfitallitems[min_dist_i, ratio_i] = [em_fit['kappa'], em_fit['mixt_target']] + em_fit['mixt_nontargets'].tolist() + [em_fit[key] for key in ('mixt_random', 'train_LL', 'bic')]

            # Save everything to a file, for faster later plotting
            if caching_emfit_filename is not None:
                try:
                    with open(caching_emfit_filename, 'w') as filecache_out:
                        data_em = dict(result_emfitallitems=result_emfitallitems, target_sigmax=target_sigmax)
                        pickle.dump(data_em, filecache_out, protocol=2)
                except IOError:
                    print "Error writing out to caching file ", caching_emfit_filename


        ## Plots now, for each distance!
        # for min_dist_plotting_i, min_dist_i in enumerate(min_dist_i_plotting_space):
        for min_dist_i in xrange(enforce_min_distance_space.size):

            # Plot now
            _, ax = plt.subplots()
            ax.plot(ratio_space, result_emfitallitems[min_dist_i, :, 1:5], linewidth=3)
            plt.ylim([0.0, 1.1])
            plt.legend(['Target', 'Nontarget 1', 'Nontarget 2', 'Random'], loc='upper left')

            if savefigs:
                dataio.save_current_figure('mindist%.2f_emprobsfullitems_{label}_{unique_id}.pdf' % enforce_min_distance_space[min_dist_i])

    if plot_min_distance_effect:
        conj_receptive_field_size = 2.*np.pi/((all_args[0]['M']*ratio_space)**0.5)

        target_vs_nontargets_mindist_ratio = result_emfitallitems[..., 1]/np.sum(result_emfitallitems[..., 1:4], axis=-1)
        nontargetsmean_vs_targnontarg_mindist_ratio = np.mean(result_emfitallitems[..., 2:4]/np.sum(result_emfitallitems[..., 1:4], axis=-1)[..., np.newaxis], axis=-1)

        for ratio_conj_i, ratio_conj in enumerate(ratio_space):
            # Do one plot per ratio, putting the receptive field size on each
            f, ax = plt.subplots()

            ax.plot(enforce_min_distance_space[1:], target_vs_nontargets_mindist_ratio[1:, ratio_conj_i], linewidth=3, label='target mixture')
            ax.plot(enforce_min_distance_space[1:], nontargetsmean_vs_targnontarg_mindist_ratio[1:, ratio_conj_i], linewidth=3, label='non-target mixture')
            # ax.plot(enforce_min_distance_space[1:], result_emfitallitems[1:, ratio_conj_i, 1:5], linewidth=3)

            ax.axvline(x=conj_receptive_field_size[ratio_conj_i]/2., color='k', linestyle='--', linewidth=2)
            ax.axvline(x=conj_receptive_field_size[ratio_conj_i]*2., color='r', linestyle='--', linewidth=2)

            plt.legend(loc='upper left')
            plt.grid()
            # ax.set_xlabel('Stimuli separation')
            # ax.set_ylabel('Ratio Target to Non-targets')
            plt.axis('tight')
            ax.set_ylim([0.0, 1.0])
            ax.set_xlim([enforce_min_distance_space[1:].min(), enforce_min_distance_space[1:].max()])

            if savefigs:
                dataio.save_current_figure('ratio%.2f_mindistpred_ratiotargetnontarget_{label}_{unique_id}.pdf' % ratio_conj)


    if compute_bootstraps:
        ## Bootstrap evaluation

        # We need to choose 3 levels of min_distances
        target_sigmax = 0.25
        target_mindist_low = 0.15
        target_mindist_medium = 0.5
        target_mindist_high = 1.

        sigmax_level_i = np.argmin(np.abs(sigmax_space - target_sigmax))
        min_dist_level_low_i = np.argmin(np.abs(enforce_min_distance_space - target_mindist_low))
        min_dist_level_medium_i = np.argmin(np.abs(enforce_min_distance_space - target_mindist_medium))
        min_dist_level_high_i = np.argmin(np.abs(enforce_min_distance_space - target_mindist_high))

        # cache_bootstrap_fn = os.path.join(generator_module.pbs_submission_infos['simul_out_dir'], 'outputs', 'cache_bootstrap.pickle')
        cache_bootstrap_fn = '/Users/loicmatthey/Dropbox/UCL/1-phd/Work/Visual_working_memory/code/git-bayesian-visual-working-memory/Experiments/specific_stimuli/specific_stimuli_corrected_mixed_sigmaxmindistance_autoset_repetitions5mult_collectall_281113_outputs/cache_bootstrap.pickle'
        try:
            with open(cache_bootstrap_fn, 'r') as file_in:
                # Load and assign values
                cached_data = pickle.load(file_in)
                bootstrap_ecdf_bays_sigmax_T = cached_data['bootstrap_ecdf_bays_sigmax_T']
                bootstrap_ecdf_allitems_sum_sigmax_T = cached_data['bootstrap_ecdf_allitems_sum_sigmax_T']
                bootstrap_ecdf_allitems_all_sigmax_T = cached_data['bootstrap_ecdf_allitems_all_sigmax_T']
                should_fit_bootstrap = False

        except IOError:
            print "Error while loading ", cache_bootstrap_fn

        ratio_i = 0

        # bootstrap_allitems_nontargets_allitems_uniquekappa = em_circularmixture_allitems_uniquekappa.bootstrap_nontarget_stat(
        # result_responses_all[min_dist_level_low_i, sigmax_level_i, ratio_i].flatten(),
        # result_target_all[min_dist_level_low_i, sigmax_level_i, ratio_i].flatten(),
        # result_nontargets_all[min_dist_level_low_i, sigmax_level_i, ratio_i].transpose((0, 2, 1)).reshape((N*nb_repetitions, K)),
        # sumnontargets_bootstrap_ecdf=bootstrap_ecdf_allitems_sum_sigmax_T[sigmax_level_i][K]['ecdf'],
        # allnontargets_bootstrap_ecdf=bootstrap_ecdf_allitems_all_sigmax_T[sigmax_level_i][K]['ecdf']

        # TODO FINISH HERE

    variables_to_save = ['nb_repetitions']

    if savedata:
        dataio.save_variables_default(locals(), variables_to_save)

        dataio.make_link_output_to_dropbox(dropbox_current_experiment_folder='specific_stimuli')


    plt.show()


    return locals()
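# utils.stddev_to_kappa / utils.kappa_to_stddev are used above to convert between a von Mises
# concentration and a circular standard deviation (e.g. to overlay the inter-item distance on the
# fitted-kappa curves). A hedged sketch of that conversion follows, assuming the standard relation
# sigma = sqrt(-2 ln R) with R = I1(kappa)/I0(kappa); it illustrates the maths only and is not the
# project's actual implementation.
def _demo_kappa_stddev_conversion(kappa=5.0):
    import numpy as np
    from scipy.special import i0e, i1e
    from scipy.optimize import brentq

    def kappa_to_stddev(kappa_):
        # i1e/i0e equals I1/I0 (the exp(-kappa) scaling cancels) and stays finite for large kappa
        resultant_length = i1e(kappa_) / i0e(kappa_)
        return np.sqrt(-2.0 * np.log(resultant_length))

    def stddev_to_kappa(stddev, kappa_max=1e4):
        # kappa_to_stddev is monotonically decreasing, so a bracketed root-finder inverts it
        return brentq(lambda k: kappa_to_stddev(k) - stddev, 1e-6, kappa_max)

    sigma = kappa_to_stddev(kappa)
    kappa_recovered = stddev_to_kappa(sigma)
    return sigma, kappa_recovered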
    def do_sigma_output_plot(variables_launcher_running, plotting_parameters):
        dataio = variables_launcher_running['dataio']
        sigmaoutput_space = variables_launcher_running['sigmaoutput_space']

        result_em_fits_mean = utils.nanmean(variables_launcher_running['result_em_fits'], axis=-1)
        result_em_fits_std = utils.nanstd(variables_launcher_running['result_em_fits'], axis=-1)

        plt.ion()

        # Memory curve kappa
        def sigmaoutput_plot_kappa(sigmaoutput_space, result_em_fits_mean, result_em_fits_std=None, exp_name='', ax=None):

            if ax is not None:
                plt.figure(ax.get_figure().number)
                ax.hold(False)

            ax = utils.plot_mean_std_area(sigmaoutput_space, result_em_fits_mean[..., 0], result_em_fits_std[..., 0], xlabel='sigma output', ylabel='Memory fidelity', linewidth=3, fmt='o-', markersize=8, label='Noise output effect', ax_handle=ax)

            ax.hold(True)

            ax.set_title("{{exp_name}} {T} {M} {ratio_conj:.2f} {sigmax:.3f} {sigmay:.2f}".format(**variables_launcher_running['all_parameters']).format(exp_name=exp_name))
            ax.legend()
            # ax.set_xlim([0.9, T_space_exp.max()+0.1])
            # ax.set_xticks(range(1, T_space_exp.max()+1))
            # ax.set_xticklabels(range(1, T_space_exp.max()+1))

            ax.get_figure().canvas.draw()

            dataio.save_current_figure('noiseoutput_kappa_%s_T{T}_M{M}_ratio{ratio_conj}_sigmax{sigmax}_sigmay{sigmay}_{{label}}_{{unique_id}}.pdf'.format(**variables_launcher_running['all_parameters']) % (exp_name))

            return ax

        # Plot EM Mixtures proportions
        def sigmaoutput_plot_mixtures(sigmaoutput_space, result_em_fits_mean, result_em_fits_std, exp_name='', ax=None):

            if ax is None:
                _, ax = plt.subplots()

            if ax is not None:
                plt.figure(ax.get_figure().number)
                ax.hold(False)

            # mixture probabilities
            print result_em_fits_mean[..., 1]

            result_em_fits_mean[np.isnan(result_em_fits_mean)] = 0.0
            result_em_fits_std[np.isnan(result_em_fits_std)] = 0.0

            utils.plot_mean_std_area(sigmaoutput_space, result_em_fits_mean[..., 1], result_em_fits_std[..., 1], xlabel='sigma output', ylabel="Mixture probabilities", ax_handle=ax, linewidth=3, fmt='o-', markersize=5, label='Target')
            ax.hold(True)
            utils.plot_mean_std_area(sigmaoutput_space, result_em_fits_mean[..., 2], result_em_fits_std[..., 2], xlabel='sigma output', ylabel="Mixture probabilities", ax_handle=ax, linewidth=3, fmt='o-', markersize=5, label='Nontarget')
            utils.plot_mean_std_area(sigmaoutput_space, result_em_fits_mean[..., 3], result_em_fits_std[..., 3], xlabel='sigma output', ylabel="Mixture probabilities", ax_handle=ax, linewidth=3, fmt='o-', markersize=5, label='Random')

            ax.legend(prop={'size':15})

            ax.set_title("{{exp_name}} {T} {M} {ratio_conj:.2f} {sigmax:.3f} {sigmay:.2f}".format(**variables_launcher_running['all_parameters']).format(exp_name=exp_name))

            ax.set_ylim([0.0, 1.1])
            ax.get_figure().canvas.draw()

            dataio.save_current_figure('memorycurves_emfits_%s_T{T}_M{M}_ratio{ratio_conj}_sigmax{sigmax}_sigmay{sigmay}_{{label}}_{{unique_id}}.pdf'.format(**variables_launcher_running['all_parameters']) % (exp_name))

            return ax

        # Do plots
        plotting_parameters['axes']['ax_sigmaoutput_kappa'] = sigmaoutput_plot_kappa(sigmaoutput_space, result_em_fits_mean, result_em_fits_std, exp_name='kappa', ax=plotting_parameters['axes']['ax_sigmaoutput_kappa'])
        plotting_parameters['axes']['ax_sigmaoutput_mixtures'] = sigmaoutput_plot_mixtures(sigmaoutput_space, result_em_fits_mean, result_em_fits_std, exp_name='mixt probs', ax=plotting_parameters['axes']['ax_sigmaoutput_mixtures'])
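# The memory-curve and noise-output plots above all go through utils.plot_mean_std_area. As a
# rough, hedged stand-in (not the project's helper), it is assumed to draw the mean as a line and
# shade +/- one standard deviation around it; a minimal version of that idea:
def _demo_plot_mean_std_area(x, mean, std, ax=None, label=None, fmt='o-', **plot_kwargs):
    import matplotlib.pyplot as plt

    if ax is None:
        _, ax = plt.subplots()

    line, = ax.plot(x, mean, fmt, label=label, **plot_kwargs)
    ax.fill_between(x, mean - std, mean + std, color=line.get_color(), alpha=0.3)

    return ax
# Example usage (hypothetical arrays): _demo_plot_mean_std_area(sigmaoutput_space, fits_mean[..., 0], fits_std[..., 0])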
def plots_specific_stimuli_hierarchical(data_pbs, generator_module=None):
    '''
        Reload and plot behaviour of mixed population code on specific Stimuli
        of 3 items.
    '''

    #### SETUP
    #
    savefigs = True
    savedata = True

    plot_per_min_dist_all = True
    specific_plots_paper = True

    colormap = None  # or 'cubehelix'
    plt.rcParams['font.size'] = 16
    #
    #### /SETUP

    print "Order parameters: ", generator_module.dict_parameters_range.keys()

    result_all_precisions_mean = utils.nanmean(np.squeeze(data_pbs.dict_arrays['result_all_precisions']['results']), axis=-1)
    result_all_precisions_std = utils.nanstd(np.squeeze(data_pbs.dict_arrays['result_all_precisions']['results']), axis=-1)
    result_em_fits_mean = utils.nanmean(np.squeeze(data_pbs.dict_arrays['result_em_fits']['results']), axis=-1)
    result_em_fits_std = utils.nanstd(np.squeeze(data_pbs.dict_arrays['result_em_fits']['results']), axis=-1)
    result_em_kappastddev_mean = utils.nanmean(utils.kappa_to_stddev(np.squeeze(data_pbs.dict_arrays['result_em_fits']['results'])[..., 0, :]), axis=-1)
    result_em_kappastddev_std = utils.nanstd(utils.kappa_to_stddev(np.squeeze(data_pbs.dict_arrays['result_em_fits']['results'])[..., 0, :]), axis=-1)

    nb_repetitions = np.squeeze(data_pbs.dict_arrays['result_em_fits']['results']).shape[-1]

    enforce_min_distance_space = data_pbs.loaded_data['parameters_uniques']['enforce_min_distance']
    sigmax_space = data_pbs.loaded_data['parameters_uniques']['sigmax']

    MMlower_valid_space = data_pbs.loaded_data['datasets_list'][0]['MMlower_valid_space']
    ratio_space = MMlower_valid_space[:, 0]/float(np.sum(MMlower_valid_space[0]))

    print enforce_min_distance_space
    print sigmax_space
    print MMlower_valid_space
    print result_all_precisions_mean.shape, result_em_fits_mean.shape

    dataio = DataIO(output_folder=generator_module.pbs_submission_infos['simul_out_dir'] + '/outputs/', label='global_' + dataset_infos['save_output_filename'])


    if plot_per_min_dist_all:
        # Do one plot per min distance.
        for min_dist_i, min_dist in enumerate(enforce_min_distance_space):
            # Show precision
            utils.pcolor_2d_data(result_all_precisions_mean[min_dist_i].T, x=ratio_space, y=sigmax_space, xlabel='ratio layer two', ylabel='sigma_x', title='Precision, min_dist=%.3f' % min_dist)
            if savefigs:
                dataio.save_current_figure('precision_permindist_mindist%.2f_ratiosigmax_{label}_{unique_id}.pdf' % min_dist)

            # Show log precision
            utils.pcolor_2d_data(result_all_precisions_mean[min_dist_i].T, x=ratio_space, y=sigmax_space, xlabel='ratio layer two', ylabel='sigma_x', title='Precision, min_dist=%.3f' % min_dist, log_scale=True)
            if savefigs:
                dataio.save_current_figure('logprecision_permindist_mindist%.2f_ratiosigmax_{label}_{unique_id}.pdf' % min_dist)


            # Plot estimated model precision
            utils.pcolor_2d_data(result_em_fits_mean[min_dist_i, ..., 0].T, x=ratio_space, y=sigmax_space, xlabel='ratio layer two', ylabel='sigma_x', title='EM precision, min_dist=%.3f' % min_dist, log_scale=False)
            if savefigs:
                dataio.save_current_figure('logemprecision_permindist_mindist%.2f_ratiosigmax_{label}_{unique_id}.pdf' % min_dist)

            # Plot estimated Target, nontarget and random mixture components, in multiple subplots
            _, axes = plt.subplots(1, 3, figsize=(18, 6))
            plt.subplots_adjust(left=0.05, right=0.97, wspace = 0.3, bottom=0.15)
            utils.pcolor_2d_data(result_em_fits_mean[min_dist_i, ..., 1].T, x=ratio_space, y=sigmax_space, xlabel='ratio', ylabel='sigma_x', title='Target, min_dist=%.3f' % min_dist, log_scale=False, ax_handle=axes[0], ticks_interpolate=5)
            utils.pcolor_2d_data(result_em_fits_mean[min_dist_i, ..., 2].T, x=ratio_space, y=sigmax_space, xlabel='ratio', ylabel='sigma_x', title='Nontarget, min_dist=%.3f' % min_dist, log_scale=False, ax_handle=axes[1], ticks_interpolate=5)
            utils.pcolor_2d_data(result_em_fits_mean[min_dist_i, ..., 3].T, x=ratio_space, y=sigmax_space, xlabel='ratio', ylabel='sigma_x', title='Random, min_dist=%.3f' % min_dist, log_scale=False, ax_handle=axes[2], ticks_interpolate=5)

            if savefigs:
                dataio.save_current_figure('em_mixtureprobs_permindist_mindist%.2f_ratiosigmax_{label}_{unique_id}.pdf' % min_dist)

            # Plot Log-likelihood of Mixture model, sanity check
            utils.pcolor_2d_data(result_em_fits_mean[min_dist_i, ..., -1].T, x=ratio_space, y=sigmax_space, xlabel='ratio', ylabel='sigma_x', title='EM loglik, min_dist=%.3f' % min_dist, log_scale=False)
            if savefigs:
                dataio.save_current_figure('em_loglik_permindist_mindist%.2f_ratiosigmax_{label}_{unique_id}.pdf' % min_dist)

    if specific_plots_paper:
        # We need to choose 3 levels of min_distances
        target_sigmax = 0.25
        target_mindist_low = 0.09
        target_mindist_medium = 0.36
        target_mindist_high = 1.5

        sigmax_level_i = np.argmin(np.abs(sigmax_space - target_sigmax))
        min_dist_level_low_i = np.argmin(np.abs(enforce_min_distance_space - target_mindist_low))
        min_dist_level_medium_i = np.argmin(np.abs(enforce_min_distance_space - target_mindist_medium))
        min_dist_level_high_i = np.argmin(np.abs(enforce_min_distance_space - target_mindist_high))

        ## Do for each distance
        # for min_dist_i in [min_dist_level_low_i, min_dist_level_medium_i, min_dist_level_high_i]:
        for min_dist_i in xrange(enforce_min_distance_space.size):
            # Plot precision
            utils.plot_mean_std_area(ratio_space, result_all_precisions_mean[min_dist_i, sigmax_level_i], result_all_precisions_std[min_dist_i, sigmax_level_i]) #, xlabel='Ratio conjunctivity', ylabel='Precision of recall')
            # plt.title('Min distance %.3f' % enforce_min_distance_space[min_dist_i])

            if savefigs:
                dataio.save_current_figure('mindist%.2f_precisionrecall_forpaper_{label}_{unique_id}.pdf' % enforce_min_distance_space[min_dist_i])

            # Plot kappa fitted
            utils.plot_mean_std_area(ratio_space, result_em_fits_mean[min_dist_i, sigmax_level_i, :, 0], result_em_fits_std[min_dist_i, sigmax_level_i, :, 0]) #, xlabel='Ratio conjunctivity', ylabel='Fitted kappa')
            # plt.title('Min distance %.3f' % enforce_min_distance_space[min_dist_i])
            if savefigs:
                dataio.save_current_figure('mindist%.2f_emkappa_forpaper_{label}_{unique_id}.pdf' % enforce_min_distance_space[min_dist_i])

            # Plot kappa-stddev fitted. Easier to visualize
            utils.plot_mean_std_area(ratio_space, result_em_kappastddev_mean[min_dist_i, sigmax_level_i], result_em_kappastddev_std[min_dist_i, sigmax_level_i]) #, xlabel='Ratio conjunctivity', ylabel='Fitted kappa_stddev')
            # plt.title('Min distance %.3f' % enforce_min_distance_space[min_dist_i])
            if savefigs:
                dataio.save_current_figure('mindist%.2f_emkappastddev_forpaper_{label}_{unique_id}.pdf' % enforce_min_distance_space[min_dist_i])


            # Plot LLH
            utils.plot_mean_std_area(ratio_space, result_em_fits_mean[min_dist_i, sigmax_level_i, :, -1], result_em_fits_std[min_dist_i, sigmax_level_i, :, -1]) #, xlabel='Ratio conjunctivity', ylabel='Loglikelihood of Mixture model fit')
            # plt.title('Min distance %.3f' % enforce_min_distance_space[min_dist_i])
            if savefigs:
                dataio.save_current_figure('mindist%.2f_emllh_forpaper_{label}_{unique_id}.pdf' % enforce_min_distance_space[min_dist_i])

            # Plot mixture parameters
            utils.plot_multiple_mean_std_area(ratio_space, result_em_fits_mean[min_dist_i, sigmax_level_i, :, 1:4].T, result_em_fits_std[min_dist_i, sigmax_level_i, :, 1:4].T)
            plt.ylim([0.0, 1.1])
            # plt.legend("Target", "Non-target", "Random")
            if savefigs:
                dataio.save_current_figure('mindist%.2f_emprobs_forpaper_{label}_{unique_id}.pdf' % enforce_min_distance_space[min_dist_i])

            # Plot mixture parameters, SEM
            utils.plot_multiple_mean_std_area(ratio_space, result_em_fits_mean[min_dist_i, sigmax_level_i, :, 1:4].T, result_em_fits_std[min_dist_i, sigmax_level_i, :, 1:4].T/np.sqrt(nb_repetitions))
            plt.ylim([0.0, 1.1])
            # plt.legend("Target", "Non-target", "Random")
            if savefigs:
                dataio.save_current_figure('mindist%.2f_emprobs_forpaper_sem_{label}_{unique_id}.pdf' % enforce_min_distance_space[min_dist_i])



    all_args = data_pbs.loaded_data['args_list']
    variables_to_save = ['result_all_precisions_mean', 'result_em_fits_mean', 'result_all_precisions_std', 'result_em_fits_std', 'result_em_kappastddev_mean', 'result_em_kappastddev_std', 'enforce_min_distance_space', 'sigmax_space', 'ratio_space', 'all_args']

    if savedata:
        dataio.save_variables(variables_to_save, locals())
        dataio.make_link_output_to_dropbox(dropbox_current_experiment_folder='specific_stimuli')

    plt.show()

    return locals()
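# All of these reload functions start by collapsing the repetition axis with utils.nanmean /
# utils.nanstd, which are assumed to behave like numpy's nan-aware reductions. A small sketch of
# that aggregation step (plus the SEM used for the "forpaper_sem" figures), on a hypothetical
# result_em_fits array of shape (..., n_params, n_repetitions):
def _demo_aggregate_repetitions(result_em_fits):
    import numpy as np

    fits_mean = np.nanmean(result_em_fits, axis=-1)
    fits_std = np.nanstd(result_em_fits, axis=-1)
    # Standard error of the mean over repetitions
    fits_sem = fits_std / np.sqrt(result_em_fits.shape[-1])

    return fits_mean, fits_std, fits_sem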
def plots_memory_curves(data_pbs, generator_module=None):
    '''
        Reload and plot memory curve of a Mixed code.
        Can use Marginal Fisher Information and fitted Mixture Model as well
    '''

    #### SETUP
    #
    savefigs = True
    savedata = True

    plot_pcolor_fit_precision_to_fisherinfo = True
    plot_selected_memory_curves = False
    plot_best_memory_curves = True

    colormap = None  # or 'cubehelix'
    plt.rcParams['font.size'] = 16
    #
    #### /SETUP

    print "Order parameters: ", generator_module.dict_parameters_range.keys()

    result_all_precisions_mean = utils.nanmean(np.squeeze(data_pbs.dict_arrays['result_all_precisions']['results']), axis=-1)
    result_all_precisions_std = utils.nanstd(np.squeeze(data_pbs.dict_arrays['result_all_precisions']['results']), axis=-1)
    result_em_fits_mean = utils.nanmean(np.squeeze(data_pbs.dict_arrays['result_em_fits']['results']), axis=-1)
    result_em_fits_std = utils.nanstd(np.squeeze(data_pbs.dict_arrays['result_em_fits']['results']), axis=-1)

    ratiohier_space = data_pbs.loaded_data['parameters_uniques']['ratio_hierarchical']
    sigmax_space = data_pbs.loaded_data['parameters_uniques']['sigmax']
    T_space = data_pbs.loaded_data['datasets_list'][0]['T_space']

    print ratiohier_space
    print sigmax_space
    print T_space
    print result_all_precisions_mean.shape, result_em_fits_mean.shape

    dataio = DataIO.DataIO(output_folder=generator_module.pbs_submission_infos['simul_out_dir'] + '/outputs/', label='global_' + dataset_infos['save_output_filename'])

    ## Load Experimental data
    data_simult = load_experimental_data.load_data_simult(data_dir=os.path.normpath(os.path.join(os.path.split(load_experimental_data.__file__)[0], '../../experimental_data/')), fit_mixture_model=True)
    gorgo11_experimental_precision = data_simult['precision_nitems_theo']
    gorgo11_experimental_kappa = np.array([data['kappa'] for _, data in data_simult['em_fits_nitems']['mean'].items()])
    gorgo11_experimental_kappa_std = np.array([data['kappa'] for _, data in data_simult['em_fits_nitems']['std'].items()])
    gorgo11_experimental_emfits_mean = np.array([[data[key] for _, data in data_simult['em_fits_nitems']['mean'].items()] for key in ['kappa', 'mixt_target', 'mixt_nontargets', 'mixt_random']])
    gorgo11_experimental_emfits_std = np.array([[data[key] for _, data in data_simult['em_fits_nitems']['std'].items()] for key in ['kappa', 'mixt_target', 'mixt_nontargets', 'mixt_random']])
    gorgo11_experimental_emfits_sem = gorgo11_experimental_emfits_std/np.sqrt(np.unique(data_simult['subject']).size)
    gorgo11_T_space = data_simult['data_to_fit']['n_items']

    data_bays2009 = load_experimental_data.load_data_bays09(fit_mixture_model=True)
    bays09_experimental_mixtures_mean = data_bays2009['em_fits_nitems_arrays']['mean']
    bays09_experimental_mixtures_std = data_bays2009['em_fits_nitems_arrays']['std']
    # add interpolated points for 3 and 5 items
    emfit_mean_intpfct = spint.interp1d(np.unique(data_bays2009['n_items']), bays09_experimental_mixtures_mean)
    bays09_experimental_mixtures_mean_compatible = emfit_mean_intpfct(np.arange(1, 7))
    emfit_std_intpfct = spint.interp1d(np.unique(data_bays2009['n_items']), bays09_experimental_mixtures_std)
    bays09_experimental_mixtures_std_compatible = emfit_std_intpfct(np.arange(1, 7))
    bays09_T_space_interp = np.arange(1, 7)
    bays09_T_space = data_bays2009['data_to_fit']['n_items']



    # Compute some landscapes of fit!
    dist_diff_precision_experim = np.sum(np.abs(result_all_precisions_mean[..., :gorgo11_experimental_precision.size] - gorgo11_experimental_precision)**2., axis=-1)
    dist_diff_emkappa_experim = np.sum(np.abs(result_em_fits_mean[..., :gorgo11_experimental_precision.size, 0] - gorgo11_experimental_kappa)**2., axis=-1)

    dist_diff_precision_experim_1item = np.abs(result_all_precisions_mean[..., 0] - gorgo11_experimental_precision[0])**2.
    dist_diff_precision_experim_2item = np.abs(result_all_precisions_mean[..., 1] - gorgo11_experimental_precision[1])**2.
    dist_diff_emkappa_experim_1item = np.abs(result_em_fits_mean[..., 0, 0] - gorgo11_experimental_kappa[0])**2.

    dist_diff_em_mixtures_bays09 = np.sum(np.sum((result_em_fits_mean[..., 1:4] - bays09_experimental_mixtures_mean_compatible[1:].T)**2., axis=-1), axis=-1)
    dist_diff_modelfits_experfits_bays09 = np.sum(np.sum((result_em_fits_mean[..., :4] - bays09_experimental_mixtures_mean_compatible.T)**2., axis=-1), axis=-1)

    if plot_pcolor_fit_precision_to_fisherinfo:
        utils.pcolor_2d_data(dist_diff_precision_experim, x=ratiohier_space, y=sigmax_space, xlabel='ratio hier', ylabel='sigmax', log_scale=True)
        if savefigs:
            dataio.save_current_figure('match_diff_precision_experim_log_pcolor_{label}_{unique_id}.pdf')


        utils.pcolor_2d_data(dist_diff_emkappa_experim, x=ratiohier_space, y=sigmax_space, xlabel='ratio hier', ylabel='sigmax', log_scale=True)
        if savefigs:
            dataio.save_current_figure('match_diff_emkappa_experim_log_pcolor_{label}_{unique_id}.pdf')


        utils.pcolor_2d_data(dist_diff_precision_experim*dist_diff_emkappa_experim, x=ratiohier_space, y=sigmax_space, xlabel='ratio hier', ylabel='sigmax', log_scale=True)
        if savefigs:
            dataio.save_current_figure('match_bigmultiplication_log_pcolor_{label}_{unique_id}.pdf')

        utils.pcolor_2d_data(dist_diff_precision_experim_1item, log_scale=True, x=ratiohier_space, y=sigmax_space, xlabel='ratio hier', ylabel='sigmax')
        if savefigs:
            dataio.save_current_figure('match_diff_precision_experim_1item_log_pcolor_{label}_{unique_id}.pdf')


        utils.pcolor_2d_data(dist_diff_emkappa_experim_1item, log_scale=True, x=ratiohier_space, y=sigmax_space, xlabel='ratio hier', ylabel='sigmax')
        if savefigs:
            dataio.save_current_figure('match_diff_emkappa_experim_1item_log_pcolor_{label}_{unique_id}.pdf')

        utils.pcolor_2d_data(dist_diff_precision_experim_2item, log_scale=True, x=ratiohier_space, y=sigmax_space, xlabel='ratio hier', ylabel='sigmax')
        if savefigs:
            dataio.save_current_figure('match_diff_precision_experim_2item_log_pcolor_{label}_{unique_id}.pdf')

        utils.pcolor_2d_data(dist_diff_em_mixtures_bays09, log_scale=True, x=ratiohier_space, y=sigmax_space, xlabel='ratio hier', ylabel='sigmax')
        if savefigs:
            dataio.save_current_figure('match_diff_mixtures_experbays09_pcolor_{label}_{unique_id}.pdf')

        utils.pcolor_2d_data(dist_diff_modelfits_experfits_bays09, log_scale=True, x=ratiohier_space, y=sigmax_space, xlabel='ratio hier', ylabel='sigmax')
        if savefigs:
            dataio.save_current_figure('match_diff_emfits_experbays09_pcolor_{label}_{unique_id}.pdf')


    # Macro plot
    def mem_plot_precision(sigmax_i, ratiohier_i, mem_exp_prec):
        ax = utils.plot_mean_std_area(T_space[:mem_exp_prec.size], mem_exp_prec, np.zeros(mem_exp_prec.size), linewidth=3, fmt='o-', markersize=8, label='Experimental data')

        ax = utils.plot_mean_std_area(T_space[:mem_exp_prec.size], result_all_precisions_mean[ratiohier_i, sigmax_i, :mem_exp_prec.size], result_all_precisions_std[ratiohier_i, sigmax_i, :mem_exp_prec.size], ax_handle=ax, linewidth=3, fmt='o-', markersize=8, label='Precision of samples')

        # ax = utils.plot_mean_std_area(T_space, 0.5*result_marginal_fi_mean[..., 0][ratiohier_i, sigmax_i], 0.5*result_marginal_fi_std[..., 0][ratiohier_i, sigmax_i], ax_handle=ax, linewidth=3, fmt='o-', markersize=8, label='Marginal Fisher Information')

        # ax = utils.plot_mean_std_area(T_space, result_em_fits_mean[..., 0][ratiohier_i, sigmax_i], result_em_fits_std[..., 0][ratiohier_i, sigmax_i], ax_handle=ax, xlabel='Number of items', ylabel="Inverse variance $[rad^{-2}]$", linewidth=3, fmt='o-', markersize=8, label='Fitted kappa')

        ax.set_title('ratio_hier %.2f, sigmax %.2f' % (ratiohier_space[ratiohier_i], sigmax_space[sigmax_i]))
        ax.legend()
        ax.set_xlim([0.9, mem_exp_prec.size + 0.1])
        ax.set_xticks(range(1, mem_exp_prec.size + 1))
        ax.set_xticklabels(range(1, mem_exp_prec.size + 1))

        if savefigs:
            dataio.save_current_figure('memorycurves_precision_ratiohier%.2fsigmax%.2f_{label}_{unique_id}.pdf' % (ratiohier_space[ratiohier_i], sigmax_space[sigmax_i]))

    def mem_plot_kappa(sigmax_i, ratiohier_i, T_space_exp, exp_kappa_mean, exp_kappa_std=None):
        ax = utils.plot_mean_std_area(T_space_exp, exp_kappa_mean, exp_kappa_std, linewidth=3, fmt='o-', markersize=8, label='Experimental data')

        ax = utils.plot_mean_std_area(T_space[:T_space_exp.max()], result_em_fits_mean[..., :T_space_exp.max(), 0][ratiohier_i, sigmax_i], result_em_fits_std[..., :T_space_exp.max(), 0][ratiohier_i, sigmax_i], xlabel='Number of items', ylabel="Memory fidelity $[rad^{-2}]$", linewidth=3, fmt='o-', markersize=8, label='Fitted kappa', ax_handle=ax)

        ax.set_title('ratio_hier %.2f, sigmax %.2f' % (ratiohier_space[ratiohier_i], sigmax_space[sigmax_i]))
        ax.legend()
        ax.set_xlim([0.9, T_space_exp.max()+0.1])
        ax.set_xticks(range(1, T_space_exp.max()+1))
        ax.set_xticklabels(range(1, T_space_exp.max()+1))

        ax.get_figure().canvas.draw()

        if savefigs:
            dataio.save_current_figure('memorycurves_kappa_ratiohier%.2fsigmax%.2f_{label}_{unique_id}.pdf' % (ratiohier_space[ratiohier_i], sigmax_space[sigmax_i]))

    # def em_plot(sigmax_i, ratiohier_i):
    #     # TODO finish checking this up.
    #     f, ax = plt.subplots()
    #     ax2 = ax.twinx()

    #     # left axis, kappa
    #     ax = utils.plot_mean_std_area(T_space, result_em_fits_mean[..., 0][ratiohier_i, sigmax_i], result_em_fits_std[..., 0][ratiohier_i, sigmax_i], xlabel='Number of items', ylabel="Inverse variance $[rad^{-2}]$", ax_handle=ax, linewidth=3, fmt='o-', markersize=8, label='Fitted kappa', color='k')

    #     # Right axis, mixture probabilities
    #     utils.plot_mean_std_area(T_space, result_em_fits_mean[..., 1][ratiohier_i, sigmax_i], result_em_fits_std[..., 1][ratiohier_i, sigmax_i], xlabel='Number of items', ylabel="Mixture probabilities", ax_handle=ax2, linewidth=3, fmt='o-', markersize=8, label='Target')
    #     utils.plot_mean_std_area(T_space, result_em_fits_mean[..., 2][ratiohier_i, sigmax_i], result_em_fits_std[..., 2][ratiohier_i, sigmax_i], xlabel='Number of items', ylabel="Mixture probabilities", ax_handle=ax2, linewidth=3, fmt='o-', markersize=8, label='Nontarget')
    #     utils.plot_mean_std_area(T_space, result_em_fits_mean[..., 3][ratiohier_i, sigmax_i], result_em_fits_std[..., 3][ratiohier_i, sigmax_i], xlabel='Number of items', ylabel="Mixture probabilities", ax_handle=ax2, linewidth=3, fmt='o-', markersize=8, label='Random')

    #     lines, labels = ax.get_legend_handles_labels()
    #     lines2, labels2 = ax2.get_legend_handles_labels()
    #     ax.legend(lines + lines2, labels + labels2)

    #     ax.set_title('ratio_hier %.2f, sigmax %.2f' % (ratiohier_space[ratiohier_i], sigmax_space[sigmax_i]))
    #     ax.set_xlim([0.9, 5.1])
    #     ax.set_xticks(range(1, 6))
    #     ax.set_xticklabels(range(1, 6))

    #     f.canvas.draw()

    #     if savefigs:
    #         dataio.save_current_figure('memorycurves_emfits_ratiohier%.2fsigmax%.2f_{label}_{unique_id}.pdf' % (ratiohier_space[ratiohier_i], sigmax_space[sigmax_i]))

    def em_plot_paper(sigmax_i, ratiohier_i):
        f, ax = plt.subplots()

        # mixture probabilities
        utils.plot_mean_std_area(bays09_T_space_interp, result_em_fits_mean[..., 1][ratiohier_i, sigmax_i][:bays09_T_space_interp.size], result_em_fits_std[..., 1][ratiohier_i, sigmax_i][:bays09_T_space_interp.size], xlabel='Number of items', ylabel="Mixture probabilities", ax_handle=ax, linewidth=3, fmt='o-', markersize=5, label='Target')
        utils.plot_mean_std_area(bays09_T_space_interp, result_em_fits_mean[..., 2][ratiohier_i, sigmax_i][:bays09_T_space_interp.size], result_em_fits_std[..., 2][ratiohier_i, sigmax_i][:bays09_T_space_interp.size], xlabel='Number of items', ylabel="Mixture probabilities", ax_handle=ax, linewidth=3, fmt='o-', markersize=5, label='Nontarget')
        utils.plot_mean_std_area(bays09_T_space_interp, result_em_fits_mean[..., 3][ratiohier_i, sigmax_i][:bays09_T_space_interp.size], result_em_fits_std[..., 3][ratiohier_i, sigmax_i][:bays09_T_space_interp.size], xlabel='Number of items', ylabel="Mixture probabilities", ax_handle=ax, linewidth=3, fmt='o-', markersize=5, label='Random')

        ax.legend(prop={'size':15})

        ax.set_title('ratio_hier %.2f, sigmax %.2f' % (ratiohier_space[ratiohier_i], sigmax_space[sigmax_i]))
        ax.set_xlim([1.0, bays09_T_space_interp.size])
        ax.set_ylim([0.0, 1.1])
        ax.set_xticks(range(1, bays09_T_space_interp.size+1))
        ax.set_xticklabels(range(1, bays09_T_space_interp.size+1))

        f.canvas.draw()

        if savefigs:
            dataio.save_current_figure('memorycurves_emfits_paper_ratiohier%.2fsigmax%.2f_{label}_{unique_id}.pdf' % (ratiohier_space[ratiohier_i], sigmax_space[sigmax_i]))

    if plot_selected_memory_curves:
        selected_values = [[0.84, 0.23], [0.84, 0.19]]

        for current_values in selected_values:
            # Find the indices
            ratiohier_i     = np.argmin(np.abs(current_values[0] - ratiohier_space))
            sigmax_i        = np.argmin(np.abs(current_values[1] - sigmax_space))


            # mem_plot_precision(sigmax_i, ratiohier_i)
            # mem_plot_kappa(sigmax_i, ratiohier_i)

    if plot_best_memory_curves:

        # Best precision fit
        best_axis2_i_all = np.argmin(dist_diff_precision_experim, axis=1)
        for axis1_i, best_axis2_i in enumerate(best_axis2_i_all):
            mem_plot_precision(best_axis2_i, axis1_i, gorgo11_experimental_precision)

        # Best kappa fit
        best_axis2_i_all = np.argmin(dist_diff_emkappa_experim, axis=1)
        for axis1_i, best_axis2_i in enumerate(best_axis2_i_all):
            mem_plot_kappa(best_axis2_i, axis1_i, gorgo11_T_space, gorgo11_experimental_emfits_mean[0], gorgo11_experimental_emfits_std[0])
            # em_plot(best_axis2_i, axis1_i)

        # Best em parameters fit to Bays09
        best_axis2_i_all = np.argmin(dist_diff_modelfits_experfits_bays09, axis=1)
        # best_axis2_i_all = np.argmin(dist_diff_em_mixtures_bays09, axis=1)

        for axis1_i, best_axis2_i in enumerate(best_axis2_i_all):
            mem_plot_kappa(best_axis2_i, axis1_i, bays09_T_space, bays09_experimental_mixtures_mean[0], bays09_experimental_mixtures_std[0])
            # em_plot(best_axis2_i, axis1_i)
            em_plot_paper(best_axis2_i, axis1_i)

    all_args = data_pbs.loaded_data['args_list']
    variables_to_save = ['gorgo11_experimental_precision', 'gorgo11_experimental_kappa', 'bays09_experimental_mixtures_mean_compatible']

    if savedata:
        dataio.save_variables_default(locals(), additional_variables=variables_to_save)

        dataio.make_link_output_to_dropbox(dropbox_current_experiment_folder='memory_curves')

    plt.show()

    return locals()
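# The Bays 2009 block above uses scipy.interpolate.interp1d to fill in mixture-model values for
# set sizes that were not measured (3 and 5 items), so the experimental arrays line up with the
# model's T_space. A self-contained sketch with made-up numbers; only the interp1d usage mirrors
# the code above.
def _demo_interpolate_missing_setsizes():
    import numpy as np
    import scipy.interpolate as spint

    measured_n_items = np.array([1, 2, 4, 6])
    # Rows: kappa, p(target), p(nontargets), p(random); columns: measured set sizes
    emfits_measured = np.array([
        [15.0, 10.0, 6.0, 4.0],
        [0.95, 0.90, 0.80, 0.70],
        [0.02, 0.05, 0.12, 0.20],
        [0.03, 0.05, 0.08, 0.10],
    ])

    # interp1d interpolates along the last axis by default, so each row becomes a curve over
    # set size that can be evaluated at the missing item counts
    emfit_intpfct = spint.interp1d(measured_n_items, emfits_measured)
    emfits_compatible = emfit_intpfct(np.arange(1, 7))  # now defined for 1..6 items

    return emfits_compatible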
def plots_specific_stimuli_mixed(data_pbs, generator_module=None):
    '''
        Reload and plot behaviour of mixed population code on specific Stimuli
        of 3 items.
    '''

    #### SETUP
    #
    savefigs = True
    savedata = True

    plot_per_min_dist_all = False
    specific_plots_paper = False
    specific_plots_emfits = True

    colormap = None  # or 'cubehelix'
    plt.rcParams['font.size'] = 16
    #
    #### /SETUP

    print "Order parameters: ", generator_module.dict_parameters_range.keys()

    result_all_precisions_mean = utils.nanmean(np.squeeze(data_pbs.dict_arrays['result_all_precisions']['results']), axis=-1)
    result_all_precisions_std = utils.nanstd(np.squeeze(data_pbs.dict_arrays['result_all_precisions']['results']), axis=-1)
    result_em_fits_mean = utils.nanmean(np.squeeze(data_pbs.dict_arrays['result_em_fits']['results']), axis=-1)
    result_em_fits_std = utils.nanstd(np.squeeze(data_pbs.dict_arrays['result_em_fits']['results']), axis=-1)
    result_em_kappastddev_mean = utils.nanmean(utils.kappa_to_stddev(np.squeeze(data_pbs.dict_arrays['result_em_fits']['results'])[..., 0, :]), axis=-1)
    result_em_kappastddev_std = utils.nanstd(utils.kappa_to_stddev(np.squeeze(data_pbs.dict_arrays['result_em_fits']['results'])[..., 0, :]), axis=-1)
    result_responses_all = np.squeeze(data_pbs.dict_arrays['result_responses']['results'])
    result_target_all = np.squeeze(data_pbs.dict_arrays['result_target']['results'])
    result_nontargets_all = np.squeeze(data_pbs.dict_arrays['result_nontargets']['results'])

    nb_repetitions = result_responses_all.shape[-1]
    K = result_nontargets_all.shape[-2]
    N = result_responses_all.shape[-2]
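    # Assumed array layouts (inferred from the indexing further down):
    #   result_responses_all / result_target_all: (min_dist, sigmax, ratio, N, repetitions)
    #   result_nontargets_all:                    (min_dist, sigmax, ratio, N, K, repetitions)
    # with N trials per repetition and K nontargets per trial.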

    enforce_min_distance_space = data_pbs.loaded_data['parameters_uniques']['enforce_min_distance']
    sigmax_space = data_pbs.loaded_data['parameters_uniques']['sigmax']
    ratio_space = data_pbs.loaded_data['datasets_list'][0]['ratio_space']

    print enforce_min_distance_space
    print sigmax_space
    print ratio_space
    print result_all_precisions_mean.shape, result_em_fits_mean.shape
    print result_responses_all.shape

    dataio = DataIO(output_folder=generator_module.pbs_submission_infos['simul_out_dir'] + '/outputs/', label='global_' + dataset_infos['save_output_filename'])


    if plot_per_min_dist_all:
        # Do one plot per min distance.
        for min_dist_i, min_dist in enumerate(enforce_min_distance_space):
            # Show precision
            utils.pcolor_2d_data(result_all_precisions_mean[min_dist_i].T, x=ratio_space, y=sigmax_space, xlabel='ratio', ylabel='sigma_x', title='Precision, min_dist=%.3f' % min_dist)
            if savefigs:
                dataio.save_current_figure('precision_permindist_mindist%.2f_ratiosigmax_{label}_{unique_id}.pdf' % min_dist)

            # Show log precision
            utils.pcolor_2d_data(result_all_precisions_mean[min_dist_i].T, x=ratio_space, y=sigmax_space, xlabel='ratio', ylabel='sigma_x', title='Precision, min_dist=%.3f' % min_dist, log_scale=True)
            if savefigs:
                dataio.save_current_figure('logprecision_permindist_mindist%.2f_ratiosigmax_{label}_{unique_id}.pdf' % min_dist)


            # Plot estimated model precision
            utils.pcolor_2d_data(result_em_fits_mean[min_dist_i, ..., 0].T, x=ratio_space, y=sigmax_space, xlabel='ratio', ylabel='sigma_x', title='EM precision, min_dist=%.3f' % min_dist, log_scale=False)
            if savefigs:
                dataio.save_current_figure('logemprecision_permindist_mindist%.2f_ratiosigmax_{label}_{unique_id}.pdf' % min_dist)

            # Plot estimated Target, nontarget and random mixture components, in multiple subplots
            _, axes = plt.subplots(1, 3, figsize=(18, 6))
            plt.subplots_adjust(left=0.05, right=0.97, wspace=0.3, bottom=0.15)
            utils.pcolor_2d_data(result_em_fits_mean[min_dist_i, ..., 1].T, x=ratio_space, y=sigmax_space, xlabel='ratio', ylabel='sigma_x', title='Target, min_dist=%.3f' % min_dist, log_scale=False, ax_handle=axes[0], ticks_interpolate=5)
            utils.pcolor_2d_data(result_em_fits_mean[min_dist_i, ..., 2].T, x=ratio_space, y=sigmax_space, xlabel='ratio', ylabel='sigma_x', title='Nontarget, min_dist=%.3f' % min_dist, log_scale=False, ax_handle=axes[1], ticks_interpolate=5)
            utils.pcolor_2d_data(result_em_fits_mean[min_dist_i, ..., 3].T, x=ratio_space, y=sigmax_space, xlabel='ratio', ylabel='sigma_x', title='Random, min_dist=%.3f' % min_dist, log_scale=False, ax_handle=axes[2], ticks_interpolate=5)

            if savefigs:
                dataio.save_current_figure('em_mixtureprobs_permindist_mindist%.2f_ratiosigmax_{label}_{unique_id}.pdf' % min_dist)

            # Plot Log-likelihood of Mixture model, sanity check
            utils.pcolor_2d_data(result_em_fits_mean[min_dist_i, ..., -1].T, x=ratio_space, y=sigmax_space, xlabel='ratio', ylabel='sigma_x', title='EM loglik, min_dist=%.3f' % min_dist, log_scale=False)
            if savefigs:
                dataio.save_current_figure('em_loglik_permindist_mindist%.2f_ratiosigmax_{label}_{unique_id}.pdf' % min_dist)

    if specific_plots_paper:
        # We need to choose 3 levels of min_distances
        target_sigmax = 0.25
        target_mindist_low = 0.15
        target_mindist_medium = 0.36
        target_mindist_high = 1.5

        sigmax_level_i = np.argmin(np.abs(sigmax_space - target_sigmax))
        min_dist_level_low_i = np.argmin(np.abs(enforce_min_distance_space - target_mindist_low))
        min_dist_level_medium_i = np.argmin(np.abs(enforce_min_distance_space - target_mindist_medium))
        min_dist_level_high_i = np.argmin(np.abs(enforce_min_distance_space - target_mindist_high))

        ## Do for each distance
        for min_dist_i in [min_dist_level_low_i, min_dist_level_medium_i, min_dist_level_high_i]:
        # for min_dist_i in xrange(enforce_min_distance_space.size):
            # Plot precision
            utils.plot_mean_std_area(ratio_space, result_all_precisions_mean[min_dist_i, sigmax_level_i], result_all_precisions_std[min_dist_i, sigmax_level_i]) #, xlabel='Ratio conjunctivity', ylabel='Precision of recall')
            # plt.title('Min distance %.3f' % enforce_min_distance_space[min_dist_i])
            plt.ylim([0, np.max(result_all_precisions_mean[min_dist_i, sigmax_level_i] + result_all_precisions_std[min_dist_i, sigmax_level_i])])

            if savefigs:
                dataio.save_current_figure('mindist%.2f_precisionrecall_forpaper_{label}_{unique_id}.pdf' % enforce_min_distance_space[min_dist_i])

            # Plot kappa fitted
            utils.plot_mean_std_area(ratio_space, result_em_fits_mean[min_dist_i, sigmax_level_i, :, 0], result_em_fits_std[min_dist_i, sigmax_level_i, :, 0]) #, xlabel='Ratio conjunctivity', ylabel='Fitted kappa')
            plt.ylim([-0.1, np.max(result_em_fits_mean[min_dist_i, sigmax_level_i, :, 0] + result_em_fits_std[min_dist_i, sigmax_level_i, :, 0])])
            # plt.title('Min distance %.3f' % enforce_min_distance_space[min_dist_i])
            if savefigs:
                dataio.save_current_figure('mindist%.2f_emkappa_forpaper_{label}_{unique_id}.pdf' % enforce_min_distance_space[min_dist_i])

            # Plot the circular stddev implied by the fitted kappa; easier to visualize than kappa itself
            utils.plot_mean_std_area(ratio_space, result_em_kappastddev_mean[min_dist_i, sigmax_level_i], result_em_kappastddev_std[min_dist_i, sigmax_level_i]) #, xlabel='Ratio conjunctivity', ylabel='Fitted kappa_stddev')
            # plt.title('Min distance %.3f' % enforce_min_distance_space[min_dist_i])
            plt.ylim([0, 1.1*np.max(result_em_kappastddev_mean[min_dist_i, sigmax_level_i] + result_em_kappastddev_std[min_dist_i, sigmax_level_i])])
            if savefigs:
                dataio.save_current_figure('mindist%.2f_emkappastddev_forpaper_{label}_{unique_id}.pdf' % enforce_min_distance_space[min_dist_i])


            # Plot LLH
            utils.plot_mean_std_area(ratio_space, result_em_fits_mean[min_dist_i, sigmax_level_i, :, -1], result_em_fits_std[min_dist_i, sigmax_level_i, :, -1]) #, xlabel='Ratio conjunctivity', ylabel='Loglikelihood of Mixture model fit')
            # plt.title('Min distance %.3f' % enforce_min_distance_space[min_dist_i])
            if savefigs:
                dataio.save_current_figure('mindist%.2f_emllh_forpaper_{label}_{unique_id}.pdf' % enforce_min_distance_space[min_dist_i])

            # Plot mixture parameters, std
            utils.plot_multiple_mean_std_area(ratio_space, result_em_fits_mean[min_dist_i, sigmax_level_i, :, 1:4].T, result_em_fits_std[min_dist_i, sigmax_level_i, :, 1:4].T)
            plt.ylim([0.0, 1.1])
            # plt.title('Min distance %.3f' % enforce_min_distance_space[min_dist_i])
            # plt.legend(["Target", "Non-target", "Random"])
            if savefigs:
                dataio.save_current_figure('mindist%.2f_emprobs_forpaper_{label}_{unique_id}.pdf' % enforce_min_distance_space[min_dist_i])

            # Mixture parameters, SEM
            utils.plot_multiple_mean_std_area(ratio_space, result_em_fits_mean[min_dist_i, sigmax_level_i, :, 1:4].T, result_em_fits_std[min_dist_i, sigmax_level_i, :, 1:4].T/np.sqrt(nb_repetitions))
            plt.ylim([0.0, 1.1])
            # plt.title('Min distance %.3f' % enforce_min_distance_space[min_dist_i])
            # plt.legend(["Target", "Non-target", "Random"])
            if savefigs:
                dataio.save_current_figure('mindist%.2f_emprobs_forpaper_sem_{label}_{unique_id}.pdf' % enforce_min_distance_space[min_dist_i])

    if specific_plots_emfits:
        # We need to choose 3 levels of min_distances
        target_sigmax = 0.25
        target_mindist_low = 0.15
        target_mindist_medium = 0.36
        target_mindist_high = 1.5

        sigmax_level_i = np.argmin(np.abs(sigmax_space - target_sigmax))
        min_dist_level_low_i = np.argmin(np.abs(enforce_min_distance_space - target_mindist_low))
        min_dist_level_medium_i = np.argmin(np.abs(enforce_min_distance_space - target_mindist_medium))
        min_dist_level_high_i = np.argmin(np.abs(enforce_min_distance_space - target_mindist_high))

        min_dist_i_plotting_space = np.array([min_dist_level_low_i, min_dist_level_medium_i, min_dist_level_high_i])

        # kappa (K+1), mixt_target, mixt_nontargets (K), mixt_random, LL, bic
        result_emfitallitems = np.empty((min_dist_i_plotting_space.size, ratio_space.size, 2*K+5))*np.nan

        ## Do for each distance
        for min_dist_plotting_i, min_dist_i in enumerate(min_dist_i_plotting_space):
            # Fit the mixture model
            for ratio_i, ratio in enumerate(ratio_space):
                print "Refitting EM all items. Ratio:", ratio, "Dist:", enforce_min_distance_space[min_dist_i]
                em_fit = em_circularmixture_allitems.fit(
                    result_responses_all[min_dist_i, sigmax_level_i, ratio_i].flatten(),
                    result_target_all[min_dist_i, sigmax_level_i, ratio_i].flatten(),
                    result_nontargets_all[min_dist_i, sigmax_level_i, ratio_i].transpose((0, 2, 1)).reshape((N*nb_repetitions, K)))

                result_emfitallitems[min_dist_plotting_i, ratio_i] = em_fit['kappa'].tolist() + [em_fit['mixt_target']] + em_fit['mixt_nontargets'].tolist() + [em_fit[key] for key in ('mixt_random', 'train_LL', 'bic')]

            # Plot the fitted mixture proportions against the conjunctivity ratio
            _, ax = plt.subplots()
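            # Columns 3:7 hold [mixt_target, mixt_nontarget_1, ..., mixt_nontarget_K, mixt_random]
            # under the layout above, assuming K=2; a K-agnostic slice would be
            # result_emfitallitems[min_dist_plotting_i, :, K+1:2*K+3] (shown for reference, not used here).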
            ax.plot(ratio_space, result_emfitallitems[min_dist_plotting_i, :, 3:7])

            if savefigs:
                dataio.save_current_figure('mindist%.2f_emprobsfullitems_{label}_{unique_id}.pdf' % enforce_min_distance_space[min_dist_i])



    all_args = data_pbs.loaded_data['args_list']
    variables_to_save = ['nb_repetitions']

    if savedata:
        dataio.save_variables_default(locals(), variables_to_save)

        dataio.make_link_output_to_dropbox(dropbox_current_experiment_folder='specific_stimuli')


    plt.show()

    return locals()
def plots_memory_curves(data_pbs, generator_module=None):
    '''
        Reload and plot the memory curve of a feature code.
        Can use the marginal Fisher information and the fitted mixture model as well.
    '''

    #### SETUP
    #
    savefigs = True
    savedata = True

    plot_pcolor_fit_precision_to_fisherinfo = True
    plot_selected_memory_curves = True

    colormap = None  # or 'cubehelix'
    plt.rcParams['font.size'] = 16
    #
    #### /SETUP

    print "Order parameters: ", generator_module.dict_parameters_range.keys()

    result_all_precisions_mean = utils.nanmean(np.squeeze(data_pbs.dict_arrays['result_all_precisions']['results']), axis=-1)
    result_all_precisions_std = utils.nanstd(np.squeeze(data_pbs.dict_arrays['result_all_precisions']['results']), axis=-1)
    result_em_fits_mean = utils.nanmean(np.squeeze(data_pbs.dict_arrays['result_em_fits']['results']), axis=-1)
    result_em_fits_std = utils.nanstd(np.squeeze(data_pbs.dict_arrays['result_em_fits']['results']), axis=-1)
    result_marginal_inv_fi_mean = utils.nanmean(np.squeeze(data_pbs.dict_arrays['result_marginal_inv_fi']['results']), axis=-1)
    result_marginal_inv_fi_std = utils.nanstd(np.squeeze(data_pbs.dict_arrays['result_marginal_inv_fi']['results']), axis=-1)
    result_marginal_fi_mean = utils.nanmean(1./np.squeeze(data_pbs.dict_arrays['result_marginal_inv_fi']['results']), axis=-1)
    result_marginal_fi_std = utils.nanstd(1./np.squeeze(data_pbs.dict_arrays['result_marginal_inv_fi']['results']), axis=-1)

    M_space = data_pbs.loaded_data['parameters_uniques']['M']
    sigmax_space = data_pbs.loaded_data['parameters_uniques']['sigmax']
    T_space = data_pbs.loaded_data['datasets_list'][0]['T_space']

    print M_space
    print sigmax_space
    print T_space
    print result_all_precisions_mean.shape, result_em_fits_mean.shape, result_marginal_inv_fi_mean.shape

    dataio = DataIO.DataIO(output_folder=generator_module.pbs_submission_infos['simul_out_dir'] + '/outputs/', label='global_' + dataset_infos['save_output_filename'])

    ## Load Experimental data
    data_simult = load_experimental_data.load_data_simult(data_dir=os.path.normpath(os.path.join(os.path.split(load_experimental_data.__file__)[0], '../../experimental_data/')))
    memory_experimental = data_simult['precision_nitems_theo']

    # Compute some landscapes of fit!
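    # Each landscape is a squared-error distance, summed over set sizes T, between a model
    # summary (sample precision or fitted kappa) and either the marginal Fisher information
    # or the experimental memory curve. The factor 2. rescales precision to the Fisher
    # information convention assumed here; the '_ratio_' variants penalize deviations of the
    # ratio from 1 instead, and '_1item' restricts the comparison to the single-item condition.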
    dist_diff_precision_margfi = np.sum(np.abs(result_all_precisions_mean*2. - result_marginal_fi_mean[..., 0])**2., axis=-1)
    dist_diff_precision_margfi_1item = np.abs(result_all_precisions_mean[..., 0]*2. - result_marginal_fi_mean[..., 0, 0])**2.
    dist_diff_all_precision_margfi = np.abs(result_all_precisions_mean*2. - result_marginal_fi_mean[..., 0])**2.

    dist_ratio_precision_margfi = np.sum(np.abs((result_all_precisions_mean*2.)/result_marginal_fi_mean[..., 0] - 1.0)**2., axis=-1)

    dist_diff_emkappa_margfi = np.sum(np.abs(result_em_fits_mean[..., 0]*2. - result_marginal_fi_mean[..., 0])**2., axis=-1)
    dist_ratio_emkappa_margfi = np.sum(np.abs((result_em_fits_mean[..., 0]*2.)/result_marginal_fi_mean[..., 0] - 1.0)**2., axis=-1)

    dist_diff_precision_experim = np.sum(np.abs(result_all_precisions_mean - memory_experimental)**2., axis=-1)
    dist_diff_emkappa_experim = np.sum(np.abs(result_em_fits_mean[..., 0] - memory_experimental)**2., axis=-1)


    if plot_pcolor_fit_precision_to_fisherinfo:
        # Check fit between precision and fisher info
        utils.pcolor_2d_data(dist_diff_precision_margfi, log_scale=True, x=M_space, y=sigmax_space, xlabel='M', ylabel='sigmax')

        if savefigs:
            dataio.save_current_figure('match_precision_margfi_log_pcolor_{label}_{unique_id}.pdf')

        # utils.pcolor_2d_data(dist_diff_precision_margfi, x=M_space, y=sigmax_space[2:], xlabel='M', ylabel='sigmax')
        # if savefigs:
        #    dataio.save_current_figure('match_precision_margfi_pcolor_{label}_{unique_id}.pdf')

        utils.pcolor_2d_data(dist_ratio_precision_margfi[4:], x=M_space[4:], y=sigmax_space, xlabel='M', ylabel='sigmax', log_scale=True)

        utils.pcolor_2d_data(dist_diff_emkappa_margfi, x=M_space, y=sigmax_space, xlabel='M', ylabel='sigmax', log_scale=True)

        utils.pcolor_2d_data(dist_ratio_emkappa_margfi[4:], x=M_space[4:], y=sigmax_space, xlabel='M', ylabel='sigmax', log_scale=True)

        utils.pcolor_2d_data(dist_diff_precision_experim, x=M_space, y=sigmax_space, xlabel='M', ylabel='sigmax', log_scale=True)

        utils.pcolor_2d_data(dist_diff_emkappa_experim, x=M_space, y=sigmax_space, xlabel='M', ylabel='sigmax', log_scale=True)

        utils.pcolor_2d_data(dist_diff_precision_margfi*dist_diff_emkappa_margfi*dist_diff_precision_experim*dist_diff_emkappa_experim, x=M_space, y=sigmax_space, xlabel='M', ylabel='sigmax', log_scale=True)

        utils.pcolor_2d_data(dist_diff_precision_margfi_1item, log_scale=True, x=M_space, y=sigmax_space, xlabel='M', ylabel='sigmax')


    if plot_selected_memory_curves:
        selected_values = [[20, 0.19], [40, 0.15], [100, 0.44], [140, 0.15], [40, 0.17], [60, 0.50]]

        for current_values in selected_values:
            # Find the indices
            M_i         = np.argmin(np.abs(current_values[0] - M_space))
            sigmax_i    = np.argmin(np.abs(current_values[1] - sigmax_space))

            ax = utils.plot_mean_std_area(T_space, memory_experimental, np.zeros(T_space.size), linewidth=3, fmt='o-', markersize=8)

            ax = utils.plot_mean_std_area(T_space, result_all_precisions_mean[M_i, sigmax_i], result_all_precisions_std[M_i, sigmax_i], ax_handle=ax, linewidth=3, fmt='o-', markersize=8)

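            # The 0.5 factor puts the marginal Fisher information on the same scale as the
            # precision/kappa curves (the inverse of the factor 2. used in the distance
            # landscapes above); this assumes precision corresponds to half the Fisher information.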
            ax = utils.plot_mean_std_area(T_space, 0.5*result_marginal_fi_mean[..., 0][M_i, sigmax_i], 0.5*result_marginal_fi_std[..., 0][M_i, sigmax_i], ax_handle=ax, linewidth=3, fmt='o-', markersize=8)

            ax = utils.plot_mean_std_area(T_space, result_em_fits_mean[..., 0][M_i, sigmax_i], result_em_fits_std[..., 0][M_i, sigmax_i], ax_handle=ax, xlabel='Number of items', ylabel='Kappa/Inverse variance', linewidth=3, fmt='o-', markersize=8)

            ax.set_title('M %d, sigmax %.2f' % (M_space[M_i], sigmax_space[sigmax_i]))
            plt.legend(['Experimental data', 'Precision of samples', 'Marginal Fisher Information', 'Fitted kappa'])

            ax.set_xlim([0.9, 5.1])
            ax.set_xticks(range(1, 6))
            ax.set_xticklabels(range(1, 6))

            if savefigs:
                dataio.save_current_figure('memorycurves_M%dsigmax%.2f_{label}_{unique_id}.pdf' % (M_space[M_i], sigmax_space[sigmax_i]))



    all_args = data_pbs.loaded_data['args_list']
    variables_to_save = ['result_all_precisions_mean', 'result_em_fits_mean', 'result_marginal_inv_fi_mean', 'result_all_precisions_std', 'result_em_fits_std', 'result_marginal_inv_fi_std', 'result_marginal_fi_mean', 'result_marginal_fi_std', 'M_space', 'sigmax_space', 'T_space', 'all_args']

    if savedata:
        dataio.save_variables(variables_to_save, locals())


    plt.show()

    return locals()
def plots_ratioMscaling(data_pbs, generator_module=None):
    '''
        Reload and plot the precision and mixture-model fits of a mixed code.
    '''

    #### SETUP
    #
    savefigs = True
    savedata = True

    plots_pcolor_all = True
    plots_effect_M_target_kappa = False

    plots_kappa_fi_comparison = False
    plots_multiple_fisherinfo = False
    specific_plot_effect_R = False

    convert_M_realsizes = True

    plots_pcolor_realsizes_Msubs = True
    plots_pcolor_realsizes_Mtot = True

    colormap = None  # or 'cubehelix'
    plt.rcParams['font.size'] = 16
    #
    #### /SETUP

    print "Order parameters: ", generator_module.dict_parameters_range.keys()

    result_all_precisions_mean = (utils.nanmean(data_pbs.dict_arrays['result_all_precisions']['results'], axis=-1))
    result_all_precisions_std = (utils.nanstd(data_pbs.dict_arrays['result_all_precisions']['results'], axis=-1))
    result_em_fits_mean = (utils.nanmean(data_pbs.dict_arrays['result_em_fits']['results'], axis=-1))
    result_em_fits_std = (utils.nanstd(data_pbs.dict_arrays['result_em_fits']['results'], axis=-1))
    result_fisherinfo_mean = (utils.nanmean(data_pbs.dict_arrays['result_fisher_info']['results'], axis=-1))
    result_fisherinfo_std = (utils.nanstd(data_pbs.dict_arrays['result_fisher_info']['results'], axis=-1))

    result_em_fits_kappa = result_em_fits_mean[..., 0]
    result_em_fits_target = result_em_fits_mean[..., 1]
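    # kappa is only interpretable when responses are mostly on target, so mask fits where the
    # target mixture proportion falls below 0.8 (same criterion for the flat arrays below).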
    result_em_fits_kappa_valid = np.ma.masked_where(result_em_fits_target < 0.8, result_em_fits_kappa)


    # flat versions
    result_parameters_flat = np.array(data_pbs.dict_arrays['result_all_precisions']['parameters_flat'])
    result_all_precisions_mean_flat = np.mean(np.array(data_pbs.dict_arrays['result_all_precisions']['results_flat']), axis=-1)
    result_em_fits_mean_flat = np.mean(np.array(data_pbs.dict_arrays['result_em_fits']['results_flat']), axis=-1)
    result_fisherinfor_mean_flat = np.mean(np.array(data_pbs.dict_arrays['result_fisher_info']['results_flat']), axis=-1)
    result_em_fits_kappa_flat = result_em_fits_mean_flat[..., 0]
    result_em_fits_target_flat = result_em_fits_mean_flat[..., 1]
    result_em_fits_kappa_valid_flat = np.ma.masked_where(result_em_fits_target_flat < 0.8, result_em_fits_kappa_flat)

    all_args = data_pbs.loaded_data['args_list']


    M_space = data_pbs.loaded_data['parameters_uniques']['M'].astype(int)
    ratio_space = data_pbs.loaded_data['parameters_uniques']['ratio_conj']
    R_space = data_pbs.loaded_data['parameters_uniques']['R'].astype(int)
    num_repetitions = generator_module.num_repetitions

    print M_space
    print ratio_space
    print R_space
    print result_all_precisions_mean.shape, result_em_fits_mean.shape

    dataio = DataIO.DataIO(output_folder=generator_module.pbs_submission_infos['simul_out_dir'] + '/outputs/', label='global_' + dataset_infos['save_output_filename'])

    MAX_DISTANCE = 100.

    if convert_M_realsizes:
        # Currently M*ratio_conj gives the size of the conjunctive subpopulation,
        # but only floor(M_conj**(1./R))**R conjunctive units (and floor((M - M_conj)/R)*R
        # feature units) are actually used. So convert to M_conj_real and M_feat_real
        # instead of reporting raw M and ratio.
        result_parameters_flat_subM_converted = []
        result_parameters_flat_Mtot_converted = []

        for params in result_parameters_flat:
            M = params[0]; ratio_conj = params[1]; R = int(params[2])

            M_conj_prior = int(M*ratio_conj)
            M_conj_true = int(np.floor(M_conj_prior**(1./R))**R)
            M_feat_true = int(np.floor((M-M_conj_prior)/R)*R)
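            # Worked example (illustrative numbers, not taken from the sweep): M=356,
            # ratio_conj=0.8, R=2 gives M_conj_prior=284 but M_conj_true=16**2=256 and
            # M_feat_true=36*2=72, so the effective sizes can differ noticeably from
            # M*ratio_conj and M*(1-ratio_conj).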

            # result_parameters_flat_subM_converted contains (M_conj, M_feat, R)
            result_parameters_flat_subM_converted.append(np.array([M_conj_true, M_feat_true, R]))
            # result_parameters_flat_Mtot_converted contains (M_tot, ratio_conj, R)
            result_parameters_flat_Mtot_converted.append(np.array([float(M_conj_true+M_feat_true), M_conj_true/float(M_conj_true+M_feat_true), R]))

        result_parameters_flat_subM_converted = np.array(result_parameters_flat_subM_converted)
        result_parameters_flat_Mtot_converted = np.array(result_parameters_flat_Mtot_converted)

    if plots_pcolor_all:
        if convert_M_realsizes:
            def plot_interp(points, data, currR_indices, title='', points_label='', xlabel='', ylabel=''):
                utils.contourf_interpolate_data_interactive_maxvalue(points[currR_indices][..., :2], data[currR_indices], xlabel=xlabel, ylabel=ylabel, title='%s, R=%d' % (title, R), interpolation_numpoints=200, interpolation_method='nearest', log_scale=False)
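                # Note: R is not a parameter of plot_interp; it is resolved from the enclosing
                # loop over R_space at call time, so this helper is only meaningful inside that loop.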

                if savefigs:
                    dataio.save_current_figure('pcolortrueM%s_%s_R%d_log_{label}_{unique_id}.pdf' % (points_label, title, R))

            all_datas = [dict(name='precision', data=result_all_precisions_mean_flat), dict(name='kappa', data=result_em_fits_kappa_flat), dict(name='kappavalid', data=result_em_fits_kappa_valid_flat), dict(name='target', data=result_em_fits_target_flat), dict(name='fisherinfo', data=result_fisherinfor_mean_flat)]
            all_points = []
            if plots_pcolor_realsizes_Msubs:
                all_points.append(dict(name='sub', data=result_parameters_flat_subM_converted, xlabel='M_conj', ylabel='M_feat'))
            if plots_pcolor_realsizes_Mtot:
                all_points.append(dict(name='tot', data=result_parameters_flat_Mtot_converted, xlabel='Mtot', ylabel='ratio_conj'))

            for curr_points in all_points:
                for curr_data in all_datas:
                    for R_i, R in enumerate(R_space):
                        currR_indices = curr_points['data'][:, 2] == R

                        plot_interp(curr_points['data'], curr_data['data'], currR_indices, title=curr_data['name'], points_label=curr_points['name'], xlabel=curr_points['xlabel'], ylabel=curr_points['ylabel'])

                # # show precision
                # plot_interp(result_parameters_flat_subM_converted, result_all_precisions_mean_flat, currR_indices, title='precision')

                # # show kappa
                # plot_interp(result_parameters_flat_subM_converted, result_em_fits_kappa_flat, currR_indices, title='kappa')

                # plot_interp(result_parameters_flat_subM_converted, result_em_fits_kappa_valid_flat, currR_indices, title='kappavalid')

                # # show probability on target
                # plot_interp(result_parameters_flat_subM_converted, result_em_fits_target_flat, currR_indices, title='target')

                # # show fisher info
                # plot_interp(result_parameters_flat_subM_converted, result_fisherinfor_mean_flat, currR_indices, title='fisherinfo')

        else:
            # Do one pcolor for M and ratio per R
            for R_i, R in enumerate(R_space):
                # Check evolution of precision given M and ratio
                utils.pcolor_2d_data(result_all_precisions_mean[..., R_i], log_scale=True, x=M_space, y=ratio_space, xlabel='M', ylabel='ratio', xlabel_format="%d", title='precision, R=%d' % R)
                if savefigs:
                    dataio.save_current_figure('pcolor_precision_R%d_log_{label}_{unique_id}.pdf' % R)

                # Show kappa
                try:
                    utils.pcolor_2d_data(result_em_fits_kappa_valid[..., R_i], log_scale=True, x=M_space, y=ratio_space, xlabel='M', ylabel='ratio', xlabel_format="%d", title='kappa, R=%d' % R)
                    if savefigs:
                        dataio.save_current_figure('pcolor_kappa_R%d_log_{label}_{unique_id}.pdf' % R)
                except ValueError:
                    pass

                # Show probability on target
                utils.pcolor_2d_data(result_em_fits_target[..., R_i], log_scale=False, x=M_space, y=ratio_space, xlabel='M', ylabel='ratio', xlabel_format="%d", title='target, R=%d' % R)
                if savefigs:
                    dataio.save_current_figure('pcolor_target_R%d_{label}_{unique_id}.pdf' % R)

                # Show Fisher info
                utils.pcolor_2d_data(result_fisherinfo_mean[..., R_i], log_scale=True, x=M_space, y=ratio_space, xlabel='M', ylabel='ratio', xlabel_format="%d", title='fisher info, R=%d' % R)
                if savefigs:
                    dataio.save_current_figure('pcolor_fisherinfo_R%d_log_{label}_{unique_id}.pdf' % R)

                plt.close('all')

    if plots_effect_M_target_kappa:
        def plot_ratio_target_kappa(ratio_target_kappa_given_M, target_kappa, R):
            f, ax = plt.subplots()
            ax.plot(M_space, ratio_target_kappa_given_M)
            ax.set_xlabel('M')
            ax.set_ylabel('Optimal ratio')
            ax.set_title('Optimal Ratio for kappa %d, R=%d' % (target_kappa, R))

            if savefigs:
                dataio.save_current_figure('optratio_M_targetkappa%d_R%d_{label}_{unique_id}.pdf' % (target_kappa, R))

        target_kappas = np.array([100, 200, 300, 500, 1000, 3000])
        for R_i, R in enumerate(R_space):
            for target_kappa in target_kappas:
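                # For each M, find the ratio whose fitted kappa is closest to target_kappa, and
                # mask out M values whose best squared distance still exceeds MAX_DISTANCE.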
                dist_to_target_kappa = (result_em_fits_kappa[..., R_i] - target_kappa)**2.
                best_dist_to_target_kappa = np.argmin(dist_to_target_kappa, axis=1)
                ratio_target_kappa_given_M = np.ma.masked_where(dist_to_target_kappa[np.arange(dist_to_target_kappa.shape[0]), best_dist_to_target_kappa] > MAX_DISTANCE, ratio_space[best_dist_to_target_kappa])

                # Plot the optimal ratio as a function of M for this target kappa
                plot_ratio_target_kappa(ratio_target_kappa_given_M, target_kappa, R)

            plt.close('all')


    if plots_kappa_fi_comparison:

        # Compare the fitted kappa with the kappa implied by the Fisher information
        if True:
            for R_i, R in enumerate(R_space):
                for M_tot_selected_i, M_tot_selected in enumerate(M_space[::2]):
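                    # M_space[::2] keeps every other M; 2*M_tot_selected_i maps the subsampled
                    # index back onto the full M axis of the result arrays below.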

                    # M_conj_space = ((1.-ratio_space)*M_tot_selected).astype(int)
                    # M_feat_space = M_tot_selected - M_conj_space

                    f, axes = plt.subplots(2, 1)
                    axes[0].plot(ratio_space, result_em_fits_kappa[2*M_tot_selected_i, ..., R_i])
                    axes[0].set_xlabel('ratio')
                    axes[0].set_title('Fitted kappa')

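                    # kappa_FI: convert the Fisher information into an equivalent concentration,
                    # via stddev = 1/sqrt(FI) followed by utils.stddev_to_kappa(stddev).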
                    axes[1].plot(ratio_space, utils.stddev_to_kappa(1./result_fisherinfo_mean[2*M_tot_selected_i, ..., R_i]**0.5))
                    axes[1].set_xlabel('ratio')
                    axes[1].set_title('kappa_FI')

                    f.suptitle('M_tot %d' % M_tot_selected, fontsize=15)
                    f.set_tight_layout(True)

                    if savefigs:
                        dataio.save_current_figure('comparison_kappa_fisher_R%d_M%d_{label}_{unique_id}.pdf' % (R, M_tot_selected))

                    plt.close(f)

        if plots_multiple_fisherinfo:
            target_fisherinfos = np.array([100, 200, 300, 500, 1000])
            for R_i, R in enumerate(R_space):
                for target_fisherinfo in target_fisherinfos:
                    dist_to_target_fisherinfo = (result_fisherinfo_mean[..., R_i] - target_fisherinfo)**2.

                    utils.pcolor_2d_data(dist_to_target_fisherinfo, log_scale=True, x=M_space, y=ratio_space, xlabel='M', ylabel='ratio', xlabel_format="%d", title='Fisher info, R=%d' % R)
                    if savefigs:
                        dataio.save_current_figure('pcolor_distfi%d_R%d_log_{label}_{unique_id}.pdf' % (target_fisherinfo, R))

                plt.close('all')

    if specific_plot_effect_R:
        # Choose an M, then find which (ratio, R) combination best matches a given target kappa
        M_target = 356
        M_target_i = np.argmin(np.abs(M_space - M_target))

        utils.pcolor_2d_data(result_em_fits_kappa_valid[M_target_i], log_scale=True, x=ratio_space, y=R_space, xlabel='ratio', ylabel='R', ylabel_format="%d", title='Kappa, M %d' % (M_target))
        if savefigs:
            dataio.save_current_figure('specific_pcolor_kappa_M%d_log_{label}_{unique_id}.pdf' % (M_target))
        # target_kappa = np.ma.mean(result_em_fits_kappa_valid[M_target_i])
        # target_kappa = 5*1e3
        target_kappa = 1.2e3

        dist_target_kappa = np.abs(result_em_fits_kappa_valid[M_target_i] - target_kappa)

        utils.pcolor_2d_data(dist_target_kappa, log_scale=True, x=ratio_space, y=R_space, xlabel='ratio', ylabel='R', ylabel_format="%d", title='Kappa dist %.2f, M %d' % (target_kappa, M_target))
        if savefigs:
            dataio.save_current_figure('specific_pcolor_distkappa%d_M%d_log_{label}_{unique_id}.pdf' % (target_kappa, M_target))




    all_args = data_pbs.loaded_data['args_list']
    variables_to_save = []

    if savedata:
        dataio.save_variables_default(locals(), variables_to_save)

        dataio.make_link_output_to_dropbox(dropbox_current_experiment_folder='higher_dimensions_R')

    plt.show()

    return locals()