Example #1
    def setup_experimental_stimuli_T(self, T):
        '''
            Set up everything needed (Sampler, etc.) and then force the human experimental dataset.

            If already set up correctly, do nothing.
        '''

        assert T in self.T_space, "T=%d not possible. %s" % (T, self.T_space)

        if self.enforced_T != T:
            self.enforced_T = T

            if T not in self.all_samplers:
                print "\n>>> Setting up {} nitems, {} datapoints".format(T, self.num_datapoints)

                # Update parameters
                self.parameters['T'] = T
                self.parameters['N'] = self.num_datapoints
                self.parameters['fixed_cued_feature_time'] = self.experiment_data_to_fit[T]['probe'][0]  # should be scalar

                self.parameters['stimuli_to_use'] = self.experiment_data_to_fit[T]['item_features'][self.filter_datapoints_mask]

                # Instantiate everything
                (_, _, _, newSampler) = launchers.init_everything(self.parameters)

                # Fix responses to the human ones
                newSampler.set_theta(self.experiment_data_to_fit[T]['response'][self.filter_datapoints_mask])

                # Store it
                self.all_samplers[self.enforced_T] = newSampler

            self.sampler = self.all_samplers[self.enforced_T]
def test_loglike_fit():
    """
        Check if the LL computation is correct

        Use specific data, generated from a given model. This model should then have max LL.
    """

    # Get a specific model, with given ratio and sigmax
    experiment_parameters = dict(
        action_to_do="launcher_do_simple_run",
        inference_method="sample",
        T=2,
        M=200,
        N=400,
        num_samples=500,
        selection_method="last",
        sigmax=0.15,
        sigmay=0.0001,
        code_type="mixed",
        ratio_conj=0.6,
        output_directory=".",
        stimuli_generation_recall="random",
        autoset_parameters=None,
    )
    experiment_launcher = experimentlauncher.ExperimentLauncher(run=True, arguments_dict=experiment_parameters)
    experiment_parameters_full = experiment_launcher.args_dict
    sampler = experiment_launcher.all_vars["sampler"]

    # Keep its dataset and responses
    stimuli_correct_to_force = sampler.data_gen.stimuli_correct.copy()
    response_to_force = sampler.theta[:, 0].copy()
    LL_target = sampler.compute_loglikelihood()

    experiment_parameters_full["stimuli_to_use"] = stimuli_correct_to_force

    ratio_space = np.linspace(0.0, 1.0, 31)

    LL_all_new = np.zeros(ratio_space.shape)

    for ratio_conj_i, ratio_conj in enumerate(ratio_space):

        experiment_parameters_full["ratio_conj"] = ratio_conj

        _, _, _, sampler = launchers.init_everything(experiment_parameters_full)

        # Set responses
        sampler.set_theta(response_to_force)

        # Compute LL
        # LL_all_new[ratio_conj_i] = sampler.compute_loglikelihood()
        LL_all_new[ratio_conj_i] = sampler.compute_loglikelihood_top90percent()

        # Print result
        print LL_all_new[ratio_conj_i]

    print LL_target
    print ratio_space, LL_all_new
    print ratio_space[np.argmax(LL_all_new)]

    return locals()
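# Hedged verification sketch (not in the original test): test_loglike_fit() returns its
# locals(), so the self-consistency claim in its docstring can be checked explicitly. The
# generating model used ratio_conj=0.6, so the LL-maximising ratio should lie close to it;
# the 0.15 tolerance below is an assumption.
def check_loglike_fit_peak():
    res = test_loglike_fit()
    best_ratio = res['ratio_space'][np.argmax(res['LL_all_new'])]
    assert abs(best_ratio - 0.6) < 0.15, "LL should peak near the generating ratio_conj"
    return best_ratio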
    def setup_experimental_stimuli(self, T, trecall):
        '''
            Set up everything needed (Sampler, etc.) and then force the human experimental dataset.

            If already set up correctly, do nothing.
        '''

        assert T in self.T_space, "T=%d not possible. %s" % (T, self.T_space)

        if self.enforced_T != T or self.enforced_trecall != trecall:
            self.enforced_T = T
            self.enforced_trecall = trecall

            if (T, trecall) not in self.all_samplers:
                print "\n>>> Setting up {} nitems, {} trecall, {} datapoints".format(T, trecall, self.num_datapoints)

                # Update parameters
                self.parameters['T'] = T
                self.parameters['N'] = self.num_datapoints
                self.parameters['fixed_cued_feature_time'] = T - trecall

                self.parameters['stimuli_to_use'] = (
                    self.experiment_data_to_fit[T][trecall]['item_features'][
                        self.filter_datapoints_mask])

                # Instantiate everything
                (_, _, _, self.sampler) = launchers.init_everything(self.parameters)

                # Fix responses to the human ones
                self.sampler.set_theta(
                    self.experiment_data_to_fit[T][trecall]['responses'][
                        self.filter_datapoints_mask])
                self.store_responses('human')

                # Store it
                self.all_samplers[(T, trecall)] = self.sampler

            self.sampler = self.all_samplers[
                (self.enforced_T, self.enforced_trecall)]
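# Hedged usage sketch (not in the original; `fitter` is a hypothetical object exposing the
# method above plus T_space): switch the enforced (T, trecall) condition and evaluate the
# model log-likelihood on the cached, human-forced sampler for each condition.
def loglik_all_conditions(fitter):
    lls = {}
    for T in fitter.T_space:
        for trecall in xrange(1, T + 1):
            fitter.setup_experimental_stimuli(T, trecall)
            lls[(T, trecall)] = fitter.sampler.compute_loglikelihood()
    return lls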
def launcher_do_mixed_varyratio_precision_pbs(args):
    """
        Compare the evolution of the precision curve as the number of neurons in a mixed network changes.
    """

    print "Doing a piece of work for launcher_do_mixed_varyratio_precision_pbs"
    save_all_output = False

    try:
        # Convert Argparse.Namespace to dict
        all_parameters = vars(args)
    except TypeError:
        # Assume it's already done
        assert type(args) is dict, "args is neither Namespace nor dict, WHY?"
        all_parameters = args

    code_type = "mixed"

    dataio = DataIO(
        output_folder=all_parameters["output_directory"], label=all_parameters["label"].format(**all_parameters)
    )

    save_every = 5
    run_counter = 0

    num_repetitions = all_parameters["num_repetitions"]

    ratio_space = np.array([all_parameters["ratio_conj"]])
    T_space = np.arange(1, all_parameters["T"] + 1)

    results_precision_ratio_T = np.nan * np.empty((ratio_space.size, T_space.size, num_repetitions), dtype=float)

    # if save_all_output:
    #     results_all_responses = np.nan*np.empty((M_space.size, M_lower_space.size, T_space.size, num_repetitions, all_parameters['N']))
    #     results_all_targets = np.nan*np.empty((M_space.size, M_lower_space.size, T_space.size, num_repetitions, all_parameters['N']))
    #     results_all_nontargets = np.nan*np.empty((M_space.size, M_lower_space.size, T_space.size, num_repetitions, all_parameters['N'], all_parameters['T']-1))

    # Show the progress
    search_progress = progress.Progress(T_space.size * ratio_space.size * num_repetitions)

    print T_space
    print ratio_space

    for repet_i in xrange(num_repetitions):
        for ratio_conj_i, ratio_conj in enumerate(ratio_space):
            for t_i, t in enumerate(T_space):
                # Will estimate the precision

                print "Precision as function of N, hierarchical network, T: %d/%d, ratio_conj %.2f, (%d/%d). %.2f%%, %s left - %s" % (
                    t,
                    T_space[-1],
                    ratio_conj,
                    repet_i + 1,
                    num_repetitions,
                    search_progress.percentage(),
                    search_progress.time_remaining_str(),
                    search_progress.eta_str(),
                )

                # Current parameter values
                all_parameters["T"] = t
                all_parameters["code_type"] = code_type
                all_parameters["ratio_conj"] = ratio_conj

                ### WORK UNIT
                (random_network, data_gen, stat_meas, sampler) = launchers.init_everything(all_parameters)

                if all_parameters["inference_method"] == "sample":
                    # Sample thetas
                    sampler.sample_theta(
                        num_samples=all_parameters["num_samples"],
                        burn_samples=100,
                        selection_method=all_parameters["selection_method"],
                        selection_num_samples=all_parameters["selection_num_samples"],
                        integrate_tc_out=False,
                        debug=False,
                    )
                elif all_parameters["inference_method"] == "max_lik":
                    # Just use the ML value for the theta
                    sampler.set_theta_max_likelihood(num_points=100, post_optimise=True)

                results_precision_ratio_T[ratio_conj_i, t_i, repet_i] = sampler.get_precision()
                print results_precision_ratio_T[ratio_conj_i, t_i, repet_i]

                # if save_all_output:
                #     (results_all_responses[ratio_conj_i, t_i, repet_i], results_all_targets[ratio_conj_i, t_i, repet_i], results_all_nontargets[ratio_conj_i, t_i, repet_i, :, :t_i]) = sampler.collect_responses()

                ### DONE WORK UNIT

                search_progress.increment()

                if run_counter % save_every == 0 or search_progress.done():
                    dataio.save_variables_default(locals())

                run_counter += 1

    print "All finished"

    return locals()
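# Hedged invocation sketch (not in the original): the launcher accepts either an
# argparse.Namespace or a plain dict. The exact set of required keys depends on
# launchers.init_everything and DataIO, so the values below are assumptions.
example_varyratio_args = dict(
    output_directory='.', label='mixed_varyratio_T{T}', num_repetitions=2,
    ratio_conj=0.5, T=4, N=200, M=100, sigmax=0.2, sigmay=0.0001,
    inference_method='sample', num_samples=100,
    selection_method='last', selection_num_samples=1,
)
# results = launcher_do_mixed_varyratio_precision_pbs(example_varyratio_args)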
def plots_ratioMscaling(data_pbs, generator_module=None):
    '''
        Reload and plot precision/fits of a Mixed code.
    '''

    #### SETUP
    #
    savefigs = True
    savedata = True

    plots_pcolor_all = False
    plots_effect_M_target_precision = False
    plots_multiple_precisions = False

    plots_effect_M_target_kappa = False

    plots_subpopulations_effects = False

    plots_subpopulations_effects_kappa_fi = True
    compute_fisher_info_perratioconj = True
    caching_fisherinfo_filename = os.path.join(generator_module.pbs_submission_infos['simul_out_dir'], 'cache_fisherinfo.pickle')

    colormap = None  # or 'cubehelix'
    plt.rcParams['font.size'] = 16
    #
    #### /SETUP

    print "Order parameters: ", generator_module.dict_parameters_range.keys()

    result_all_precisions_mean = (utils.nanmean(data_pbs.dict_arrays['result_all_precisions']['results'], axis=-1))
    result_all_precisions_std = (utils.nanstd(data_pbs.dict_arrays['result_all_precisions']['results'], axis=-1))
    result_em_fits_mean = (utils.nanmean(data_pbs.dict_arrays['result_em_fits']['results'], axis=-1))
    result_em_fits_std = (utils.nanstd(data_pbs.dict_arrays['result_em_fits']['results'], axis=-1))

    all_args = data_pbs.loaded_data['args_list']

    result_em_fits_kappa = result_em_fits_mean[..., 0]

    M_space = data_pbs.loaded_data['parameters_uniques']['M'].astype(int)
    ratio_space = data_pbs.loaded_data['parameters_uniques']['ratio_conj']
    num_repetitions = generator_module.num_repetitions

    print M_space
    print ratio_space
    print result_all_precisions_mean.shape, result_em_fits_mean.shape

    dataio = DataIO.DataIO(output_folder=generator_module.pbs_submission_infos['simul_out_dir'] + '/outputs/', label='global_' + dataset_infos['save_output_filename'])

    target_precision = 100.
    dist_to_target_precision = (result_all_precisions_mean - target_precision)**2.
    best_dist_to_target_precision = np.argmin(dist_to_target_precision, axis=1)
    MAX_DISTANCE = 100.

    ratio_target_precision_given_M = np.ma.masked_where(dist_to_target_precision[np.arange(dist_to_target_precision.shape[0]), best_dist_to_target_precision] > MAX_DISTANCE, ratio_space[best_dist_to_target_precision])

    if plots_pcolor_all:
        # Check evolution of precision given M and ratio
        utils.pcolor_2d_data(result_all_precisions_mean, log_scale=True, x=M_space, y=ratio_space, xlabel='M', ylabel='ratio', xlabel_format="%d", title='precision wrt M / ratio')
        if savefigs:
            dataio.save_current_figure('precision_log_pcolor_{label}_{unique_id}.pdf')

        # See distance to target precision evolution
        utils.pcolor_2d_data(dist_to_target_precision, log_scale=True, x=M_space, y=ratio_space, xlabel='M', ylabel='ratio', xlabel_format="%d", title='Dist to target precision %d' % target_precision)
        if savefigs:
            dataio.save_current_figure('dist_targetprecision_log_pcolor_{label}_{unique_id}.pdf')


        # Show kappa
        utils.pcolor_2d_data(result_em_fits_kappa, log_scale=True, x=M_space, y=ratio_space, xlabel='M', ylabel='ratio', xlabel_format="%d", title='kappa wrt M / ratio')
        if savefigs:
            dataio.save_current_figure('kappa_log_pcolor_{label}_{unique_id}.pdf')

        utils.pcolor_2d_data((result_em_fits_kappa - 200)**2., log_scale=True, x=M_space, y=ratio_space, xlabel='M', ylabel='ratio', xlabel_format="%d", title='dist to kappa')
        if savefigs:
            dataio.save_current_figure('dist_kappa_log_pcolor_{label}_{unique_id}.pdf')


    if plots_effect_M_target_precision:
        def plot_ratio_target_precision(ratio_target_precision_given_M, target_precision):
            f, ax = plt.subplots()
            ax.plot(M_space, ratio_target_precision_given_M)
            ax.set_xlabel('M')
            ax.set_ylabel('Optimal ratio')
            ax.set_title('Optimal ratio for precision %d' % target_precision)

            if savefigs:
                dataio.save_current_figure('effect_ratio_M_targetprecision%d_{label}_{unique_id}.pdf' % target_precision)

        plot_ratio_target_precision(ratio_target_precision_given_M, target_precision)

        if plots_multiple_precisions:
            target_precisions = np.array([100, 200, 300, 500, 1000])
            for target_precision in target_precisions:
                dist_to_target_precision = (result_all_precisions_mean - target_precision)**2.
                best_dist_to_target_precision = np.argmin(dist_to_target_precision, axis=1)
                ratio_target_precision_given_M = np.ma.masked_where(dist_to_target_precision[np.arange(dist_to_target_precision.shape[0]), best_dist_to_target_precision] > MAX_DISTANCE, ratio_space[best_dist_to_target_precision])

                # replot
                plot_ratio_target_precision(ratio_target_precision_given_M, target_precision)

    if plots_effect_M_target_kappa:
        def plot_ratio_target_kappa(ratio_target_kappa_given_M, target_kappa):
            f, ax = plt.subplots()
            ax.plot(M_space, ratio_target_kappa_given_M)
            ax.set_xlabel('M')
            ax.set_ylabel('Optimal ratio')
            ax.set_title('Optimal ratio for kappa %d' % target_kappa)

            if savefigs:
                dataio.save_current_figure('effect_ratio_M_targetkappa%d_{label}_{unique_id}.pdf' % target_kappa)

        target_kappa_space = np.array([100, 200, 300, 500, 1000, 3000])
        for target_kappa in target_kappa_space:
            dist_to_target_kappa = (result_em_fits_kappa - target_kappa)**2.
            best_dist_to_target_kappa = np.argmin(dist_to_target_kappa, axis=1)
            ratio_target_kappa_given_M = np.ma.masked_where(dist_to_target_kappa[np.arange(dist_to_target_kappa.shape[0]), best_dist_to_target_kappa] > MAX_DISTANCE, ratio_space[best_dist_to_target_kappa])

            # replot
            plot_ratio_target_kappa(ratio_target_kappa_given_M, target_kappa)

    if plots_subpopulations_effects:
        # result_all_precisions_mean
        for M_tot_selected_i, M_tot_selected in enumerate(M_space[::2]):

            M_conj_space = ((1.-ratio_space)*M_tot_selected).astype(int)
            M_feat_space = M_tot_selected - M_conj_space

            f, axes = plt.subplots(2, 2)
            axes[0, 0].plot(ratio_space, result_all_precisions_mean[2*M_tot_selected_i])
            axes[0, 0].set_xlabel('ratio')
            axes[0, 0].set_title('Measured precision')

            axes[1, 0].plot(ratio_space, M_conj_space**2*M_feat_space)
            axes[1, 0].set_xlabel('ratio')
            axes[1, 0].set_title('M_c**2*M_f')

            axes[0, 1].plot(ratio_space, M_conj_space**2.)
            axes[0, 1].set_xlabel('ratio')
            axes[0, 1].set_title('M_c**2')

            axes[1, 1].plot(ratio_space, M_feat_space)
            axes[1, 1].set_xlabel('ratio')
            axes[1, 1].set_title('M_f')

            f.suptitle('M_tot %d' % M_tot_selected, fontsize=15)
            f.set_tight_layout(True)

            if savefigs:
                dataio.save_current_figure('scaling_precision_subpop_Mtot%d_{label}_{unique_id}.pdf' % M_tot_selected)

            plt.close(f)

    if plots_subpopulations_effects_kappa_fi:
        # From cache
        if caching_fisherinfo_filename is not None:
            if os.path.exists(caching_fisherinfo_filename):
                # Got file, open it and try to use its contents
                try:
                    with open(caching_fisherinfo_filename, 'rb') as file_in:
                        # Load and assign values
                        cached_data = pickle.load(file_in)
                        result_fisherinfo_Mratio = cached_data['result_fisherinfo_Mratio']
                        compute_fisher_info_perratioconj = False

                except IOError:
                    print "Error while loading ", caching_fisherinfo_filename, "falling back to computing the Fisher Info"

        if compute_fisher_info_perratioconj:
            # We did not save the Fisher info, but need it if we want to fit the mixture model with fixed kappa. So recompute it using the args_dicts

            result_fisherinfo_Mratio = np.empty((M_space.size, ratio_space.size))

            # Invert the all_args_i -> M, ratio_conj direction
            parameters_indirections = data_pbs.loaded_data['parameters_dataset_index']

            for M_i, M in enumerate(M_space):
                for ratio_conj_i, ratio_conj in enumerate(ratio_space):
                    # Get index of first dataset with the current ratio_conj (no need for the others, I think)
                    try:
                        arg_index = parameters_indirections[(M, ratio_conj)][0]

                        # Now using this dataset, reconstruct a RandomFactorialNetwork and compute the fisher info
                        curr_args = all_args[arg_index]

                        # curr_args['stimuli_generation'] = lambda T: np.linspace(-np.pi*0.6, np.pi*0.6, T)

                        (_, _, _, sampler) = launchers.init_everything(curr_args)

                        # Theo Fisher info
                        result_fisherinfo_Mratio[M_i, ratio_conj_i] = sampler.estimate_fisher_info_theocov()

                        # del curr_args['stimuli_generation']
                    except KeyError:
                        result_fisherinfo_Mratio[M_i, ratio_conj_i] = np.nan


            # Save everything to a file, for faster later plotting
            if caching_fisherinfo_filename is not None:
                try:
                    with open(caching_fisherinfo_filename, 'wb') as filecache_out:
                        data_cache = dict(result_fisherinfo_Mratio=result_fisherinfo_Mratio)
                        pickle.dump(data_cache, filecache_out, protocol=2)
                except IOError:
                    print "Error writing out to caching file ", caching_fisherinfo_filename

        # result_em_fits_kappa
        if False:

            for M_tot_selected_i, M_tot_selected in enumerate(M_space[::2]):

                M_conj_space = ((1.-ratio_space)*M_tot_selected).astype(int)
                M_feat_space = M_tot_selected - M_conj_space

                f, axes = plt.subplots(2, 2)
                axes[0, 0].plot(ratio_space, result_em_fits_kappa[2*M_tot_selected_i])
                axes[0, 0].set_xlabel('ratio')
                axes[0, 0].set_title('Fitted kappa')

                axes[1, 0].plot(ratio_space, utils.stddev_to_kappa(1./result_fisherinfo_Mratio[2*M_tot_selected_i]**0.5))
                axes[1, 0].set_xlabel('ratio')
                axes[1, 0].set_title('kappa_FI_mixed')

                f.suptitle('M_tot %d' % M_tot_selected, fontsize=15)
                f.set_tight_layout(True)

                if savefigs:
                    dataio.save_current_figure('scaling_kappa_subpop_Mtot%d_{label}_{unique_id}.pdf' % M_tot_selected)

                plt.close(f)

        utils.pcolor_2d_data((result_fisherinfo_Mratio - 2000)**2., log_scale=True, x=M_space, y=ratio_space, xlabel='M', ylabel='ratio', xlabel_format="%d", title='Dist to Fisher info 2000')
        if savefigs:
            dataio.save_current_figure('dist2000_fi_log_pcolor_{label}_{unique_id}.pdf')




    all_args = data_pbs.loaded_data['args_list']
    variables_to_save = []

    if savedata:
        dataio.save_variables_default(locals(), variables_to_save)

        dataio.make_link_output_to_dropbox(dropbox_current_experiment_folder='ratio_scaling_M')

    plt.show()

    return locals()
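# Hedged toy version (not in the original) of the masked-argmin pattern used above: for
# each M, read off the ratio whose mean precision is closest to a target, masking out rows
# where even the best distance exceeds a threshold.
def best_ratio_for_target(precisions_M_ratio, ratio_space, target, max_distance=100.):
    dist = (precisions_M_ratio - target) ** 2.
    best_idx = np.argmin(dist, axis=1)
    best_dist = dist[np.arange(dist.shape[0]), best_idx]
    return np.ma.masked_where(best_dist > max_distance, ratio_space[best_idx])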
def launcher_do_error_distributions_allT(args):
    '''
        Compute histograms of error distributions. Also get the histogram of bias towards nontargets.

        Do it for T = 1...T_all items.

        Looks like Bays 2009; used in the paper.
    '''

    print "Doing a piece of work for launcher_do_error_distributions_allT"

    try:
        # Convert Argparse.Namespace to dict
        all_parameters = vars(args)
    except TypeError:
        # Assume it's already done
        assert type(args) is dict, "args is neither Namespace nor dict, WHY?"
        all_parameters = args

    print all_parameters

    # Create DataIO
    #  (complete label with current variable state)
    dataio = DataIO(output_folder=all_parameters['output_directory'], label=all_parameters['label'].format(**all_parameters))
    save_every = 1
    run_counter = 0
    bins = 51

    # Parameters to vary
    T_all = all_parameters['T']
    T_space = np.arange(1, T_all+1)

    # Result arrays
    result_responses = np.nan*np.ones((T_space.size, all_parameters['N'], all_parameters['num_repetitions']))
    result_target = np.nan*np.ones((T_space.size, all_parameters['N'], all_parameters['num_repetitions']))
    result_nontargets = np.nan*np.ones((T_space.size, all_parameters['N'], T_all-1, all_parameters['num_repetitions']))
    result_em_fits = np.nan*np.ones((T_space.size, 5, all_parameters['num_repetitions']))  # kappa, mixt_target, mixt_nontarget, mixt_random, ll


    search_progress = progress.Progress(T_space.size*all_parameters['num_repetitions'])

    for repet_i in xrange(all_parameters['num_repetitions']):
        for T_i, T in enumerate(T_space):
            print "%.2f%%, %s left - %s" % (search_progress.percentage(), search_progress.time_remaining_str(), search_progress.eta_str())

            print "Fit for T=%d, %d/%d" % (T, repet_i+1, all_parameters['num_repetitions'])

            # Update parameter
            all_parameters['T'] = T

            ### WORK WORK WORK work? ###

            # Instantiate
            (_, _, _, sampler) = launchers.init_everything(all_parameters)

            # Sample
            sampler.run_inference(all_parameters)

            # Collect and store responses
            (responses, target, nontarget) = sampler.collect_responses()
            result_responses[T_i, :, repet_i] = responses
            result_target[T_i, :, repet_i] = target
            result_nontargets[T_i, :, :T_i, repet_i] = nontarget[:, :T_i]

            # Fit mixture model
            curr_params_fit = em_circularmixture.fit(*sampler.collect_responses())
            result_em_fits[T_i, :, repet_i] = [curr_params_fit[key] for key in ('kappa', 'mixt_target', 'mixt_nontargets', 'mixt_random', 'train_LL')]


            # Do plots
            sampler.plot_histogram_errors(bins=bins)
            dataio.save_current_figure('papertheo_histogram_errorsM%dsigmax%.2fT%d_{label}_{unique_id}.pdf' % tuple([all_parameters[key] for key in ('M', 'sigmax', 'T')]))

            if T > 1:
                sampler.plot_histogram_bias_nontarget(dataio=dataio)

            ### /Work ###

            search_progress.increment()
            if run_counter % save_every == 0 or search_progress.done():
                dataio.save_variables_default(locals())
            run_counter += 1

    # Finished
    dataio.save_variables_default(locals())

    print "All finished"
    return locals()
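# Hedged post-processing sketch (not in the original): the launcher above returns locals();
# result_em_fits has layout (T, parameter, repetition) with kappa at parameter index 0, so
# the mean fitted kappa per set size is a nanmean over the repetition axis.
def mean_kappa_per_T(launcher_result):
    return np.nanmean(launcher_result['result_em_fits'][:, 0, :], axis=-1)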
def launcher_do_average_posterior(args):
    '''
        Compute the average posterior for a fixed set of stimuli.

        Show graphically what it looks like.
    '''

    # TODO Could analyse it theoretically at some point, e.g. probability of answering nontarget?


    print "Doing a piece of work for launcher_do_average_posterior"

    try:
        # Convert Argparse.Namespace to dict
        all_parameters = vars(args)
    except TypeError:
        # Assume it's already done
        assert type(args) is dict, "args is neither Namespace nor dict, WHY?"
        all_parameters = args


    # Create DataIO
    #  (complete label with current variable state)
    dataio = DataIO(output_folder=all_parameters['output_directory'], label=all_parameters['label'].format(**all_parameters))

    # Fix some parameters
    all_parameters['stimuli_generation'] = lambda T: np.linspace(-np.pi*0.6, np.pi*0.6, T)
    all_parameters['stimuli_generation_recall'] = 'random'
    all_parameters['enforce_first_stimulus'] = False
    num_points = 500

    if 'do_precision' in all_parameters:
        do_precision = all_parameters['do_precision']
    else:
        do_precision = True

    result_all_log_posterior = np.nan*np.ones((all_parameters['N'], num_points))
    result_all_thetas = np.nan*np.empty(all_parameters['N'])

    search_progress = progress.Progress(all_parameters['N'])
    save_every = 10
    print_every = 10
    run_counter = 0
    ax_handle = None

    plt.ion()

    # all_parameters['rc_scale']  = rc_scale

    (random_network, data_gen, stat_meas, sampler) = launchers.init_everything(all_parameters)

    ### WORK WORK WORK work? ###
    all_angles = np.linspace(-np.pi, np.pi, num_points)

    if do_precision:
        print 'Precision...'
        sampler.sample_theta(num_samples=all_parameters['num_samples'], burn_samples=all_parameters['burn_samples'], selection_method=all_parameters['selection_method'], selection_num_samples=all_parameters['selection_num_samples'], integrate_tc_out=False, debug=True)

        result_all_thetas, targets, nontargets = sampler.collect_responses()


    print "Average posterior..."

    for n in xrange(all_parameters['N']):
        if run_counter % print_every == 0:
            print "%.2f%% %s/%s" % (search_progress.percentage(), search_progress.time_remaining_str(), search_progress.eta_str())

        result_all_log_posterior[n] = sampler.compute_likelihood_fullspace(n=n, all_angles=all_angles, num_points=num_points, should_exponentiate=False, remove_mean=True)[:, -1].T

        ### /Work ###

        search_progress.increment()
        if run_counter % save_every == 0 or search_progress.done():
            dataio.save_variables_default(locals())

            # Plots
            # plt.figure(1)
            # plt.plot(result_all_log_posterior.T, hold=False)

            # ax_handle = plot_mean_std_area(all_angles, nanmean(result_all_log_posterior, axis=0), nanstd(result_all_log_posterior, axis=0), ax_handle=ax_handle)
            # ax_handle.hold(False)

            # dataio.save_current_figure('FI_compare_theo_finite-precisionvstheo-{label}_{unique_id}.pdf')


        run_counter += 1

    #### Plots ###
    plot_mean_std_area(all_angles, nanmean(result_all_log_posterior, axis=0), nanstd(result_all_log_posterior, axis=0), ax_handle=ax_handle)
    dataio.save_current_figure('avg_posterior-posterior-{label}_{unique_id}.pdf')
    if do_precision:
        sampler.plot_histogram_errors(bins=50, nice_xticks=True)
        dataio.save_current_figure('avg_posterior-hist_errors-{label}_{unique_id}.pdf')

        sampler.plot_histogram_responses(bins=50, show_angles=True, nice_xticks=True)
        dataio.save_current_figure('avg_posterior-hist_responses-{label}_{unique_id}.pdf')

    print "All finished"

    plt.show()

    return locals()
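# Hedged post-processing sketch (not in the original): turn the mean (mean-removed)
# log-posterior returned in locals() into a normalised average posterior over the recall
# angle, using a simple trapezoidal normalisation.
def average_posterior_from_result(launcher_result):
    all_angles = launcher_result['all_angles']
    mean_log_post = np.nanmean(launcher_result['result_all_log_posterior'], axis=0)
    avg_post = np.exp(mean_log_post - np.nanmax(mean_log_post))
    avg_post /= np.trapz(avg_post, all_angles)
    return all_angles, avg_post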
def launcher_do_fit_mixturemodels_sequential_alltrecall(args):
    '''
        Run the model for 1..T items sequentially, for all possible trecall/T.
        Compute:
        - Precision of samples
        - EM mixture model fits, both the independent and the collapsed model.
        - Theoretical Fisher Information
        - EM mixture model distances to the set of datasets currently being fit.
    '''

    print "Doing a piece of work for launcher_do_fit_mixturemodels_sequential_alltrecall"

    all_parameters = utils.argparse_2_dict(args)
    print all_parameters

    if all_parameters['burn_samples'] + all_parameters['num_samples'] < 200:
        print "WARNING> you do not have enough samples I think!", all_parameters['burn_samples'] + all_parameters['num_samples']

    # Create DataIO
    #  (complete label with current variable state)
    dataio = DataIO.DataIO(output_folder=all_parameters['output_directory'], label=all_parameters['label'].format(**all_parameters))
    save_every = 1
    run_counter = 0

    # Load dataset to compare against
    data_gorgo11_sequ = load_experimental_data.load_data_gorgo11_sequential(data_dir=all_parameters['experiment_data_dir'], fit_mixture_model=True)
    gorgo11_sequ_T_space = np.unique(data_gorgo11_sequ['n_items'])


    # Parameters to vary
    T_max = all_parameters['T']
    T_space = np.arange(1, T_max+1)
    repetitions_axis = -1

    # Result arrays
    result_all_precisions = np.nan*np.empty((T_space.size, T_space.size, all_parameters['num_repetitions']))
    result_fi_theo = np.nan*np.empty((T_space.size, T_space.size, all_parameters['num_repetitions']))
    result_fi_theocov = np.nan*np.empty((T_space.size, T_space.size, all_parameters['num_repetitions']))
    result_em_fits = np.nan*np.empty((T_space.size, T_space.size, 6, all_parameters['num_repetitions']))  # kappa, mixt_target, mixt_nontarget, mixt_random, ll, bic
    result_em_fits_collapsed_tr = np.nan*np.empty((T_space.size, T_space.size, 4, all_parameters['num_repetitions']))  # kappa_tr, mixt_target_tr, mixt_nontarget_tr, mixt_random_tr
    result_em_fits_collapsed_summary = np.nan*np.empty((5, all_parameters['num_repetitions'])) # bic, ll, kappa_theta

    result_dist_gorgo11_sequ = np.nan*np.empty((T_space.size, T_space.size, 4, all_parameters['num_repetitions']))  # kappa, mixt_target, mixt_nontarget, mixt_random
    result_dist_gorgo11_sequ_emmixt_KL = np.nan*np.empty((T_space.size, T_space.size, all_parameters['num_repetitions']))

    result_dist_gorgo11_sequ_collapsed = np.nan*np.empty((T_space.size, T_space.size, 4, all_parameters['num_repetitions']))
    result_dist_gorgo11_sequ_collapsed_emmixt_KL = np.nan*np.empty((T_space.size, T_space.size, all_parameters['num_repetitions']))

    gorgo11_sequ_collapsed_mixtmod_mean = data_gorgo11_sequ['collapsed_em_fits_doublepowerlaw_array']


    # If desired, will automatically save all Model responses.
    if all_parameters['collect_responses']:
        print "--- Collecting all responses..."
        result_responses = np.nan*np.empty((T_space.size, T_space.size, all_parameters['N'], all_parameters['num_repetitions']))
        result_target = np.nan*np.empty((T_space.size, T_space.size, all_parameters['N'], all_parameters['num_repetitions']))
        result_nontargets = np.nan*np.empty((T_space.size, T_space.size, all_parameters['N'], T_max-1, all_parameters['num_repetitions']))

    search_progress = progress.Progress(T_space.size*(T_space.size + 1)/2.*all_parameters['num_repetitions'])

    for repet_i in xrange(all_parameters['num_repetitions']):
        for T_i, T in enumerate(T_space):
            for trecall_i, trecall in enumerate(np.arange(T, 0, -1)):
                # Inverting indexing of trecall, to be consistent. trecall_i 0 == last item.
                # But trecall still means the actual time of recall!
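                # Worked example of the convention (added note): for T = 3,
                #   trecall_i: 0, 1, 2  ->  trecall: 3, 2, 1  ->  fixed_cued_feature_time: 2, 1, 0
                # i.e. trecall_i == 0 always refers to the last-presented item.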
                print "%.2f%%, %s left - %s" % (search_progress.percentage(), search_progress.time_remaining_str(), search_progress.eta_str())
                print "Fit for T=%d, tr=%d, %d/%d" % (T, trecall, repet_i+1, all_parameters['num_repetitions'])

                # Update parameter
                all_parameters['T'] = T
                all_parameters['fixed_cued_feature_time'] = trecall - 1

                ### WORK WORK WORK work? ###
                # Instantiate
                (_, _, _, sampler) = launchers.init_everything(all_parameters)

                # Sample
                sampler.run_inference(all_parameters)

                # Compute precision
                print "get precision..."
                result_all_precisions[T_i, trecall_i, repet_i] = sampler.get_precision()

                # Fit mixture model, independent
                print "fit mixture model..."
                curr_params_fit = sampler.fit_mixture_model(use_all_targets=False)
                result_em_fits[T_i, trecall_i, :, repet_i] = [curr_params_fit[key] for key in ['kappa', 'mixt_target', 'mixt_nontargets_sum', 'mixt_random', 'train_LL', 'bic']]

                # Compute fisher info
                print "compute fisher info"
                result_fi_theo[T_i, trecall_i, repet_i] = sampler.estimate_fisher_info_theocov(use_theoretical_cov=False)
                result_fi_theocov[T_i, trecall_i, repet_i] = sampler.estimate_fisher_info_theocov(use_theoretical_cov=True)

                # Compute distances to datasets (this is for the non-collapsed stuff, not the best)
                if T in gorgo11_sequ_T_space:
                    gorgo11_sequ_mixtures_mean = data_gorgo11_sequ['em_fits_nitems_trecall_arrays'][gorgo11_sequ_T_space==T, trecall_i, :4].flatten()

                    result_dist_gorgo11_sequ[T_i, trecall_i, :, repet_i] = (gorgo11_sequ_mixtures_mean - result_em_fits[T_i, trecall_i, :4, repet_i])**2.
                    result_dist_gorgo11_sequ_emmixt_KL[T_i, trecall_i, repet_i] = utils.KL_div(result_em_fits[T_i, trecall_i, 1:4, repet_i], gorgo11_sequ_mixtures_mean[1:])


                # If needed, store responses
                if all_parameters['collect_responses']:
                    print "collect responses"
                    (responses, target, nontarget) = sampler.collect_responses()
                    result_responses[T_i, trecall_i, :, repet_i] = responses
                    result_target[T_i, trecall_i, :, repet_i] = target
                    result_nontargets[T_i, trecall_i, :, :T_i, repet_i] = nontarget


                print "CURRENT RESULTS:\n", result_all_precisions[T_i, trecall_i, repet_i], curr_params_fit, result_fi_theo[T_i, trecall_i, repet_i], result_fi_theocov[T_i, trecall_i, repet_i], np.sum(result_dist_gorgo11_sequ[T_i, trecall_i, :, repet_i]), np.sum(result_dist_gorgo11_sequ_emmixt_KL[T_i, trecall_i, repet_i]), "\n"
                ### /Work ###

                search_progress.increment()
                if run_counter % save_every == 0 or search_progress.done():
                    dataio.save_variables_default(locals())
                run_counter += 1

        # Fit Collapsed mixture model
        # TODO check dimensionality...
        print 'Fitting Collapsed double powerlaw mixture model...'
        params_fit = em_circularmixture_parametrickappa_doublepowerlaw.fit(T_space, result_responses[..., repet_i], result_target[..., repet_i], result_nontargets[..., repet_i], debug=False)

        # First store the parameters that depend on T/trecall
        for i, key in enumerate(['kappa', 'mixt_target_tr', 'mixt_nontargets_tr', 'mixt_random_tr']):
            result_em_fits_collapsed_tr[..., i, repet_i] =  params_fit[key]

        # Then the ones that do not, only one per full collapsed fit.
        result_em_fits_collapsed_summary[0, repet_i] = params_fit['bic']
        # result_em_fits_collapsed_summary[1, repet_i] = params_fit['train_LL']
        result_em_fits_collapsed_summary[2:, repet_i] = params_fit['kappa_theta']

        # Compute distances to dataset for collapsed model
        result_dist_gorgo11_sequ_collapsed[..., repet_i] = (gorgo11_sequ_collapsed_mixtmod_mean - result_em_fits_collapsed_tr[..., repet_i])**2.
        result_dist_gorgo11_sequ_collapsed_emmixt_KL[..., repet_i] = utils.KL_div(result_em_fits_collapsed_tr[..., 1:4, repet_i], gorgo11_sequ_collapsed_mixtmod_mean[..., 1:], axis=-1)


    # Finished
    dataio.save_variables_default(locals())

    print "All finished"
    return locals()
def launcher_do_fit_mixturemodel_dualrecall(args):
    '''
        Run the model for T items, trying to fit
        the DualRecall dataset, which has two conditions.

        Get:
        - Precision
        - EM mixture model fits
        - Theoretical Fisher Information
        - EM Mixture model distances
    '''

    print "Doing a piece of work for launcher_do_fit_mixturemodel_dualrecall"

    all_parameters = utils.argparse_2_dict(args)
    print all_parameters

    if all_parameters['burn_samples'] + all_parameters['num_samples'] < 200:
        print "WARNING> you do not have enough samples I think!", all_parameters['burn_samples'] + all_parameters['num_samples']


    # Create DataIO
    #  (complete label with current variable state)
    dataio = DataIO.DataIO(output_folder=all_parameters['output_directory'], label=all_parameters['label'].format(**all_parameters))
    save_every = 1
    run_counter = 0


    # Load datasets to compare against
    data_dualrecall = load_experimental_data.load_data_dualrecall(data_dir=all_parameters['experiment_data_dir'], fit_mixture_model=True)
    dualrecall_T_space = data_dualrecall['data_to_fit']['n_items']

    dualrecall_experimental_angle_emfits_mean = data_dualrecall['em_fits_angle_nitems_arrays']['mean']
    dualrecall_experimental_colour_emfits_mean = data_dualrecall['em_fits_colour_nitems_arrays']['mean']

    # Parameters to vary
    repetitions_axis = -1

    # Result arrays
    result_all_precisions = np.nan*np.empty((all_parameters['num_repetitions']))
    result_fi_theo = np.nan*np.empty((all_parameters['num_repetitions']))
    result_fi_theocov = np.nan*np.empty((all_parameters['num_repetitions']))
    result_em_fits = np.nan*np.empty((6, all_parameters['num_repetitions']))  # kappa, mixt_target, mixt_nontarget, mixt_random, ll, bic
    result_dist_dualrecall_angle = np.nan*np.empty((4, all_parameters['num_repetitions']))  # kappa, mixt_target, mixt_nontarget, mixt_random
    result_dist_dualrecall_angle_emmixt_KL = np.nan*np.empty((all_parameters['num_repetitions']))
    result_dist_dualrecall_colour = np.nan*np.empty((4, all_parameters['num_repetitions']))  # kappa, mixt_target, mixt_nontarget, mixt_random
    result_dist_dualrecall_colour_emmixt_KL = np.nan*np.empty((all_parameters['num_repetitions']))

    # If desired, will automatically save all Model responses.
    if all_parameters['collect_responses']:
        print "--- Collecting all responses..."
        result_responses = np.nan*np.ones((all_parameters['N'], all_parameters['num_repetitions']))
        result_target = np.nan*np.ones((all_parameters['N'], all_parameters['num_repetitions']))
        result_nontargets = np.nan*np.ones((all_parameters['N'], all_parameters['T'] - 1, all_parameters['num_repetitions']))

    search_progress = progress.Progress(all_parameters['num_repetitions'])

    for repet_i in xrange(all_parameters['num_repetitions']):
        print "%.2f%%, %s left - %s" % (search_progress.percentage(), search_progress.time_remaining_str(), search_progress.eta_str())

        print "Fit for T=%d, %d/%d" % (all_parameters['T'], repet_i+1, all_parameters['num_repetitions'])

        ## Update parameter

        ### WORK WORK WORK work? ###
        # Instantiate
        (_, _, _, sampler) = launchers.init_everything(all_parameters)

        # Sample
        sampler.run_inference(all_parameters)

        # Compute precision
        print "get precision..."
        result_all_precisions[repet_i] = sampler.get_precision()

        # Fit mixture model
        print "fit mixture model..."
        curr_params_fit = sampler.fit_mixture_model(use_all_targets=False)
        # curr_params_fit['mixt_nontargets_sum'] = np.sum(curr_params_fit['mixt_nontargets'])
        result_em_fits[:, repet_i] = [curr_params_fit[key] for key in ['kappa', 'mixt_target', 'mixt_nontargets_sum', 'mixt_random', 'train_LL', 'bic']]

        # Compute fisher info
        print "compute fisher info"
        result_fi_theo[repet_i] = sampler.estimate_fisher_info_theocov(use_theoretical_cov=False)
        result_fi_theocov[repet_i] = sampler.estimate_fisher_info_theocov(use_theoretical_cov=True)

        # Compute distances to datasets
        if all_parameters['T'] in dualrecall_T_space:
            # Angle trials
            result_dist_dualrecall_angle[:, repet_i] = (dualrecall_experimental_angle_emfits_mean[:, dualrecall_T_space == all_parameters['T']].flatten() - result_em_fits[:4, repet_i])**2.
            result_dist_dualrecall_angle_emmixt_KL[repet_i] = utils.KL_div(result_em_fits[1:4, repet_i], dualrecall_experimental_angle_emfits_mean[1:, dualrecall_T_space==all_parameters['T']].flatten())

            # Colour trials
            result_dist_dualrecall_colour[:, repet_i] = (dualrecall_experimental_colour_emfits_mean[:, dualrecall_T_space == all_parameters['T']].flatten() - result_em_fits[:4, repet_i])**2.
            result_dist_dualrecall_colour_emmixt_KL[repet_i] = utils.KL_div(result_em_fits[1:4, repet_i], dualrecall_experimental_colour_emfits_mean[1:, dualrecall_T_space==all_parameters['T']].flatten())

        # If needed, store responses
        if all_parameters['collect_responses']:
            (responses, target, nontarget) = sampler.collect_responses()
            result_responses[:, repet_i] = responses
            result_target[:, repet_i] = target
            result_nontargets[..., repet_i] = nontarget

            print "collected responses"


        print "CURRENT RESULTS:\n", result_all_precisions[repet_i], curr_params_fit, result_fi_theo[repet_i], result_fi_theocov[repet_i], np.sum(result_dist_dualrecall_angle[:, repet_i]), np.sum(result_dist_dualrecall_colour[:, repet_i]), "\n"
        ### /Work ###

        search_progress.increment()
        if run_counter % save_every == 0 or search_progress.done():
            dataio.save_variables_default(locals())
        run_counter += 1

    # Finished
    dataio.save_variables_default(locals())

    print "All finished"
    return locals()
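# Hedged illustration (not in the original) of the mixture-weight distance used above:
# utils.KL_div is assumed to compute sum(p * log(p / q)); a plain-numpy equivalent for two
# vectors of (target, nontarget, random) mixture weights would be:
def mixture_weight_kl(model_weights, data_weights):
    p = np.asarray(model_weights, dtype=float)
    q = np.asarray(data_weights, dtype=float)
    return np.sum(p * np.log(p / q))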
def launcher_do_noise_output_effect_allT(args):
    '''
        Run the model for 1..T items, varying sigma_output
    '''

    print "Doing a piece of work for launcher_do_noise_output_effect_allT"

    all_parameters = utils.argparse_2_dict(args)
    print all_parameters

    if all_parameters['burn_samples'] + all_parameters['num_samples'] < 200:
        print "WARNING> you do not have enough samples I think!", all_parameters['burn_samples'] + all_parameters['num_samples']

    if 'plots_during_simulation_callback' in all_parameters:
        plots_during_simulation_callback = all_parameters['plots_during_simulation_callback']
        del all_parameters['plots_during_simulation_callback']
    else:
        plots_during_simulation_callback = None

    # Create DataIO
    #  (complete label with current variable state)
    dataio = DataIO.DataIO(output_folder=all_parameters['output_directory'], label=all_parameters['label'].format(**all_parameters))
    save_every = 1
    run_counter = 0

    # Parameters to vary
    T_max = all_parameters['T']
    T_space = np.arange(1, T_max+1)
    repetitions_axis = -1

    # Parameters to vary
    precision_sigmaoutput = 20
    sigmaoutput_space = np.linspace(0.0, 0.5, precision_sigmaoutput)

    # Result arrays
    result_all_precisions = np.nan*np.ones((sigmaoutput_space.size, T_max, all_parameters['num_repetitions']))
    result_em_fits = np.nan*np.ones((sigmaoutput_space.size, T_max, 6, all_parameters['num_repetitions']))  # kappa, mixt_target, mixt_nontarget, mixt_random, ll, bic

    search_progress = progress.Progress(sigmaoutput_space.size*T_max*all_parameters['num_repetitions'])

    for repet_i in xrange(all_parameters['num_repetitions']):
        for sigmaoutput_i, sigma_output in enumerate(sigmaoutput_space):
            for T_i, T in enumerate(T_space):
                print "%.2f%%, %s left - %s" % (search_progress.percentage(), search_progress.time_remaining_str(), search_progress.eta_str())

                print "Fit for sigma_output=%.3f, T %d, %d/%d" % (sigma_output, T, repet_i+1, all_parameters['num_repetitions'])

                # Update parameter
                all_parameters['sigma_output'] = sigma_output
                all_parameters['T'] = T

                ### WORK WORK WORK work? ###

                # Fix some parameters
                # all_parameters['stimuli_generation'] = 'separated'
                # all_parameters['slice_width'] = np.pi/64.

                # Instantiate
                (_, _, _, sampler) = launchers.init_everything(all_parameters)

                # Sample
                sampler.run_inference(all_parameters)

                # Compute precision
                print "get precision..."
                result_all_precisions[sigmaoutput_i, T_i, repet_i] = sampler.get_precision()

                # Fit mixture model
                print "fit mixture model..."
                curr_params_fit = sampler.fit_mixture_model(use_all_targets=False)
                result_em_fits[sigmaoutput_i, T_i, :, repet_i] = [curr_params_fit[key] for key in ('kappa', 'mixt_target', 'mixt_nontargets_sum', 'mixt_random', 'train_LL', 'bic')]

                print result_all_precisions[sigmaoutput_i, T_i, repet_i], curr_params_fit

                ## Run callback function if exists
                if plots_during_simulation_callback:
                    print "Doing plots..."
                    try:
                        # Be extra safe: if plotting fails, the simulation must continue!
                        plots_during_simulation_callback['function'](locals(), plots_during_simulation_callback['parameters'])
                        print "plots done."
                    except Exception:
                        print "error during plotting callback function", plots_during_simulation_callback['function'], plots_during_simulation_callback['parameters']

                ### /Work ###
                search_progress.increment()
                if run_counter % save_every == 0 or search_progress.done():
                    dataio.save_variables_default(locals())
                run_counter += 1

    # Finished
    dataio.save_variables_default(locals())

    print "All finished"
    return locals()
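# Hedged sketch (not in the original) of the plots_during_simulation_callback protocol read
# by the launcher above: a dict with a 'function' taking (locals_dict, parameters) and a
# 'parameters' payload; the plotting body below is only a placeholder.
def example_plot_callback(variables, parameters):
    # 'variables' is the launcher's locals(); e.g. print the running mean precision per T
    print parameters.get('message', 'callback'), np.nanmean(variables['result_all_precisions'], axis=-1)

example_callback_infos = dict(function=example_plot_callback,
                              parameters=dict(message='noise_output run'))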
def launcher_do_fitexperimentsinglet(args):
    '''
        Perform a simple estimation of the log-likelihood of the data, under a model with the provided parameters.

        If inference_method is not 'none', also fit an EM mixture model and get the precision and the Fisher information.
    '''

    print "Doing a piece of work for launcher_do_fitexperimentsinglet"


    all_parameters = argparse_2_dict(args)
    print all_parameters

    if all_parameters['burn_samples'] + all_parameters['num_samples'] < 200:
        print "WARNING> you do not have enough samples I think!", all_parameters['burn_samples'] + all_parameters['num_samples']

    # Force some parameters
    all_parameters.setdefault('experiment_ids', ['gorgo11', 'bays09', 'dualrecall'])
    # Use the provided fitexperiment_parameters if any, otherwise build a default one
    fitexperiment_parameters = all_parameters.get(
        'fitexperiment_parameters',
        dict(experiment_ids=all_parameters['experiment_ids'], fit_mixture_model=True))

    print "\n T={:d}, experiment_ids {}\n".format(all_parameters['T'], all_parameters['experiment_ids'])

    # Create DataIO
    #  (complete label with current variable state)
    dataio = DataIO(output_folder=all_parameters['output_directory'], label=all_parameters['label'].format(**all_parameters))
    save_every = 1
    run_counter = 0

    # Result arrays
    result_fitexperiments = np.nan*np.empty((3, all_parameters['num_repetitions']))  # BIC total, LL, LL90
    result_fitexperiments_all = np.nan*np.empty((3, len(all_parameters['experiment_ids']), all_parameters['num_repetitions']))  # BIC, LL, LL90; per experiments,
    if all_parameters['inference_method'] != 'none':
        result_all_precisions = np.nan*np.empty((all_parameters['num_repetitions']))
        result_em_fits = np.nan*np.empty((6, all_parameters['num_repetitions']))   # kappa, mixt_target, mixt_nontarget, mixt_random, ll, bic
        result_fi_theo = np.nan*np.empty((all_parameters['num_repetitions']))
        result_fi_theocov = np.nan*np.empty((all_parameters['num_repetitions']))

    if all_parameters['sigma_output'] > 0.0:
        # We asked for the additional noise convolved, need to take it into account.
        result_fitexperiments_noiseconv = np.nan*np.empty((3, all_parameters['num_repetitions']))  # bic (K+1), LL conv, LL90 conv
        result_fitexperiments_noiseconv_all = np.nan*np.empty((3, len(all_parameters['experiment_ids']), all_parameters['num_repetitions']))  # bic, LL conv, LL90 conv


    search_progress = progress.Progress(all_parameters['num_repetitions'])
    for repet_i in xrange(all_parameters['num_repetitions']):

        print "%d/%d | %.2f%%, %s left - %s" % (repet_i+1, all_parameters['num_repetitions'], search_progress.percentage(), search_progress.time_remaining_str(), search_progress.eta_str())

        ### WORK WORK WORK work? ###
        # Instantiate
        (_, _, _, sampler) = launchers.init_everything(all_parameters)

        ### Do the actual FitExperiment computations
        fit_exp = FitExperimentSingleT(sampler, fitexperiment_parameters)

        ## Compute and store the BIC and LL
        if all_parameters['code_type'] == 'mixed':
            K_nb_params = 3
        else:
            K_nb_params = 2

        bic_loglik_dict = fit_exp.compute_bic_loglik_all_datasets(K=K_nb_params)

        for exper_i, exper in enumerate(all_parameters['experiment_ids']):
            try:
                result_fitexperiments_all[0, exper_i, repet_i] = bic_loglik_dict[exper]['bic']
                result_fitexperiments_all[1, exper_i, repet_i] = bic_loglik_dict[exper]['LL']
                result_fitexperiments_all[2, exper_i, repet_i] = bic_loglik_dict[exper]['LL90']
            except TypeError:
                pass

        result_fitexperiments[:, repet_i] = np.nansum(result_fitexperiments_all[..., repet_i], axis=1)

        if all_parameters['sigma_output'] > 0.0:
            # Compute the loglikelihoods with the convolved posterior. Slowish.

            ## Compute and store the BIC and LL
            bic_loglik_noise_convolved_dict = fit_exp.compute_bic_loglik_noise_convolved_all_datasets(precision=150)

            for exper_i, exper in enumerate(all_parameters['experiment_ids']):
                try:
                    result_fitexperiments_noiseconv_all[0, exper_i, repet_i] = bic_loglik_noise_convolved_dict[exper]['bic']
                    result_fitexperiments_noiseconv_all[1, exper_i, repet_i] = bic_loglik_noise_convolved_dict[exper]['LL']
                    result_fitexperiments_noiseconv_all[2, exper_i, repet_i] = bic_loglik_noise_convolved_dict[exper]['LL90']
                except TypeError:
                    pass

            result_fitexperiments_noiseconv[:, repet_i] = np.nansum(result_fitexperiments_noiseconv_all[:, :, repet_i], axis=1)

        # If sampling_method is not none, try to get em_fits and others. EXTRA SLOW.
        if not all_parameters['inference_method'] == 'none':
            parameters = dict([[key, eval(key)] for key in ['all_parameters', 'repet_i', 'result_all_precisions', 'result_em_fits', 'result_fi_theo', 'result_fi_theocov']])

            def additional_computations(sampler, parameters):
                # all_parameters, repet_i and the result arrays are picked up from the
                # enclosing scope (closure); assigning into locals() would have no effect.

                # Sample
                print "sampling..."
                sampler.run_inference(all_parameters)

                # Compute precision
                print "get precision..."
                result_all_precisions[repet_i] = sampler.get_precision()

                # Fit mixture model
                print "fit mixture model..."
                curr_params_fit = sampler.fit_mixture_model(use_all_targets=True)
                result_em_fits[:, repet_i] = [curr_params_fit[key] for key in ['kappa', 'mixt_target', 'mixt_nontargets_sum', 'mixt_random', 'train_LL', 'bic']]

                # Compute fisher info
                print "compute fisher info"
                result_fi_theo[repet_i] = sampler.estimate_fisher_info_theocov(use_theoretical_cov=False)
                result_fi_theocov[repet_i] = sampler.estimate_fisher_info_theocov(use_theoretical_cov=True)

            # Apply that on each dataset!
            fct_infos = dict(fct=additional_computations, parameters=parameters)
            fit_exp.apply_fct_all_datasets(fct_infos)


        print "CURRENT RESULTS:"
        if all_parameters['inference_method'] != 'none':
            print result_all_precisions[repet_i], result_em_fits[:, repet_i], result_fi_theo[repet_i], result_fi_theocov[repet_i]
        print "Fits LL no noise:", bic_loglik_dict

        if all_parameters['sigma_output'] > 0.0:
            print "Fits LL output noise %.2f: %s" %  (all_parameters['sigma_output'], bic_loglik_noise_convolved_dict)

        ### /Work ###

        search_progress.increment()
        if run_counter % save_every == 0 or search_progress.done():
            dataio.save_variables_default(locals())
        run_counter += 1

    ### /Work ###

    additional_variables = ['fitexperiment_parameters']
    dataio.save_variables_default(locals(), additional_variables)

    #### Plots ###

    print "All finished"

    return locals()
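# Hedged formula sketch (not in the original): compute_bic_loglik_all_datasets is called
# above with K = 3 free parameters for a mixed code and K = 2 otherwise. Under the standard
# definition, the BIC for a dataset of N datapoints with log-likelihood LL is
# K * log(N) - 2 * LL; whether the library uses exactly this form is an assumption.
def bic_from_loglik(loglik, n_datapoints, k_params):
    return k_params * np.log(n_datapoints) - 2. * loglik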
def launcher_do_check_scaling_ratio_with_M(args):
    '''
        Reviewer 3 asked whether the proportion of conjunctive units varies with M when a given precision is to be achieved.

        Check it.
    '''

    print "Doing a piece of work for launcher_do_check_scaling_ratio_with_M"

    all_parameters = utils.argparse_2_dict(args)
    print all_parameters

    if all_parameters['burn_samples'] + all_parameters['num_samples'] < 200:
        print "WARNING> you do not have enough samples I think!", all_parameters['burn_samples'] + all_parameters['num_samples']

    if 'plots_during_simulation_callback' in all_parameters:
        plots_during_simulation_callback = all_parameters['plots_during_simulation_callback']
        del all_parameters['plots_during_simulation_callback']
    else:
        plots_during_simulation_callback = None

    # Create DataIO
    #  (complete label with current variable state)
    dataio = DataIO.DataIO(output_folder=all_parameters['output_directory'], label=all_parameters['label'].format(**all_parameters))
    save_every = 1
    run_counter = 0

    # Fix some parameters
    all_parameters['autoset_parameters'] = True

    # Parameters to vary
    nb_M_space = 10
    M_max = 800
    M_min = 20
    M_space = np.arange(M_min, M_max, np.ceil((M_max - M_min)/float(nb_M_space)), dtype=int)
    nb_ratio_space = 10
    ratio_space = np.linspace(0.0001, 1.0, nb_ratio_space)

    # Result arrays
    result_all_precisions = np.nan*np.ones((M_space.size, ratio_space.size, all_parameters['num_repetitions']))
    result_em_fits = np.nan*np.ones((M_space.size, ratio_space.size, 5, all_parameters['num_repetitions']))  # kappa, mixt_target, mixt_nontarget, mixt_random, ll

    search_progress = progress.Progress(M_space.size*ratio_space.size*all_parameters['num_repetitions'])

    for repet_i in xrange(all_parameters['num_repetitions']):
        for M_i, M in enumerate(M_space):
            for ratio_i, ratio in enumerate(ratio_space):
                print "%.2f%%, %s left - %s" % (search_progress.percentage(), search_progress.time_remaining_str(), search_progress.eta_str())

                print "Fit for M=%d, ratio=%.3f  %d/%d" % (M, ratio, repet_i+1, all_parameters['num_repetitions'])

                # Update parameter
                all_parameters['M'] = M
                all_parameters['ratio_conj'] = ratio

                ### WORK WORK WORK work? ###


                try:
                    # Instantiate
                    (_, _, _, sampler) = launchers.init_everything(all_parameters)

                    # Sample
                    sampler.run_inference(all_parameters)

                    # Compute precision
                    print "get precision..."
                    result_all_precisions[M_i, ratio_i, repet_i] = sampler.get_precision()

                    # Fit mixture model
                    print "fit mixture model..."
                    curr_params_fit = sampler.fit_mixture_model(use_all_targets=False)
                    result_em_fits[M_i, ratio_i, :, repet_i] = [curr_params_fit[key] for key in ('kappa', 'mixt_target', 'mixt_nontargets_sum', 'mixt_random', 'train_LL')]

                except Exception:
                    # oh well...
                    print "something failed here, so sad"

                print result_all_precisions[M_i, ratio_i, repet_i], curr_params_fit

                ## Run callback function if exists
                if plots_during_simulation_callback:
                    print "Doing plots..."
                    try:
                        # Be extra safe: if this fails, the simulation must still continue!
                        plots_during_simulation_callback['function'](locals(), plots_during_simulation_callback['parameters'])
                        print "plots done."
                    except Exception as e:
                        print "error during plotting callback function", plots_during_simulation_callback['function'], plots_during_simulation_callback['parameters']
                        print e
                        traceback.print_exc()

                ### /Work ###
                search_progress.increment()
                if run_counter % save_every == 0 or search_progress.done():
                    dataio.save_variables_default(locals())
                run_counter += 1

    # Finished
    dataio.save_variables_default(locals())

    print "All finished"
    return locals()
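# --- Illustrative sketch (editor's addition, not part of the original code) ---
# launcher_do_check_scaling_ratio_with_M above scans a full (M, ratio_conj) grid,
# repeated num_repetitions times, into NaN-initialised result arrays indexed by that
# grid. A minimal, standalone reconstruction of the grid and result-array layout,
# using only numpy; the helper name and default arguments are hypothetical:
def _sketch_scaling_grid(nb_M_space=10, M_min=20, M_max=800, nb_ratio_space=10, num_repetitions=3):
    import numpy as np
    # Integer M values, spaced so that roughly nb_M_space points cover [M_min, M_max)
    M_space = np.arange(M_min, M_max, np.ceil((M_max - M_min) / float(nb_M_space)), dtype=int)
    # Conjunctivity ratio from ~0 to 1
    ratio_space = np.linspace(0.0001, 1.0, nb_ratio_space)
    # Same layout as result_all_precisions: (M, ratio, repetition), NaN until filled
    result_all_precisions = np.nan * np.ones((M_space.size, ratio_space.size, num_repetitions))
    return M_space, ratio_space, result_all_precisions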
def plots_specific_stimuli_mixed(data_pbs, generator_module=None):
    '''
        Reload and plot the behaviour of the mixed population code on specific
        stimuli of 3 items.
    '''

    #### SETUP
    #
    savefigs = True
    savedata = True

    plot_per_min_dist_all = False
    specific_plots_paper = False
    plots_emfit_allitems = False
    plot_min_distance_effect = True

    compute_bootstraps = False

    should_fit_allitems_model = True
    # caching_emfit_filename = None
    mixturemodel_to_use = 'allitems_uniquekappa'
    # caching_emfit_filename = os.path.join(generator_module.pbs_submission_infos['simul_out_dir'], 'cache_emfitallitems_uniquekappa.pickle')
    # mixturemodel_to_use = 'allitems_fikappa'

    caching_emfit_filename = os.path.join(generator_module.pbs_submission_infos['simul_out_dir'], 'cache_emfit%s.pickle' % mixturemodel_to_use)

    compute_fisher_info_perratioconj = True
    caching_fisherinfo_filename = os.path.join(generator_module.pbs_submission_infos['simul_out_dir'], 'cache_fisherinfo.pickle')

    colormap = None  # or 'cubehelix'
    plt.rcParams['font.size'] = 16
    #
    #### /SETUP

    print "Order parameters: ", generator_module.dict_parameters_range.keys()

    all_args = data_pbs.loaded_data['args_list']
    result_all_precisions_mean = utils.nanmean(np.squeeze(data_pbs.dict_arrays['result_all_precisions']['results']), axis=-1)
    result_all_precisions_std = utils.nanstd(np.squeeze(data_pbs.dict_arrays['result_all_precisions']['results']), axis=-1)
    result_em_fits_mean = utils.nanmean(np.squeeze(data_pbs.dict_arrays['result_em_fits']['results']), axis=-1)
    result_em_fits_std = utils.nanstd(np.squeeze(data_pbs.dict_arrays['result_em_fits']['results']), axis=-1)
    result_em_kappastddev_mean = utils.nanmean(utils.kappa_to_stddev(np.squeeze(data_pbs.dict_arrays['result_em_fits']['results'])[..., 0, :]), axis=-1)
    result_em_kappastddev_std = utils.nanstd(utils.kappa_to_stddev(np.squeeze(data_pbs.dict_arrays['result_em_fits']['results'])[..., 0, :]), axis=-1)
    result_responses_all = np.squeeze(data_pbs.dict_arrays['result_responses']['results'])
    result_target_all = np.squeeze(data_pbs.dict_arrays['result_target']['results'])
    result_nontargets_all = np.squeeze(data_pbs.dict_arrays['result_nontargets']['results'])

    nb_repetitions = result_responses_all.shape[-1]
    K = result_nontargets_all.shape[-2]
    N = result_responses_all.shape[-2]

    enforce_min_distance_space = data_pbs.loaded_data['parameters_uniques']['enforce_min_distance']
    sigmax_space = data_pbs.loaded_data['parameters_uniques']['sigmax']
    ratio_space = data_pbs.loaded_data['datasets_list'][0]['ratio_space']

    print enforce_min_distance_space
    print sigmax_space
    print ratio_space
    print result_all_precisions_mean.shape, result_em_fits_mean.shape
    print result_responses_all.shape

    dataio = DataIO(output_folder=generator_module.pbs_submission_infos['simul_out_dir'] + '/outputs/', label='global_' + dataset_infos['save_output_filename'])

    # Reload cached emfitallitems
    if caching_emfit_filename is not None:
        if os.path.exists(caching_emfit_filename):
            # Got file, open it and try to use its contents
            try:
                with open(caching_emfit_filename, 'r') as file_in:
                    # Load and assign values
                    print "Reloader EM fits from cache", caching_emfit_filename
                    cached_data = pickle.load(file_in)
                    result_emfitallitems = cached_data['result_emfitallitems']
                    mixturemodel_used = cached_data.get('mixturemodel_used', '')

                    if mixturemodel_used != mixturemodel_to_use:
                        print "warning, reloaded model used a different mixture model class"
                    should_fit_allitems_model = False

            except IOError:
                print "Error while loading ", caching_emfit_filename, "falling back to computing the EM fits"


    # Load the Fisher Info from cache if exists. If not, compute it.
    if caching_fisherinfo_filename is not None:
        if os.path.exists(caching_fisherinfo_filename):
            # Got file, open it and try to use its contents
            try:
                with open(caching_fisherinfo_filename, 'r') as file_in:
                    # Load and assign values
                    cached_data = pickle.load(file_in)
                    result_fisherinfo_mindist_sigmax_ratio = cached_data['result_fisherinfo_mindist_sigmax_ratio']
                    compute_fisher_info_perratioconj = False

            except IOError:
                print "Error while loading ", caching_fisherinfo_filename, "falling back to computing the Fisher Info"

    if compute_fisher_info_perratioconj:
        # We did not save the Fisher info, but need it if we want to fit the mixture model with fixed kappa. So recompute them using the args_dicts

        result_fisherinfo_mindist_sigmax_ratio = np.empty((enforce_min_distance_space.size, sigmax_space.size, ratio_space.size))

        # Invert the all_args_i -> min_dist, sigmax indexing
        parameters_indirections = data_pbs.loaded_data['parameters_dataset_index']

        # min_dist_i, sigmax_level_i, ratio_i
        for min_dist_i, min_dist in enumerate(enforce_min_distance_space):
            for sigmax_i, sigmax in enumerate(sigmax_space):
                # Get index of first dataset with the current (min_dist, sigmax) (no need for the others, I think)
                arg_index = parameters_indirections[(min_dist, sigmax)][0]

                # Now using this dataset, reconstruct a RandomFactorialNetwork and compute the fisher info
                curr_args = all_args[arg_index]

                for ratio_conj_i, ratio_conj in enumerate(ratio_space):
                    # Update param
                    curr_args['ratio_conj'] = ratio_conj
                    # curr_args['stimuli_generation'] = 'specific_stimuli'

                    (_, _, _, sampler) = launchers.init_everything(curr_args)

                    # Theo Fisher info
                    result_fisherinfo_mindist_sigmax_ratio[min_dist_i, sigmax_i, ratio_conj_i] = sampler.estimate_fisher_info_theocov()

                    print "Min dist: %.2f, Sigmax: %.2f, Ratio: %.2f: %.3f" % (min_dist, sigmax, ratio_conj, result_fisherinfo_mindist_sigmax_ratio[min_dist_i, sigmax_i, ratio_conj_i])


        # Save everything to a file, for faster later plotting
        if caching_fisherinfo_filename is not None:
            try:
                with open(caching_fisherinfo_filename, 'w') as filecache_out:
                    data_cache = dict(result_fisherinfo_mindist_sigmax_ratio=result_fisherinfo_mindist_sigmax_ratio)
                    pickle.dump(data_cache, filecache_out, protocol=2)
            except IOError:
                print "Error writing out to caching file ", caching_fisherinfo_filename


    if plot_per_min_dist_all:
        # Do one plot per min distance.
        for min_dist_i, min_dist in enumerate(enforce_min_distance_space):
            # Show precision
            utils.pcolor_2d_data(result_all_precisions_mean[min_dist_i].T, x=ratio_space, y=sigmax_space, xlabel='ratio', ylabel='sigma_x', title='Precision, min_dist=%.3f' % min_dist)
            if savefigs:
                dataio.save_current_figure('precision_permindist_mindist%.2f_ratiosigmax_{label}_{unique_id}.pdf' % min_dist)

            # Show log precision
            utils.pcolor_2d_data(result_all_precisions_mean[min_dist_i].T, x=ratio_space, y=sigmax_space, xlabel='ratio', ylabel='sigma_x', title='Precision, min_dist=%.3f' % min_dist, log_scale=True)
            if savefigs:
                dataio.save_current_figure('logprecision_permindist_mindist%.2f_ratiosigmax_{label}_{unique_id}.pdf' % min_dist)


            # Plot estimated model precision
            utils.pcolor_2d_data(result_em_fits_mean[min_dist_i, ..., 0].T, x=ratio_space, y=sigmax_space, xlabel='ratio', ylabel='sigma_x', title='EM precision, min_dist=%.3f' % min_dist, log_scale=False)
            if savefigs:
                dataio.save_current_figure('logemprecision_permindist_mindist%.2f_ratiosigmax_{label}_{unique_id}.pdf' % min_dist)

            # Plot estimated Target, nontarget and random mixture components, in multiple subplots
            _, axes = plt.subplots(1, 3, figsize=(18, 6))
            plt.subplots_adjust(left=0.05, right=0.97, wspace = 0.3, bottom=0.15)
            utils.pcolor_2d_data(result_em_fits_mean[min_dist_i, ..., 1].T, x=ratio_space, y=sigmax_space, xlabel='ratio', ylabel='sigma_x', title='Target, min_dist=%.3f' % min_dist, log_scale=False, ax_handle=axes[0], ticks_interpolate=5)
            utils.pcolor_2d_data(result_em_fits_mean[min_dist_i, ..., 2].T, x=ratio_space, y=sigmax_space, xlabel='ratio', ylabel='sigma_x', title='Nontarget, min_dist=%.3f' % min_dist, log_scale=False, ax_handle=axes[1], ticks_interpolate=5)
            utils.pcolor_2d_data(result_em_fits_mean[min_dist_i, ..., 3].T, x=ratio_space, y=sigmax_space, xlabel='ratio', ylabel='sigma_x', title='Random, min_dist=%.3f' % min_dist, log_scale=False, ax_handle=axes[2], ticks_interpolate=5)

            if savefigs:
                dataio.save_current_figure('em_mixtureprobs_permindist_mindist%.2f_ratiosigmax_{label}_{unique_id}.pdf' % min_dist)

            # Plot Log-likelihood of Mixture model, sanity check
            utils.pcolor_2d_data(result_em_fits_mean[min_dist_i, ..., -1].T, x=ratio_space, y=sigmax_space, xlabel='ratio', ylabel='sigma_x', title='EM loglik, min_dist=%.3f' % min_dist, log_scale=False)
            if savefigs:
                dataio.save_current_figure('em_loglik_permindist_mindist%.2f_ratiosigmax_{label}_{unique_id}.pdf' % min_dist)

    if specific_plots_paper:
        # We need to choose 3 levels of min_distances
        target_sigmax = 0.25
        target_mindist_low = 0.15
        target_mindist_medium = 0.36
        target_mindist_high = 1.5

        sigmax_level_i = np.argmin(np.abs(sigmax_space - target_sigmax))
        min_dist_level_low_i = np.argmin(np.abs(enforce_min_distance_space - target_mindist_low))
        min_dist_level_medium_i = np.argmin(np.abs(enforce_min_distance_space - target_mindist_medium))
        min_dist_level_high_i = np.argmin(np.abs(enforce_min_distance_space - target_mindist_high))

        ## Do for each distance
        # for min_dist_i in [min_dist_level_low_i, min_dist_level_medium_i, min_dist_level_high_i]:
        for min_dist_i in xrange(enforce_min_distance_space.size):
            # Plot precision
            if False:
                utils.plot_mean_std_area(ratio_space, result_all_precisions_mean[min_dist_i, sigmax_level_i], result_all_precisions_std[min_dist_i, sigmax_level_i]) #, xlabel='Ratio conjunctivity', ylabel='Precision of recall')
                # plt.title('Min distance %.3f' % enforce_min_distance_space[min_dist_i])
                plt.ylim([0, np.max(result_all_precisions_mean[min_dist_i, sigmax_level_i] + result_all_precisions_std[min_dist_i, sigmax_level_i])])

                if savefigs:
                    dataio.save_current_figure('mindist%.2f_precisionrecall_forpaper_{label}_{unique_id}.pdf' % enforce_min_distance_space[min_dist_i])

            # Plot kappa fitted
            ax_handle = utils.plot_mean_std_area(ratio_space, result_em_fits_mean[min_dist_i, sigmax_level_i, :, 0], result_em_fits_std[min_dist_i, sigmax_level_i, :, 0]) #, xlabel='Ratio conjunctivity', ylabel='Fitted kappa')
            # Add distance between items in kappa units
            dist_items_kappa = utils.stddev_to_kappa(enforce_min_distance_space[min_dist_i])
            ax_handle.plot(ratio_space, dist_items_kappa*np.ones(ratio_space.size), 'k--', linewidth=3)
            plt.ylim([-0.1, np.max((np.max(result_em_fits_mean[min_dist_i, sigmax_level_i, :, 0] + result_em_fits_std[min_dist_i, sigmax_level_i, :, 0]), 1.1*dist_items_kappa))])
            # plt.title('Min distance %.3f' % enforce_min_distance_space[min_dist_i])
            if savefigs:
                dataio.save_current_figure('mindist%.2f_emkappa_forpaper_{label}_{unique_id}.pdf' % enforce_min_distance_space[min_dist_i])

            # Plot kappa-stddev fitted. Easier to visualize
            ax_handle = utils.plot_mean_std_area(ratio_space, result_em_kappastddev_mean[min_dist_i, sigmax_level_i], result_em_kappastddev_std[min_dist_i, sigmax_level_i]) #, xlabel='Ratio conjunctivity', ylabel='Fitted kappa_stddev')
            # Add distance between items in std dev units
            dist_items_std = (enforce_min_distance_space[min_dist_i])
            ax_handle.plot(ratio_space, dist_items_std*np.ones(ratio_space.size), 'k--', linewidth=3)
            # plt.title('Min distance %.3f' % enforce_min_distance_space[min_dist_i])
            plt.ylim([0, 1.1*np.max((np.max(result_em_kappastddev_mean[min_dist_i, sigmax_level_i] + result_em_kappastddev_std[min_dist_i, sigmax_level_i]), dist_items_std))])
            if savefigs:
                dataio.save_current_figure('mindist%.2f_emkappastddev_forpaper_{label}_{unique_id}.pdf' % enforce_min_distance_space[min_dist_i])


            if False:
                # Plot LLH
                utils.plot_mean_std_area(ratio_space, result_em_fits_mean[min_dist_i, sigmax_level_i, :, -1], result_em_fits_std[min_dist_i, sigmax_level_i, :, -1]) #, xlabel='Ratio conjunctivity', ylabel='Loglikelihood of Mixture model fit')
                # plt.title('Min distance %.3f' % enforce_min_distance_space[min_dist_i])
                if savefigs:
                    dataio.save_current_figure('mindist%.2f_emllh_forpaper_{label}_{unique_id}.pdf' % enforce_min_distance_space[min_dist_i])

                # Plot mixture parameters, std
                utils.plot_multiple_mean_std_area(ratio_space, result_em_fits_mean[min_dist_i, sigmax_level_i, :, 1:4].T, result_em_fits_std[min_dist_i, sigmax_level_i, :, 1:4].T)
                plt.ylim([0.0, 1.1])
                # plt.title('Min distance %.3f' % enforce_min_distance_space[min_dist_i])
                # plt.legend("Target", "Non-target", "Random")
                if savefigs:
                    dataio.save_current_figure('mindist%.2f_emprobs_forpaper_{label}_{unique_id}.pdf' % enforce_min_distance_space[min_dist_i])

                # Mixture parameters, SEM
                utils.plot_multiple_mean_std_area(ratio_space, result_em_fits_mean[min_dist_i, sigmax_level_i, :, 1:4].T, result_em_fits_std[min_dist_i, sigmax_level_i, :, 1:4].T/np.sqrt(nb_repetitions))
                plt.ylim([0.0, 1.1])
                # plt.title('Min distance %.3f' % enforce_min_distance_space[min_dist_i])
                # plt.legend("Target", "Non-target", "Random")
                if savefigs:
                    dataio.save_current_figure('mindist%.2f_emprobs_forpaper_sem_{label}_{unique_id}.pdf' % enforce_min_distance_space[min_dist_i])

    if plots_emfit_allitems:
        # We need to choose 3 levels of min_distances
        target_sigmax = 0.25
        target_mindist_low = 0.15
        target_mindist_medium = 0.36
        target_mindist_high = 1.5

        sigmax_level_i = np.argmin(np.abs(sigmax_space - target_sigmax))
        min_dist_level_low_i = np.argmin(np.abs(enforce_min_distance_space - target_mindist_low))
        min_dist_level_medium_i = np.argmin(np.abs(enforce_min_distance_space - target_mindist_medium))
        min_dist_level_high_i = np.argmin(np.abs(enforce_min_distance_space - target_mindist_high))

        min_dist_i_plotting_space = np.array([min_dist_level_low_i, min_dist_level_medium_i, min_dist_level_high_i])

        if should_fit_allitems_model:

            # kappa, mixt_target, mixt_nontargets (K), mixt_random, LL, bic
            # result_emfitallitems = np.empty((min_dist_i_plotting_space.size, ratio_space.size, 2*K+5))*np.nan
            result_emfitallitems = np.empty((enforce_min_distance_space.size, ratio_space.size, K+5))*np.nan

            ## Do for each distance
            # for min_dist_plotting_i, min_dist_i in enumerate(min_dist_i_plotting_space):
            for min_dist_i in xrange(enforce_min_distance_space.size):
                # Fit the mixture model
                for ratio_i, ratio in enumerate(ratio_space):
                    print "Refitting EM all items. Ratio:", ratio, "Dist:", enforce_min_distance_space[min_dist_i]

                    if mixturemodel_to_use == 'allitems_uniquekappa':
                        em_fit = em_circularmixture_allitems_uniquekappa.fit(
                            result_responses_all[min_dist_i, sigmax_level_i, ratio_i].flatten(),
                            result_target_all[min_dist_i, sigmax_level_i, ratio_i].flatten(),
                            result_nontargets_all[min_dist_i, sigmax_level_i, ratio_i].transpose((0, 2, 1)).reshape((N*nb_repetitions, K)))
                    elif mixturemodel_to_use == 'allitems_fikappa':
                        em_fit = em_circularmixture_allitems_kappafi.fit(result_responses_all[min_dist_i, sigmax_level_i, ratio_i].flatten(),
                            result_target_all[min_dist_i, sigmax_level_i, ratio_i].flatten(),
                            result_nontargets_all[min_dist_i, sigmax_level_i, ratio_i].transpose((0, 2, 1)).reshape((N*nb_repetitions, K)),
                            kappa=result_fisherinfo_mindist_sigmax_ratio[min_dist_i, sigmax_level_i, ratio_i])
                    else:
                        raise ValueError("Wrong mixturemodel_to_use, %s" % mixturemodel_to_use)

                    result_emfitallitems[min_dist_i, ratio_i] = [em_fit['kappa'], em_fit['mixt_target']] + em_fit['mixt_nontargets'].tolist() + [em_fit[key] for key in ('mixt_random', 'train_LL', 'bic')]

            # Save everything to a file, for faster later plotting
            if caching_emfit_filename is not None:
                try:
                    with open(caching_emfit_filename, 'w') as filecache_out:
                        data_em = dict(result_emfitallitems=result_emfitallitems, target_sigmax=target_sigmax)
                        pickle.dump(data_em, filecache_out, protocol=2)
                except IOError:
                    print "Error writing out to caching file ", caching_emfit_filename


        ## Plots now, for each distance!
        # for min_dist_plotting_i, min_dist_i in enumerate(min_dist_i_plotting_space):
        for min_dist_i in xrange(enforce_min_distance_space.size):

            # Plot now
            _, ax = plt.subplots()
            ax.plot(ratio_space, result_emfitallitems[min_dist_i, :, 1:5], linewidth=3)
            plt.ylim([0.0, 1.1])
            plt.legend(['Target', 'Nontarget 1', 'Nontarget 2', 'Random'], loc='upper left')

            if savefigs:
                dataio.save_current_figure('mindist%.2f_emprobsfullitems_{label}_{unique_id}.pdf' % enforce_min_distance_space[min_dist_i])

    if plot_min_distance_effect:
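        # Editor's note, inferred from the expression below: a conjunctive
        # subpopulation of M*ratio units tiling the (2*pi x 2*pi) feature space
        # has receptive fields roughly 2*pi/sqrt(M*ratio) wide.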
        conj_receptive_field_size = 2.*np.pi/((all_args[0]['M']*ratio_space)**0.5)

        target_vs_nontargets_mindist_ratio = result_emfitallitems[..., 1]/np.sum(result_emfitallitems[..., 1:4], axis=-1)
        nontargetsmean_vs_targnontarg_mindist_ratio = np.mean(result_emfitallitems[..., 2:4]/np.sum(result_emfitallitems[..., 1:4], axis=-1)[..., np.newaxis], axis=-1)

        for ratio_conj_i, ratio_conj in enumerate(ratio_space):
            # Do one plot per ratio, putting the receptive field size on each
            f, ax = plt.subplots()

            ax.plot(enforce_min_distance_space[1:], target_vs_nontargets_mindist_ratio[1:, ratio_conj_i], linewidth=3, label='target mixture')
            ax.plot(enforce_min_distance_space[1:], nontargetsmean_vs_targnontarg_mindist_ratio[1:, ratio_conj_i], linewidth=3, label='non-target mixture')
            # ax.plot(enforce_min_distance_space[1:], result_emfitallitems[1:, ratio_conj_i, 1:5], linewidth=3)

            ax.axvline(x=conj_receptive_field_size[ratio_conj_i]/2., color='k', linestyle='--', linewidth=2)
            ax.axvline(x=conj_receptive_field_size[ratio_conj_i]*2., color='r', linestyle='--', linewidth=2)

            plt.legend(loc='upper left')
            plt.grid()
            # ax.set_xlabel('Stimuli separation')
            # ax.set_ylabel('Ratio Target to Non-targets')
            plt.axis('tight')
            ax.set_ylim([0.0, 1.0])
            ax.set_xlim([enforce_min_distance_space[1:].min(), enforce_min_distance_space[1:].max()])

            if savefigs:
                dataio.save_current_figure('ratio%.2f_mindistpred_ratiotargetnontarget_{label}_{unique_id}.pdf' % ratio_conj)


    if compute_bootstraps:
        ## Bootstrap evaluation

        # We need to choose 3 levels of min_distances
        target_sigmax = 0.25
        target_mindist_low = 0.15
        target_mindist_medium = 0.5
        target_mindist_high = 1.

        sigmax_level_i = np.argmin(np.abs(sigmax_space - target_sigmax))
        min_dist_level_low_i = np.argmin(np.abs(enforce_min_distance_space - target_mindist_low))
        min_dist_level_medium_i = np.argmin(np.abs(enforce_min_distance_space - target_mindist_medium))
        min_dist_level_high_i = np.argmin(np.abs(enforce_min_distance_space - target_mindist_high))

        # cache_bootstrap_fn = os.path.join(generator_module.pbs_submission_infos['simul_out_dir'], 'outputs', 'cache_bootstrap.pickle')
        cache_bootstrap_fn = '/Users/loicmatthey/Dropbox/UCL/1-phd/Work/Visual_working_memory/code/git-bayesian-visual-working-memory/Experiments/specific_stimuli/specific_stimuli_corrected_mixed_sigmaxmindistance_autoset_repetitions5mult_collectall_281113_outputs/cache_bootstrap.pickle'
        try:
            with open(cache_bootstrap_fn, 'r') as file_in:
                # Load and assign values
                cached_data = pickle.load(file_in)
                bootstrap_ecdf_bays_sigmax_T = cached_data['bootstrap_ecdf_bays_sigmax_T']
                bootstrap_ecdf_allitems_sum_sigmax_T = cached_data['bootstrap_ecdf_allitems_sum_sigmax_T']
                bootstrap_ecdf_allitems_all_sigmax_T = cached_data['bootstrap_ecdf_allitems_all_sigmax_T']
                should_fit_bootstrap = False

        except IOError:
            print "Error while loading ", cache_bootstrap_fn

        ratio_i = 0

        # bootstrap_allitems_nontargets_allitems_uniquekappa = em_circularmixture_allitems_uniquekappa.bootstrap_nontarget_stat(
        # result_responses_all[min_dist_level_low_i, sigmax_level_i, ratio_i].flatten(),
        # result_target_all[min_dist_level_low_i, sigmax_level_i, ratio_i].flatten(),
        # result_nontargets_all[min_dist_level_low_i, sigmax_level_i, ratio_i].transpose((0, 2, 1)).reshape((N*nb_repetitions, K)),
        # sumnontargets_bootstrap_ecdf=bootstrap_ecdf_allitems_sum_sigmax_T[sigmax_level_i][K]['ecdf'],
        # allnontargets_bootstrap_ecdf=bootstrap_ecdf_allitems_all_sigmax_T[sigmax_level_i][K]['ecdf']

        # TODO FINISH HERE

    variables_to_save = ['nb_repetitions']

    if savedata:
        dataio.save_variables_default(locals(), variables_to_save)

        dataio.make_link_output_to_dropbox(dropbox_current_experiment_folder='specific_stimuli')


    plt.show()


    return locals()
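# --- Illustrative sketch (editor's addition, not part of the original code) ---
# plots_specific_stimuli_mixed above uses the same caching idiom twice (EM fits and
# Fisher information): reload a pickle if it exists, otherwise recompute and dump it.
# A minimal, standalone version of that pattern; compute_fn and key are hypothetical
# stand-ins for the expensive computation and the cached dictionary entry:
def _sketch_load_or_compute_cache(cache_filename, compute_fn, key='result'):
    import os
    import pickle
    if cache_filename is not None and os.path.exists(cache_filename):
        try:
            with open(cache_filename, 'rb') as file_in:
                return pickle.load(file_in)[key]
        except (IOError, KeyError):
            pass  # unreadable or incomplete cache: fall through and recompute
    result = compute_fn()
    if cache_filename is not None:
        try:
            with open(cache_filename, 'wb') as filecache_out:
                pickle.dump({key: result}, filecache_out, protocol=2)
        except IOError:
            pass  # caching is best-effort only
    return result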
def launcher_do_receptivesize_effect(args):
    '''
        Run the model for 1 item, varying the receptive field size scale (rc_scale).
        Compute:
        - Precision of samples
        - EM mixture model fits
        - Marginal Inverse Fisher Information
    '''

    print "Doing a piece of work for launcher_do_receptivesize_effect"

    all_parameters = utils.argparse_2_dict(args)
    print all_parameters

    if all_parameters['burn_samples'] + all_parameters['num_samples'] < 200:
        print "WARNING> you probably do not have enough samples:", all_parameters['burn_samples'] + all_parameters['num_samples']

    if 'plots_during_simulation_callback' in all_parameters:
        plots_during_simulation_callback = all_parameters['plots_during_simulation_callback']
        del all_parameters['plots_during_simulation_callback']
    else:
        plots_during_simulation_callback = None

    # Create DataIO
    #  (complete label with current variable state)
    dataio = DataIO.DataIO(output_folder=all_parameters['output_directory'], label=all_parameters['label'].format(**all_parameters))
    save_every = 1
    run_counter = 0

    # Fix some parameters
    all_parameters['autoset_parameters'] = False
    all_parameters['feat_ratio'] = -1.  # hack to automatically set the ratio

    # Parameters to vary
    rcscale_space = np.linspace(0.0001, 40., 30)

    # Result arrays
    result_all_precisions = np.nan*np.ones((rcscale_space.size, all_parameters['num_repetitions']))
    result_marginal_inv_fi = np.nan*np.ones((rcscale_space.size, 4, all_parameters['num_repetitions']))  # inv_FI, inv_FI_std, FI, FI_std
    result_em_fits = np.nan*np.ones((rcscale_space.size, 5, all_parameters['num_repetitions']))  # kappa, mixt_target, mixt_nontarget, mixt_random, ll

    search_progress = progress.Progress(rcscale_space.size*all_parameters['num_repetitions'])

    for repet_i in xrange(all_parameters['num_repetitions']):
        for rc_scale_i, rc_scale in enumerate(rcscale_space):
            print "%.2f%%, %s left - %s" % (search_progress.percentage(), search_progress.time_remaining_str(), search_progress.eta_str())

            print "Fit for rc_scale=%.2f, %d/%d" % (rc_scale, repet_i+1, all_parameters['num_repetitions'])

            # Update parameter
            all_parameters['rc_scale'] = rc_scale

            ### WORK WORK WORK work? ###

            # Instantiate
            (_, _, _, sampler) = launchers.init_everything(all_parameters)

            # Sample
            sampler.run_inference(all_parameters)

            # Compute precision
            print "get precision..."
            result_all_precisions[rc_scale_i, repet_i] = sampler.get_precision()

            # Fit mixture model
            print "fit mixture model..."
            curr_params_fit = sampler.fit_mixture_model(use_all_targets=False)
            result_em_fits[rc_scale_i, :, repet_i] = [curr_params_fit[key] for key in ('kappa', 'mixt_target', 'mixt_nontargets_sum', 'mixt_random', 'train_LL')]

            # Compute marginal inverse fisher info
            print "compute marginal inverse fisher info"
            marginal_fi_dict = sampler.estimate_marginal_inverse_fisher_info_montecarlo()
            result_marginal_inv_fi[rc_scale_i, :, repet_i] = [marginal_fi_dict[key] for key in ('inv_FI', 'inv_FI_std', 'FI', 'FI_std')]


            print result_all_precisions[rc_scale_i, repet_i], curr_params_fit, marginal_fi_dict

            ## Run callback function if exists
            if plots_during_simulation_callback:
                print "Doing plots..."
                try:
                    # Be extra safe: if this fails, the simulation must still continue!
                    plots_during_simulation_callback['function'](locals(), plots_during_simulation_callback['parameters'])
                    print "plots done."
                except Exception as e:
                    print "error during plotting callback function", plots_during_simulation_callback['function'], plots_during_simulation_callback['parameters']
                    print e
                    traceback.print_exc()

            ### /Work ###
            search_progress.increment()
            if run_counter % save_every == 0 or search_progress.done():
                dataio.save_variables_default(locals())
            run_counter += 1

    # Finished
    dataio.save_variables_default(locals())

    print "All finished"
    return locals()
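# --- Illustrative sketch (editor's addition, not part of the original code) ---
# launcher_do_receptivesize_effect above keeps one slice per repetition in the last
# axis of its result arrays (NaN where a run failed); downstream plotting summarises
# them with NaN-aware means and standard deviations. A minimal standalone version of
# that reduction, using plain numpy instead of the project's utils helpers:
def _sketch_summarise_over_repetitions(result_em_fits):
    import numpy as np
    # result_em_fits: (rcscale_space.size, 5, num_repetitions)
    em_mean = np.nanmean(result_em_fits, axis=-1)
    em_std = np.nanstd(result_em_fits, axis=-1)
    return em_mean, em_std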
def plots_misbinding_logposterior(data_pbs, generator_module=None):
    '''
        Reload 3D volume runs from PBS and plot them

    '''


    #### SETUP
    #
    savedata = False
    savefigs = True

    plot_logpost = False
    plot_error = False
    plot_mixtmodel = True
    plot_hist_responses_fisherinfo = True
    compute_plot_bootstrap = False
    compute_fisher_info_perratioconj = True

    # mixturemodel_to_use = 'original'
    mixturemodel_to_use = 'allitems'
    # mixturemodel_to_use = 'allitems_kappafi'

    caching_fisherinfo_filename = os.path.join(generator_module.pbs_submission_infos['simul_out_dir'], 'cache_fisherinfo.pickle')


    #
    #### /SETUP

    print "Order parameters: ", generator_module.dict_parameters_range.keys()

    result_all_log_posterior = np.squeeze(data_pbs.dict_arrays['result_all_log_posterior']['results'])
    result_all_thetas = np.squeeze(data_pbs.dict_arrays['result_all_thetas']['results'])

    ratio_space = data_pbs.loaded_data['parameters_uniques']['ratio_conj']

    print ratio_space
    print result_all_log_posterior.shape

    N = result_all_thetas.shape[-1]

    result_prob_wrong = np.zeros((ratio_space.size, N))
    result_em_fits = np.empty((ratio_space.size, 6))*np.nan

    all_args = data_pbs.loaded_data['args_list']

    fixed_means = [-np.pi*0.6, np.pi*0.6]
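    # Two fixed item locations: the nontarget at -0.6*pi and the target at +0.6*pi
    # (the mixture-model fits below use fixed_means[1] as the target angle)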
    all_angles = np.linspace(-np.pi, np.pi, result_all_log_posterior.shape[-1])

    dataio = DataIO(output_folder=generator_module.pbs_submission_infos['simul_out_dir'] + '/outputs/', label='global_' + dataset_infos['save_output_filename'])


    plt.rcParams['font.size'] = 18


    if plot_hist_responses_fisherinfo:

        # From cache
        if caching_fisherinfo_filename is not None:
            if os.path.exists(caching_fisherinfo_filename):
                # Got file, open it and try to use its contents
                try:
                    with open(caching_fisherinfo_filename, 'r') as file_in:
                        # Load and assign values
                        cached_data = pickle.load(file_in)
                        result_fisherinfo_ratio = cached_data['result_fisherinfo_ratio']
                        compute_fisher_info_perratioconj = False

                except IOError:
                    print "Error while loading ", caching_fisherinfo_filename, "falling back to computing the Fisher Info"

        if compute_fisher_info_perratioconj:
            # We did not save the Fisher info, but need it if we want to fit the mixture model with fixed kappa. So recompute them using the args_dicts

            result_fisherinfo_ratio = np.empty(ratio_space.shape)

            # Invert the all_args_i -> ratio_conj direction
            parameters_indirections = data_pbs.loaded_data['parameters_dataset_index']

            for ratio_conj_i, ratio_conj in enumerate(ratio_space):
                # Get index of first dataset with the current ratio_conj (no need for the others, I think)
                arg_index = parameters_indirections[(ratio_conj,)][0]

                # Now using this dataset, reconstruct a RandomFactorialNetwork and compute the fisher info
                curr_args = all_args[arg_index]

                curr_args['stimuli_generation'] = lambda T: np.linspace(-np.pi*0.6, np.pi*0.6, T)

                (random_network, data_gen, stat_meas, sampler) = launchers.init_everything(curr_args)

                # Theo Fisher info
                result_fisherinfo_ratio[ratio_conj_i] = sampler.estimate_fisher_info_theocov()

                del curr_args['stimuli_generation']

            # Save everything to a file, for faster later plotting
            if caching_fisherinfo_filename is not None:
                try:
                    with open(caching_fisherinfo_filename, 'w') as filecache_out:
                        data_cache = dict(result_fisherinfo_ratio=result_fisherinfo_ratio)
                        pickle.dump(data_cache, filecache_out, protocol=2)
                except IOError:
                    print "Error writing out to caching file ", caching_fisherinfo_filename

        # Now plots. Do histograms of responses (around the fixed means, -0.6*pi and +0.6*pi), add a Von Mises derived from the theoretical FI on top, and vertical lines at the correct target/nontarget angles.
        for ratio_conj_i, ratio_conj in enumerate(ratio_space):
            # Histogram
            ax = utils.hist_angular_data(result_all_thetas[ratio_conj_i], bins=100, title='ratio %.2f, fi %.0f' % (ratio_conj, result_fisherinfo_ratio[ratio_conj_i]))
            bar_heights, _, _ = utils.histogram_binspace(result_all_thetas[ratio_conj_i], bins=100, norm='density')

            # Add Fisher info prediction on top
            x = np.linspace(-np.pi, np.pi, 1000)
            if result_fisherinfo_ratio[ratio_conj_i] < 700:
                # Von Mises PDF
                utils.plot_vonmises_pdf(x, utils.stddev_to_kappa(1./result_fisherinfo_ratio[ratio_conj_i]**0.5), mu=fixed_means[-1], ax_handle=ax, linewidth=3, color='r', scale=np.max(bar_heights), fmt='-')
            else:
                # Switch to Gaussian instead
                utils.plot_normal_pdf(x, mu=fixed_means[-1], std=1./result_fisherinfo_ratio[ratio_conj_i]**0.5, ax_handle=ax, linewidth=3, color='r', scale=np.max(bar_heights), fmt='-')

            # ax.set_xticks([])
            # ax.set_yticks([])

            # Add vertical line to correct target/nontarget
            ax.axvline(x=fixed_means[0], color='g', linewidth=2)
            ax.axvline(x=fixed_means[1], color='r', linewidth=2)

            ax.get_figure().canvas.draw()

            if savefigs:
                # plt.tight_layout()
                dataio.save_current_figure('results_misbinding_histresponses_vonmisespdf_ratioconj%.2f{label}_{unique_id}.pdf' % (ratio_conj))



    if plot_logpost:
        for ratio_conj_i, ratio_conj in enumerate(ratio_space):
            # ax = utils.plot_mean_std_area(all_angles, nanmean(result_all_log_posterior[ratio_conj_i], axis=0), nanstd(result_all_log_posterior[ratio_conj_i], axis=0))

            # ax.set_xlim((-np.pi, np.pi))
            # ax.set_xticks((-np.pi, -np.pi / 2, 0, np.pi / 2., np.pi))
            # ax.set_xticklabels((r'$-\pi$', r'$-\frac{\pi}{2}$', r'$0$', r'$\frac{\pi}{2}$', r'$\pi$'))
            # ax.set_yticks(())

            # ax.get_figure().canvas.draw()

            # if savefigs:
            #     dataio.save_current_figure('results_misbinding_logpost_ratioconj%.2f_{label}_global_{unique_id}.pdf' % ratio_conj)


            # Compute the probability of answering wrongly (from fitting mixture distrib onto posterior)
            for n in xrange(result_all_log_posterior.shape[1]):
                result_prob_wrong[ratio_conj_i, n], _, _ = utils.fit_gaussian_mixture_fixedmeans(all_angles, np.exp(result_all_log_posterior[ratio_conj_i, n]), fixed_means=fixed_means, normalise=True, return_fitted_data=False, should_plot=False)

        # ax = utils.plot_mean_std_area(ratio_space, nanmean(result_prob_wrong, axis=-1), nanstd(result_prob_wrong, axis=-1))
        plt.figure()
        plt.plot(ratio_space, utils.nanmean(result_prob_wrong, axis=-1))

        # ax.get_figure().canvas.draw()
        if savefigs:
            dataio.save_current_figure('results_misbinding_probwrongpost_allratioconj_{label}_global_{unique_id}.pdf')

    if plot_error:

        ## Compute Standard deviation/precision from samples and plot it as a function of ratio_conj
        stats = utils.compute_mean_std_circular_data(utils.wrap_angles(result_all_thetas - fixed_means[1]).T)

        f = plt.figure()
        plt.plot(ratio_space, stats['std'])
        plt.ylabel('Standard deviation [rad]')

        if savefigs:
            dataio.save_current_figure('results_misbinding_stddev_allratioconj_{label}_global_{unique_id}.pdf')

        f = plt.figure()
        plt.plot(ratio_space, utils.compute_angle_precision_from_std(stats['std'], square_precision=False), linewidth=2)
        plt.ylabel('Precision [$1/rad$]')
        plt.xlabel('Proportion of conjunctive units')
        plt.grid()

        if savefigs:
            dataio.save_current_figure('results_misbinding_precision_allratioconj_{label}_global_{unique_id}.pdf')

        ## Compute the probability of misbinding
        # 1) Count the fraction of samples below a fixed threshold (<= 1 here, despite the variable name)
        # 2) Fit a mixture model, average over mixture probabilities
        prob_smaller0 = np.sum(result_all_thetas <= 1, axis=1)/float(result_all_thetas.shape[1])

        em_centers = np.zeros((ratio_space.size, 2))
        em_covs = np.zeros((ratio_space.size, 2))
        em_pk = np.zeros((ratio_space.size, 2))
        em_ll = np.zeros(ratio_space.size)
        for ratio_conj_i, ratio_conj in enumerate(ratio_space):
            cen_lst, cov_lst, em_pk[ratio_conj_i], em_ll[ratio_conj_i] = pygmm.em(result_all_thetas[ratio_conj_i, np.newaxis].T, K = 2, max_iter = 400, init_kw={'cluster_init':'fixed', 'fixed_means': fixed_means})

            em_centers[ratio_conj_i] = np.array(cen_lst).flatten()
            em_covs[ratio_conj_i] = np.array(cov_lst).flatten()

        # print em_centers
        # print em_covs
        # print em_pk

        f = plt.figure()
        plt.plot(ratio_space, prob_smaller0)
        plt.ylabel('Misbound proportion')
        if savefigs:
            dataio.save_current_figure('results_misbinding_countsmaller0_allratioconj_{label}_global_{unique_id}.pdf')

        f = plt.figure()
        plt.plot(ratio_space, np.max(em_pk, axis=-1), 'g', linewidth=2)
        plt.ylabel('Mixture proportion, correct')
        plt.xlabel('Proportion of conjunctive units')
        plt.grid()
        if savefigs:
            dataio.save_current_figure('results_misbinding_emmixture_allratioconj_{label}_global_{unique_id}.pdf')


        # Put everything on one figure
        f = plt.figure(figsize=(10, 6))
        norm_for_plot = lambda x: (x - np.min(x))/np.max((x - np.min(x)))
        plt.plot(ratio_space, norm_for_plot(stats['std']), ratio_space, norm_for_plot(utils.compute_angle_precision_from_std(stats['std'], square_precision=False)), ratio_space, norm_for_plot(prob_smaller0), ratio_space, norm_for_plot(em_pk[:, 1]), ratio_space, norm_for_plot(em_pk[:, 0]))
        plt.legend(('Std dev', 'Precision', 'Prob smaller 1', 'Mixture proportion correct', 'Mixture proportion misbinding'))
        # plt.plot(ratio_space, norm_for_plot(compute_angle_precision_from_std(stats['std'], square_precision=False)), ratio_space, norm_for_plot(em_pk[:, 1]), linewidth=2)
        # plt.legend(('Precision', 'Mixture proportion correct'), loc='best')
        plt.grid()
        if savefigs:
            dataio.save_current_figure('results_misbinding_allmetrics_allratioconj_{label}_global_{unique_id}.pdf')


    if plot_mixtmodel:
        # Fit Paul's model
        target_angle = np.ones(N)*fixed_means[1]
        nontarget_angles = np.ones((N, 1))*fixed_means[0]

        for ratio_conj_i, ratio_conj in enumerate(ratio_space):
            print "Ratio: ", ratio_conj

            responses = result_all_thetas[ratio_conj_i]

            if mixturemodel_to_use == 'allitems_kappafi':
                curr_params_fit = em_circularmixture_allitems_kappafi.fit(responses, target_angle, nontarget_angles, kappa=result_fisherinfo_ratio[ratio_conj_i])
            elif mixturemodel_to_use == 'allitems':
                curr_params_fit = em_circularmixture_allitems_uniquekappa.fit(responses, target_angle, nontarget_angles)
            else:
                curr_params_fit = em_circularmixture.fit(responses, target_angle, nontarget_angles)

            result_em_fits[ratio_conj_i] = [curr_params_fit['kappa'], curr_params_fit['mixt_target']] + utils.arrnum_to_list(curr_params_fit['mixt_nontargets']) + [curr_params_fit[key] for key in ('mixt_random', 'train_LL', 'bic')]

            print curr_params_fit


        if False:
            f, ax = plt.subplots()
            ax2 = ax.twinx()

            # left axis, kappa
            ax = utils.plot_mean_std_area(ratio_space, result_em_fits[:, 0], 0*result_em_fits[:, 0], xlabel='Proportion of conjunctive units', ylabel="Inverse variance $[rad^{-2}]$", ax_handle=ax, linewidth=3, fmt='o-', markersize=8, label='Fitted kappa', color='k')

            # Right axis, mixture probabilities
            utils.plot_mean_std_area(ratio_space, result_em_fits[:, 1], 0*result_em_fits[:, 1], xlabel='Proportion of conjunctive units', ylabel="Mixture probabilities", ax_handle=ax2, linewidth=3, fmt='o-', markersize=8, label='Target')
            utils.plot_mean_std_area(ratio_space, result_em_fits[:, 2], 0*result_em_fits[:, 2], xlabel='Proportion of conjunctive units', ylabel="Mixture probabilities", ax_handle=ax2, linewidth=3, fmt='o-', markersize=8, label='Nontarget')
            utils.plot_mean_std_area(ratio_space, result_em_fits[:, 3], 0*result_em_fits[:, 3], xlabel='Proportion of conjunctive units', ylabel="Mixture probabilities", ax_handle=ax2, linewidth=3, fmt='o-', markersize=8, label='Random')

            lines, labels = ax.get_legend_handles_labels()
            lines2, labels2 = ax2.get_legend_handles_labels()
            ax.legend(lines + lines2, labels + labels2, fontsize=12, loc='right')

            # ax.set_xlim([0.9, 5.1])
            # ax.set_xticks(range(1, 6))
            # ax.set_xticklabels(range(1, 6))
            plt.grid()

            f.canvas.draw()

        if True:
            # Mixture probabilities
            ax = utils.plot_mean_std_area(ratio_space, result_em_fits[:, 1], 0*result_em_fits[:, 1], xlabel='Proportion of conjunctive units', ylabel="Mixture probabilities", linewidth=3, fmt='-', markersize=8, label='Target')
            utils.plot_mean_std_area(ratio_space, result_em_fits[:, 2], 0*result_em_fits[:, 2], xlabel='Proportion of conjunctive units', ylabel="Mixture probabilities", ax_handle=ax, linewidth=3, fmt='-', markersize=8, label='Nontarget')
            utils.plot_mean_std_area(ratio_space, result_em_fits[:, 3], 0*result_em_fits[:, 3], xlabel='Proportion of conjunctive units', ylabel="Mixture probabilities", ax_handle=ax, linewidth=3, fmt='-', markersize=8, label='Random')

            ax.legend(loc='right')

            # ax.set_xlim([0.9, 5.1])
            # ax.set_xticks(range(1, 6))
            # ax.set_xticklabels(range(1, 6))
            plt.grid()

            if savefigs:
                dataio.save_current_figure('results_misbinding_emmixture_allratioconj_{label}_global_{unique_id}.pdf')

        if True:
            # Kappa
            # ax = utils.plot_mean_std_area(ratio_space, result_em_fits[:, 0], 0*result_em_fits[:, 0], xlabel='Proportion of conjunctive units', ylabel="$\kappa [rad^{-2}]$", linewidth=3, fmt='-', markersize=8, label='Kappa')
            ax = utils.plot_mean_std_area(ratio_space, utils.kappa_to_stddev(result_em_fits[:, 0]), 0*result_em_fits[:, 2], xlabel='Proportion of conjunctive units', ylabel="Standard deviation [rad]", linewidth=3, fmt='-', markersize=8, label='Mixture model $\kappa$')

            # Add Fisher Info theo
            ax = utils.plot_mean_std_area(ratio_space, utils.kappa_to_stddev(result_fisherinfo_ratio), 0*result_em_fits[:, 2], xlabel='Proportion of conjunctive units', ylabel="Standard deviation [rad]", linewidth=3, fmt='-', markersize=8, label='Fisher Information', ax_handle=ax)

            ax.legend(loc='best')

            # ax.set_xlim([0.9, 5.1])
            # ax.set_xticks(range(1, 6))
            # ax.set_xticklabels(range(1, 6))
            plt.grid()

            if savefigs:
                dataio.save_current_figure('results_misbinding_kappa_allratioconj_{label}_global_{unique_id}.pdf')

    if compute_plot_bootstrap:
        ## Compute the bootstrap pvalue for each ratio
        #       use the bootstrap CDF from mixed runs, not the exact current ones, not sure if good idea.

        bootstrap_to_load = 1
        if bootstrap_to_load == 1:
            cache_bootstrap_fn = os.path.join(generator_module.pbs_submission_infos['simul_out_dir'], 'outputs', 'cache_bootstrap_mixed_from_bootstrapnontargets.pickle')
            bootstrap_ecdf_sum_label = 'bootstrap_ecdf_allitems_sum_sigmax_T'
            bootstrap_ecdf_all_label = 'bootstrap_ecdf_allitems_all_sigmax_T'
        elif bootstrap_to_load == 2:
            cache_bootstrap_fn = os.path.join(generator_module.pbs_submission_infos['simul_out_dir'], 'outputs', 'cache_bootstrap_misbinding_mixed.pickle')
            bootstrap_ecdf_sum_label = 'bootstrap_ecdf_allitems_sum_ratioconj'
            bootstrap_ecdf_all_label = 'bootstrap_ecdf_allitems_all_ratioconj'

        try:
            with open(cache_bootstrap_fn, 'r') as file_in:
                # Load and assign values
                cached_data = pickle.load(file_in)
                assert bootstrap_ecdf_sum_label in cached_data
                assert bootstrap_ecdf_all_label in cached_data
                should_fit_bootstrap = False

        except IOError:
            print "Error while loading ", cache_bootstrap_fn

        # Select the ECDF to use
        if bootstrap_to_load == 1:
            sigmax_i = 3    # corresponds to sigmax = 2, input here.
            T_i = 1         # two possible targets here.
            bootstrap_ecdf_sum_used = cached_data[bootstrap_ecdf_sum_label][sigmax_i][T_i]['ecdf']
            bootstrap_ecdf_all_used = cached_data[bootstrap_ecdf_all_label][sigmax_i][T_i]['ecdf']
        elif bootstrap_to_load == 2:
            ratio_conj_i = 4
            bootstrap_ecdf_sum_used = cached_data[bootstrap_ecdf_sum_label][ratio_conj_i]['ecdf']
            bootstrap_ecdf_all_used = cached_data[bootstrap_ecdf_all_label][ratio_conj_i]['ecdf']


        result_pvalue_bootstrap_sum = np.empty(ratio_space.size)*np.nan
        result_pvalue_bootstrap_all = np.empty((ratio_space.size, nontarget_angles.shape[-1]))*np.nan

        for ratio_conj_i, ratio_conj in enumerate(ratio_space):
            print "Ratio: ", ratio_conj

            responses = result_all_thetas[ratio_conj_i]

            bootstrap_allitems_nontargets_allitems_uniquekappa = em_circularmixture_allitems_uniquekappa.bootstrap_nontarget_stat(responses, target_angle, nontarget_angles,
                sumnontargets_bootstrap_ecdf=bootstrap_ecdf_sum_used,
                allnontargets_bootstrap_ecdf=bootstrap_ecdf_all_used)

            result_pvalue_bootstrap_sum[ratio_conj_i] = bootstrap_allitems_nontargets_allitems_uniquekappa['p_value']
            result_pvalue_bootstrap_all[ratio_conj_i] = bootstrap_allitems_nontargets_allitems_uniquekappa['allnontarget_p_value']

        ## Plots
        # f, ax = plt.subplots()
        # ax.plot(ratio_space, result_pvalue_bootstrap_all, linewidth=2)

        # if savefigs:
        #     dataio.save_current_figure("pvalue_bootstrap_all_ratioconj_{label}_{unique_id}.pdf")

        f, ax = plt.subplots()
        ax.plot(ratio_space, result_pvalue_bootstrap_sum, linewidth=2)
        plt.grid()

        if savefigs:
            dataio.save_current_figure("pvalue_bootstrap_sum_ratioconj_{label}_{unique_id}.pdf")


    # plt.figure()
    # plt.plot(ratio_MMlower, results_filtered_smoothed/np.max(results_filtered_smoothed, axis=0), linewidth=2)
    # plt.plot(ratio_MMlower[np.argmax(results_filtered_smoothed, axis=0)], np.ones(results_filtered_smoothed.shape[-1]), 'ro', markersize=10)
    # plt.grid()
    # plt.ylim((0., 1.1))
    # plt.subplots_adjust(right=0.8)
    # plt.legend(['%d item' % i + 's'*(i>1) for i in xrange(1, T+1)], loc='center right', bbox_to_anchor=(1.3, 0.5))
    # plt.xticks(np.linspace(0, 1.0, 5))

    variables_to_save = ['target_angle', 'nontarget_angles']

    if savedata:
        dataio.save_variables_default(locals(), variables_to_save)
        dataio.make_link_output_to_dropbox(dropbox_current_experiment_folder='misbindings')


    plt.show()

    return locals()
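# --- Illustrative sketch (editor's addition, not part of the original code) ---
# The histogram overlay in plots_misbinding_logposterior converts a theoretical
# Fisher information value into a predicted response density: stddev ~ 1/sqrt(FI),
# turned into a Von Mises concentration, with a Gaussian fallback when the
# concentration would be numerically extreme (the 700 cutoff above). A minimal
# sketch assuming the small-angle approximation kappa ~= 1/sigma**2; the original
# relies on utils.stddev_to_kappa, whose exact form may differ:
def _sketch_fi_to_predicted_density(x, fisher_info, mu=0.0, kappa_cutoff=700.):
    import numpy as np
    from scipy.stats import vonmises, norm
    sigma = 1. / np.sqrt(fisher_info)
    kappa = 1. / sigma ** 2.  # small-angle approximation to stddev_to_kappa
    if kappa < kappa_cutoff:
        return vonmises.pdf(x, kappa, loc=mu)
    # Very concentrated responses: the Von Mises is numerically unstable,
    # so use the matching Gaussian instead (as the plotting code above does)
    return norm.pdf(x, loc=mu, scale=sigma)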
def launcher_do_memorycurve_theoretical_pbs_theoonly(args):
    '''
        Compute Fisher info for T objects.

        Get the theoretical FI and the posterior variance estimate as well
    '''

    all_parameters = vars(args)

    dataio = DataIO(output_folder=args.output_directory, label=args.label)
    variables_to_save = ['rcscale_space', 'sigma_space', 'M_space', 'T_space', 'FI_rc_theo_mult', 'repet_i', 'num_repetitions', 'use_theoretical_cov']

    save_every = 5
    run_counter = 0
    use_theoretical_cov = True
    print "Use theo cov: %d" % use_theoretical_cov

    num_repetitions = all_parameters['num_repetitions']
    check_theoretical_cov = False
    do_curvature = False
    do_precision = False
    do_var = False

    rcscale_space = np.linspace(all_parameters['rc_scale'], all_parameters['rc_scale'], 1)
    sigma_space = np.linspace(all_parameters['sigmax'], all_parameters['sigmax'], 1)
    T_space = np.arange(1, all_parameters['T']+1)

    # M_space = np.array([all_parameters['M']])
    # M_space = np.arange(5, 30, 3, dtype=int)**2.  # Ease the load on PBS...
    # M_space = np.floor(np.linspace(25, 900, 49)).astype(int)
    M_space = np.arange(5, 22, dtype=int)**2

    FI_rc_theo_mult = np.nan*np.empty((rcscale_space.size, sigma_space.size, M_space.size, T_space.size, num_repetitions), dtype=float)

    if do_curvature:
        variables_to_save.append('FI_rc_curv_mult')
        FI_rc_curv_mult = np.nan*np.empty((rcscale_space.size, sigma_space.size, M_space.size, T_space.size, 2, num_repetitions), dtype=float)
    if do_precision:
        variables_to_save.append('FI_rc_precision_mult')
        FI_rc_precision_mult = np.nan*np.empty((rcscale_space.size, sigma_space.size, M_space.size, T_space.size, num_repetitions), dtype=float)
    if do_var:
        variables_to_save.append('FI_rc_var_mult')
        FI_rc_var_mult = np.nan*np.empty((rcscale_space.size, sigma_space.size, M_space.size, T_space.size, 2, num_repetitions), dtype=float)

    # Show the progress in a nice way
    search_progress = progress.Progress(rcscale_space.size*sigma_space.size*T_space.size*M_space.size*num_repetitions)

    print rcscale_space
    print sigma_space
    print M_space
    print T_space

    for repet_i in xrange(num_repetitions):
        for j, sigma in enumerate(sigma_space):
            for i, rc_scale in enumerate(rcscale_space):
                for m_i, M in enumerate(M_space):
                    for t_i, t in enumerate(T_space):
                        ### Estimate the Fisher Information
                        print "FI T effect, T: %d/%d, rcscale %.3f, sigma %.3f, M %d, (%d/%d). %.2f%%, %s left - %s" % (t, T_space[-1], rc_scale, sigma, M, repet_i+1, num_repetitions, search_progress.percentage(), search_progress.time_remaining_str(), search_progress.eta_str())

                        # Current parameter values
                        all_parameters['rc_scale']  = rc_scale
                        all_parameters['sigmax']    = sigma
                        all_parameters['M']         = M
                        all_parameters['T']         = t

                        ### WORK UNIT

                        # Fisher info
                        ###
                        if use_theoretical_cov:
                            if check_theoretical_cov:
                                (random_network, data_gen, stat_meas, sampler) = launchers.init_everything(all_parameters)

                                computed_cov = random_network.compute_covariance_KL(sigma_2=(all_parameters['sigmax']**2. + all_parameters['sigmay']**2.), T=t, beta=1.0, precision=50)

                                cov_div = np.mean((sampler.noise_covariance-computed_cov)**2.)
                                if cov_div > 0.00001:
                                    print cov_div
                                    print all_parameters

                                    pcolor_2d_data(computed_cov)
                                    pcolor_2d_data(sampler.noise_covariance)
                                    plt.show()

                                    raise ValueError('Large divergence between measured and theoretical covariance!')
                            else:
                                random_network = init_random_network(all_parameters)

                                computed_cov = random_network.compute_covariance_KL(sigma_2=(all_parameters['sigmax']**2. + all_parameters['sigmay']**2.), T=t, beta=1.0, precision=50)


                            # Estimate the fisher information here only
                            print "theoretical FI"
                            FI_rc_theo_mult[i, j, m_i, t_i, repet_i] = random_network.compute_fisher_information(stimulus_input=(0.0, 0.0), cov_stim=computed_cov)
                            print FI_rc_theo_mult[i, j, m_i, t_i, repet_i]

                        else:
                            (random_network, data_gen, stat_meas, sampler) = launchers.init_everything(all_parameters)
                            computed_cov = sampler.noise_covariance
                            # computed_cov = stat_meas.model_parameters['covariances'][-1, 0]

                            # Fisher info
                            print "theoretical FI"
                            FI_rc_theo_mult[i, j, m_i, t_i, repet_i] = random_network.compute_fisher_information(stimulus_input=(0.0, 0.0), cov_stim=computed_cov)
                            print FI_rc_theo_mult[i, j, m_i, t_i, repet_i]

                            # Estimate the rest, possible here.
                            if do_curvature:
                                print "from curvature..."
                                fi_curv_dict = sampler.estimate_fisher_info_from_posterior_avg_randomsubset(subset_size=20, num_points=1000, full_stats=True)
                                (FI_rc_curv_mult[i, j, m_i, t_i, 0, repet_i], FI_rc_curv_mult[i, j, m_i, t_i, 1, repet_i]) = (fi_curv_dict['mean'], fi_curv_dict['std'])
                                print FI_rc_curv_mult[i, j, m_i, t_i, :, repet_i]

                            if do_var:
                                print "from variance of posterior..."
                                fi_var_dict = sampler.estimate_precision_from_posterior_avg_randomsubset(subset_size=20, num_points=1000, full_stats=True)
                                FI_rc_var_mult[i, j, m_i, t_i, 0, repet_i], FI_rc_var_mult[i, j, m_i, t_i, 1, repet_i] = (fi_var_dict['mean'], fi_var_dict['std'])
                                print FI_rc_var_mult[i, j, m_i, t_i, :, repet_i]

                            if do_precision:
                                print "from precision of recall..."
                                if all_parameters['inference_method'] == 'sample':
                                    # Sample thetas
                                    sampler.sample_theta(num_samples=all_parameters['num_samples'], burn_samples=100, selection_method=all_parameters['selection_method'], selection_num_samples=all_parameters['selection_num_samples'], integrate_tc_out=False, debug=False)
                                elif all_parameters['inference_method'] == 'max_lik':
                                    # Just use the ML value for the theta
                                    sampler.set_theta_max_likelihood(num_points=200, post_optimise=True)

                                FI_rc_precision_mult[i, j, m_i, t_i, repet_i] = sampler.get_precision()
                                print FI_rc_precision_mult[i, j, m_i, t_i, repet_i]

                        ### DONE WORK UNIT

                        search_progress.increment()

                        if run_counter % save_every == 0 or search_progress.done():
                            dataio.save_variables(variables_to_save, locals())

                        run_counter += 1
    return locals()
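# --- Illustrative sketch (editor's addition, not part of the original code) ---
# The launchers above checkpoint their result arrays every few work units with
# "run_counter % save_every == 0 or search_progress.done()". A minimal standalone
# version of that pattern; work_fn and save_fn are hypothetical stand-ins for one
# work unit and for dataio.save_variables(...):
def _sketch_periodic_checkpoint(work_items, work_fn, save_fn, save_every=5):
    num_items = len(work_items)
    for run_counter, item in enumerate(work_items):
        work_fn(item)
        # Save every save_every units and always after the last one,
        # so a crash loses at most save_every work units
        if run_counter % save_every == 0 or run_counter == num_items - 1:
            save_fn()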
def launcher_do_memory_curve_simult(args):
    '''
        Get the memory curves for 1...T objects, simultaneous presentation
        (forces alpha=1 and recalls only one object for each T, so runs are more independent).
    '''

    # Should collect all responses?
    collect_responses = False

    # Should compute Fisher info?
    fisher_info = True

    # Build the random network
    alpha = 1.
    time_weights_parameters = dict(weighting_alpha=alpha, weighting_beta=1.0, specific_weighting=0.1, weight_prior='uniform')

    # Initialise the output file
    dataio = DataIO(output_folder=args.output_directory, label=args.label)
    output_string = dataio.filename

    all_parameters = vars(args)

    # List of variables to save
    if collect_responses:
        variables_to_output = ['all_precisions', 'args', 'num_repetitions', 'output_string', 'power_law_params', 'repet_i', 'all_responses', 'all_targets', 'all_nontargets', 'results_fi', 'results_fi_largen']
    else:
        variables_to_output = ['all_precisions', 'args', 'num_repetitions', 'output_string', 'power_law_params', 'repet_i', 'results_fi', 'results_fi_largen']

    print "Doing do_multiple_memory_curve"
    print "max_T: %s" % args.T
    print "File: %s" % output_string

    all_precisions = np.nan*np.empty((args.T, args.num_repetitions))
    results_fi = np.nan*np.empty((args.T, args.num_repetitions))
    results_fi_largen = np.nan*np.empty((args.T, args.num_repetitions))

    power_law_params = np.nan*np.empty(2)

    if collect_responses:
        all_responses = np.nan*np.empty((args.T, args.num_repetitions, args.N))
        all_targets = np.nan*np.empty((args.T, args.num_repetitions, args.N))
        all_nontargets = np.nan*np.empty((args.T, args.num_repetitions, args.N, args.T-1))

    # Construct different datasets, with t objects
    for repet_i in xrange(args.num_repetitions):

        for t in xrange(args.T):

            #### Get multiple examples of precisions, for different number of neurons. #####
            all_parameters['T'] = t+1

            # Init everything
            (random_network, data_gen, stat_meas, sampler) = launchers.init_everything(all_parameters)

            print "  doing T=%d %d/%d" % (t+1, repet_i+1, args.num_repetitions)

            if args.inference_method == 'sample':
                # Sample thetas
                sampler.sample_theta(num_samples=args.num_samples, burn_samples=100, selection_method=args.selection_method, selection_num_samples=args.selection_num_samples, integrate_tc_out=False, debug=False)
            elif args.inference_method == 'max_lik':
                # Just use the ML value for the theta
                sampler.set_theta_max_likelihood(num_points=200, post_optimise=True)
            else:
                raise ValueError('Wrong value for inference_method')

            # Save the precision
            all_precisions[t, repet_i] = sampler.get_precision(remove_chance_level=False, correction_theo_fit=1.0)
            # all_precisions[t, repet_i] = 1./sampler.compute_angle_error()['std']

            print "-> %.5f" % all_precisions[t, repet_i]

            # Collect responses if needed
            if collect_responses:
                (all_responses[t, repet_i], all_targets[t, repet_i], all_nontargets[t, repet_i, :, :t]) = sampler.collect_responses()

            # Compute Fisher information as well
            if fisher_info:
                results_fi[t, repet_i] = random_network.compute_fisher_information(cov_stim=sampler.noise_covariance, kappa_different=True)
                results_fi_largen[t, repet_i] = np.mean(random_network.compute_fisher_information_theoretical(sigma=all_parameters['sigmax'] + all_parameters['sigmay'], kappa1=random_network.neurons_sigma[:, 0], kappa2=random_network.neurons_sigma[:, 1]))

            # Save to disk, unique filename
            dataio.save_variables(variables_to_output, locals())

        if args.T > 1:
            xx = np.tile(np.arange(1, args.T+1, dtype='float'), (repet_i+1, 1)).T
            power_law_params = fit_powerlaw(xx, all_precisions[:, :(repet_i+1)], should_plot=True)

        print '====> Power law fits: exponent: %.4f, bias: %.4f' % (power_law_params[0], power_law_params[1])

        # Save to disk, unique filename
        dataio.save_variables(variables_to_output, locals())

    print all_precisions


    # Save to disk, unique filename
    dataio.save_variables(variables_to_output, locals())


    f = plt.figure()
    ax = f.add_subplot(111)
    ax = plot_mean_std_area(np.arange(1, args.T+1), np.mean(all_precisions, 1), np.std(all_precisions, 1), linewidth=2, ax_handle=ax, fmt='o-', markersize=10)
    ax = plot_mean_std_area(np.arange(1, args.T+1), np.mean(results_fi, 1), np.std(results_fi, 1), linewidth=2, ax_handle=ax, fmt='o-', markersize=10)
    # ax = plot_mean_std_area(np.arange(args.T), np.mean(results_fi_largen, 1), np.std(results_fi_largen, 1), ax_handle=ax)
    ax.set_xlabel('Number of objects')
    ax.set_ylabel('Precision [rad]')
    plt.legend(['Precision of samples', 'Fisher Information'])
    plt.xticks(np.arange(1, args.T+1))
    plt.xlim((0.9, args.T+0.1))

    dataio.save_current_figure('memory_curve_precision_fisherinfo-{label}-{unique_id}.pdf')
    print "Done: %s" % output_string
    return locals()
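# --- Illustrative sketch, not part of the original code ---
# fit_powerlaw above is assumed to fit precision ~ bias * T**exponent and to
# return (exponent, bias). A minimal log-log least-squares version, for arrays of
# matching shape (e.g. the tiled xx and all_precisions used above), could be:
import numpy as np

def fit_powerlaw_sketch(T_values, precisions):
    T_flat = np.asarray(T_values, dtype=float).flatten()
    prec_flat = np.asarray(precisions, dtype=float).flatten()
    valid = np.isfinite(prec_flat) & (prec_flat > 0)
    exponent, log_bias = np.polyfit(np.log(T_flat[valid]), np.log(prec_flat[valid]), 1)
    return exponent, np.exp(log_bias)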
def launcher_do_memorycurve_theoretical(args):
    '''
        Compute the FI for different numbers of items (T)

        Should show a 1/T dependence, i.e. a power law of exponent -1.
    '''

    all_parameters = vars(args)
    data_to_plot = {}

    dataio = DataIO(output_folder=args.output_directory, label=args.label)
    variables_to_save = ['rcscale_space', 'sigma_space', 'T_space', 'FI_rc_curv_mult', 'FI_rc_var_mult', 'FI_rc_precision_mult', 'FI_rc_theo_mult', 'repet_i', 'num_repetitions']

    save_every = 5
    run_counter = 0
    use_theoretical_cov = all_parameters['use_theoretical_cov']
    print "Use theo cov: %d" % use_theoretical_cov

    num_repetitions = all_parameters['num_repetitions']
    check_theoretical_cov = False
    do_curvature = False
    do_precision = True
    do_var = True

    # rcscale_space = np.linspace(0.5, 15.0, 21.)
    # rcscale_space = np.linspace(0.01, 15., 21.)
    # rcscale_space = np.linspace(4.0, 4.0, 1.)
    rcscale_space = np.linspace(all_parameters['rc_scale'], all_parameters['rc_scale'], 1)

    # sigma_space = np.linspace(0.15, 0.3, 10.)
    # sigma_space = np.linspace(0.1, 0.1, 1.0)
    sigma_space = np.linspace(all_parameters['sigmax'], all_parameters['sigmax'], 1)

    T_space = np.arange(1, all_parameters['T']+1)

    FI_rc_curv_mult = np.nan*np.empty((rcscale_space.size, sigma_space.size, T_space.size, 2, num_repetitions), dtype=float)
    FI_rc_var_mult = np.nan*np.empty((rcscale_space.size, sigma_space.size, T_space.size, 2, num_repetitions), dtype=float)
    FI_rc_precision_mult = np.nan*np.empty((rcscale_space.size, sigma_space.size, T_space.size, num_repetitions), dtype=float)
    FI_rc_theo_mult = np.nan*np.empty((rcscale_space.size, sigma_space.size, T_space.size, num_repetitions), dtype=float)

    # Show the progress in a nice way
    search_progress = progress.Progress(rcscale_space.size*sigma_space.size*T_space.size*num_repetitions)

    print rcscale_space
    print sigma_space

    for repet_i in xrange(num_repetitions):
        for j, sigma in enumerate(sigma_space):
            for i, rc_scale in enumerate(rcscale_space):
                for t_i, t in enumerate(T_space):

                    ### Estimate the Fisher Information
                    print "FI T effect, T: %d/%d, rcscale %.3f, sigma %.3f (%d/%d). %.2f%%, %s left - %s" % (t, T_space[-1], rc_scale, sigma, repet_i+1, num_repetitions, search_progress.percentage(), search_progress.time_remaining_str(), search_progress.eta_str())

                    # Current parameter values
                    all_parameters['rc_scale']  = rc_scale
                    all_parameters['sigmax']    = sigma
                    all_parameters['T']         = t

                    ### WORK UNIT

                    # Fisher info
                    ###
                    if use_theoretical_cov:
                        if check_theoretical_cov:
                            (random_network, data_gen, stat_meas, sampler) = launchers.init_everything(all_parameters)

                            computed_cov = random_network.compute_covariance_KL(sigma_2=(all_parameters['sigmax']**2. + all_parameters['sigmay']**2.), T=t, beta=1.0, precision=50)

                            cov_div = np.mean((sampler.noise_covariance-computed_cov)**2.)
                            if cov_div > 0.00001:
                                print cov_div
                                print all_parameters

                                pcolor_2d_data(computed_cov)
                                pcolor_2d_data(sampler.noise_covariance)
                                plt.show()

                                raise ValueError('Large divergence between measured and theoretical covariance!')
                        else:
                            random_network = launchers.init_random_network(all_parameters)

                            computed_cov = random_network.compute_covariance_KL(sigma_2=(all_parameters['sigmax']**2. + all_parameters['sigmay']**2.), T=t, beta=1.0, precision=50)


                        # Estimate the fisher information here only
                        print "theoretical FI"
                        FI_rc_theo_mult[i, j, t_i, repet_i] = random_network.compute_fisher_information(stimulus_input=(0.0, 0.0), cov_stim=computed_cov, kappa_different=True)
                        print FI_rc_theo_mult[i, j, t_i, repet_i]

                    else:
                        (random_network, data_gen, stat_meas, sampler) = launchers.init_everything(all_parameters)
                        computed_cov = sampler.noise_covariance
                        # computed_cov = stat_meas.model_parameters['covariances'][-1, 0]

                        # Fisher info
                        print "theoretical FI"
                        FI_rc_theo_mult[i, j, t_i, repet_i] = random_network.compute_fisher_information(stimulus_input=(0.0, 0.0), cov_stim=computed_cov, kappa_different=True)
                        # print FI_rc_theo_mult[i, j, t_i, repet_i]
                        print FI_rc_theo_mult

                        # Estimate the rest, possible here.
                        if do_curvature:
                            print "from curvature..."
                            fi_curv_dict = sampler.estimate_fisher_info_from_posterior_avg_randomsubset(subset_size=20, num_points=1000, full_stats=True)
                            (FI_rc_curv_mult[i, j, t_i, 0, repet_i], FI_rc_curv_mult[i, j, t_i, 1, repet_i]) = (fi_curv_dict['mean'], fi_curv_dict['std'])
                            print FI_rc_curv_mult[i, j, t_i, :, repet_i]

                        if do_var:
                                print "from variance of posterior..."
                                fi_var_dict = sampler.estimate_precision_from_posterior_avg_randomsubset(subset_size=20, num_points=1000, full_stats=True)
                                FI_rc_var_mult[i, j, t_i, 0, repet_i], FI_rc_var_mult[i, j, t_i, 1, repet_i] = (fi_var_dict['mean'], fi_var_dict['std'])
                                # print FI_rc_var_mult[i, j, t_i, :, repet_i]
                                print FI_rc_var_mult[:, :, :, 0, :]

                        if do_precision:
                            print "from precision of recall..."
                            if all_parameters['inference_method'] == 'sample':
                                # Sample thetas
                                sampler.sample_theta(num_samples=all_parameters['num_samples'], burn_samples=100, selection_method=all_parameters['selection_method'], selection_num_samples=all_parameters['selection_num_samples'], integrate_tc_out=False, debug=False)
                            elif all_parameters['inference_method'] == 'max_lik':
                                # Just use the ML value for the theta
                                sampler.set_theta_max_likelihood(num_points=200, post_optimise=True)

                            FI_rc_precision_mult[i, j, t_i, repet_i] = sampler.get_precision()
                            # print FI_rc_precision_mult[i, j, t_i, repet_i]
                            print FI_rc_precision_mult

                    ### DONE WORK UNIT

                    search_progress.increment()

                    if run_counter % save_every == 0 or search_progress.done():
                        dataio.save_variables(variables_to_save, locals())

                        # plots
                        for curr_data in variables_to_save:
                            data_to_plot[curr_data] = locals()[curr_data]

                        # plots_fisher_info_param_search(data_to_plot, dataio)

                    run_counter += 1


    return locals()
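# --- Illustrative sketch, not part of the original code ---
# The docstring above expects FI(T) ~ 1/T. A quick check of that exponent on the
# saved FI_rc_theo_mult array (single rc_scale and sigma as used here, averaging
# over repetitions) could look like:
import numpy as np

def check_inverse_T_scaling_sketch(T_space, FI_rc_theo_mult):
    FI_mean_per_T = np.nanmean(FI_rc_theo_mult[0, 0], axis=-1)
    exponent = np.polyfit(np.log(T_space), np.log(FI_mean_per_T), 1)[0]
    print "Fitted FI exponent (expected close to -1):", exponent
    return exponent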
def launcher_check_fisher_fit_1obj_2016(args):
  print "Doing a piece of work for launcher_check_fisher_fit_1obj_2016"

  all_parameters = utils.argparse_2_dict(args)
  print all_parameters

  if all_parameters['burn_samples'] + all_parameters['num_samples'] < 200:
    print "WARNING> you do not have enough samples I think!", all_parameters['burn_samples'] + all_parameters['num_samples']

  if 'plots_during_simulation_callback' in all_parameters:
    plots_during_simulation_callback = all_parameters[
        'plots_during_simulation_callback']
    del all_parameters['plots_during_simulation_callback']
  else:
    plots_during_simulation_callback = None

  # Create DataIO
  #  (complete label with current variable state)
  dataio = DataIO.DataIO(
      output_folder=all_parameters['output_directory'],
      label=all_parameters['label'].format(**all_parameters))
  save_every = 1
  run_counter = 0

  # Result arrays
  result_all_precisions = np.nan * np.empty(
      (all_parameters['num_repetitions']), dtype=float)
  result_FI_rc_curv = np.nan * np.empty(
      (all_parameters['N'], all_parameters['num_repetitions']), dtype=float)
  result_FI_rc_theo = np.nan * np.empty(
      (all_parameters['N'], all_parameters['num_repetitions']), dtype=float)
  result_FI_rc_theocov = np.nan * np.empty(
      (all_parameters['N'], all_parameters['num_repetitions']), dtype=float)
  result_FI_rc_theo_circulant = np.nan * np.empty(
      (all_parameters['N'], all_parameters['num_repetitions']), dtype=float)
  result_FI_rc_theo_largeN = np.nan * np.empty(
      (all_parameters['num_repetitions']), dtype=float)
  result_marginal_inv_FI = np.nan * np.ones(
      (2, all_parameters['num_repetitions']))
  result_marginal_FI = np.nan * np.ones((2, all_parameters['num_repetitions']))

  result_em_fits = np.nan * np.empty((6, all_parameters['num_repetitions']))

  search_progress = progress.Progress(all_parameters['num_repetitions'])

  for repet_i in xrange(all_parameters['num_repetitions']):
    print "%.2f%%, %s left - %s" % (search_progress.percentage(),
                                    search_progress.time_remaining_str(),
                                    search_progress.eta_str())

    print "Fisher Info check, rep %d/%d" % (repet_i + 1,
                                            all_parameters['num_repetitions'])

    ### WORK WORK WORK work? ###

    # Instantiate
    (_, _, _, sampler) = launchers.init_everything(all_parameters)

    # Sample
    sampler.run_inference(all_parameters)

    # Compute precision
    print "get precision..."
    result_all_precisions[repet_i] = sampler.get_precision()

    # Theoretical Fisher info
    if all_parameters['code_type'] != 'hierarchical':
      print "theoretical FI"
      result_FI_rc_theo[:, repet_i] = (
          sampler.estimate_fisher_info_theocov(use_theoretical_cov=False))
      result_FI_rc_theocov[:, repet_i] = (
          sampler.estimate_fisher_info_theocov(use_theoretical_cov=True))
      result_FI_rc_theo_largeN[repet_i] = (
          sampler.estimate_fisher_info_theocov_largen(use_theoretical_cov=True)
      )
      result_FI_rc_theo_circulant[:, repet_i] = (
          sampler.estimate_fisher_info_circulant())
    # Fisher Info from curvature
    print "Compute fisher from curvature"
    fi_curv_dict = sampler.estimate_fisher_info_from_posterior_avg(
        num_points=500, full_stats=True)
    result_FI_rc_curv[:, repet_i] = fi_curv_dict['all']

    # Fit mixture model
    print "fit mixture model..."
    curr_params_fit = sampler.fit_mixture_model(use_all_targets=False)
    curr_params_fit['mixt_nontargets_sum'] = np.sum(
        curr_params_fit['mixt_nontargets'])
    result_em_fits[..., repet_i] = [
        curr_params_fit[key]
        for key in ('kappa', 'mixt_target', 'mixt_nontargets_sum',
                    'mixt_random', 'train_LL', 'bic')
    ]

    # Compute marginal inverse fisher info
    print "compute marginal inverse fisher info"
    marginal_fi_dict = sampler.estimate_marginal_inverse_fisher_info_montecarlo(
    )
    result_marginal_inv_FI[:, repet_i] = [
        marginal_fi_dict[key] for key in ('inv_FI', 'inv_FI_std')
    ]
    result_marginal_FI[:, repet_i] = [
        marginal_fi_dict[key] for key in ('FI', 'FI_std')
    ]

    ## Run callback function if exists
    if plots_during_simulation_callback:
      print "Doing plots..."
      try:
        # Be extra safe: if the plotting callback fails, the simulation must still continue!
        plots_during_simulation_callback['function'](
            locals(), plots_during_simulation_callback['parameters'])
        print "plots done."
      except Exception:
        print "error during plotting callback function", plots_during_simulation_callback[
            'function'], plots_during_simulation_callback['parameters']

    ### /Work ###
    search_progress.increment()
    if run_counter % save_every == 0 or search_progress.done():
      dataio.save_variables_default(locals())
    run_counter += 1

  # Finished
  dataio.save_variables_default(locals())

  print "All finished"
  return locals()
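# --- Illustrative sketch, not part of the original code ---
# After a run of launcher_check_fisher_fit_1obj_2016, the different Fisher
# information estimates can be summarised with nan-aware statistics and compared
# against the measured precision. A minimal summary over the in-memory arrays:
import numpy as np

def summarise_fisher_check_sketch(result_all_precisions, result_FI_rc_curv, result_FI_rc_theo):
    print "precision, mean over repetitions :", np.nanmean(result_all_precisions)
    print "FI from curvature, overall mean  :", np.nanmean(result_FI_rc_curv)
    print "FI theoretical, overall mean     :", np.nanmean(result_FI_rc_theo)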
def launcher_do_hierarchical_precision_M_Mlower_pbs(args):
    '''
        Compare the evolution of the precision curve as the number of neurons in a hierarchical network increases.
    '''


    print "Doing a piece of work for launcher_do_hierarchical_precision_M_Mlower_pbs"
    save_all_output = True

    try:
        # Convert Argparse.Namespace to dict
        all_parameters = vars(args)
    except TypeError:
        # Assume it's already done
        assert type(args) is dict, "args is neither Namespace nor dict, WHY?"
        all_parameters = args

    code_type = 'hierarchical'

    dataio = DataIO(output_folder=all_parameters['output_directory'], label=all_parameters['label'])
    variables_to_save = ['repet_i', 'num_repetitions']

    save_every = 5
    run_counter = 0

    num_repetitions = all_parameters['num_repetitions']

    M_space = np.array([all_parameters['M']])
    M_lower_space = np.array([all_parameters['M_layer_one']])
    T_space = np.arange(1, all_parameters['T']+1)

    results_precision_M_T = np.nan*np.empty((M_space.size, M_lower_space.size, T_space.size, num_repetitions), dtype=float)
    results_emfits_M_T = np.nan*np.empty((M_space.size, M_lower_space.size, T_space.size, 5, num_repetitions), dtype=float)

    if save_all_output:
        result_responses = np.nan*np.empty((M_space.size, M_lower_space.size, T_space.size, all_parameters['N'], num_repetitions))
        result_targets = np.nan*np.empty((M_space.size, M_lower_space.size, T_space.size, all_parameters['N'], num_repetitions))
        result_nontargets = np.nan*np.empty((M_space.size, M_lower_space.size, T_space.size, all_parameters['N'], all_parameters['T']-1, num_repetitions))

    # Show the progress
    search_progress = progress.Progress(T_space.size*M_space.size*M_lower_space.size*num_repetitions)

    print M_space
    print M_lower_space
    print T_space

    for repet_i in xrange(num_repetitions):
        for m_i, M in enumerate(M_space):
            for m_l_i, M_layer_one in enumerate(M_lower_space):
                for t_i, t in enumerate(T_space):
                    # Will estimate the precision

                    print "Precision as function of N, hierarchical network, T: %d/%d, M %d, M_layer_one %d, (%d/%d). %.2f%%, %s left - %s" % (t, T_space[-1], M, M_layer_one, repet_i+1, num_repetitions, search_progress.percentage(), search_progress.time_remaining_str(), search_progress.eta_str())

                    # Current parameter values
                    all_parameters['M']             = M
                    all_parameters['T']             = t
                    all_parameters['code_type']     = code_type
                    all_parameters['M_layer_one']   = M_layer_one

                    ### WORK UNIT
                    (random_network, data_gen, stat_meas, sampler) = launchers.init_everything(all_parameters)

                    # Sample / max like
                    sampler.run_inference(all_parameters)

                    print 'get precision...'
                    results_precision_M_T[m_i, m_l_i, t_i, repet_i] = sampler.get_precision()
                    print results_precision_M_T[m_i, m_l_i, t_i, repet_i]

                    print "fit mixture model..."
                    curr_params_fit = sampler.fit_mixture_model(use_all_targets=True)
                    curr_params_fit['mixt_nontargets_sum'] = np.sum(curr_params_fit['mixt_nontargets'])
                    results_emfits_M_T[m_i, m_l_i, t_i, :, repet_i] = [curr_params_fit[key] for key in ('kappa', 'mixt_target', 'mixt_nontargets_sum', 'mixt_random', 'train_LL')]

                    if save_all_output:
                        (result_responses[m_i, m_l_i, t_i, :, repet_i], result_targets[m_i, m_l_i, t_i, :, repet_i], result_nontargets[m_i, m_l_i, t_i, :, :t_i, repet_i]) = sampler.collect_responses()

                    ### DONE WORK UNIT

                    search_progress.increment()

                    if run_counter % save_every == 0 or search_progress.done():
                        dataio.save_variables_default(locals(), variables_to_save)

                    run_counter += 1

    print "All finished"

    return locals()
Example #21
0
def plots_fitmixtmodel_rcscale_effect(data_pbs, generator_module=None):
    '''
        Reload runs from PBS
    '''

    #### SETUP
    #
    savefigs = True
    savedata = True

    plots_all_T = True
    plots_per_T = True

    # do_relaunch_bestparams_pbs = True

    colormap = None  # or 'cubehelix'
    plt.rcParams['font.size'] = 16
    #
    #### /SETUP

    print "Order parameters: ", data_pbs.dataset_infos['parameters']
    # parameters: M, ratio_conj, sigmax

    # Extract data
    T_space = data_pbs.loaded_data['datasets_list'][0]['T_space']

    result_em_fits_flat = np.array(data_pbs.dict_arrays['result_em_fits']['results_flat'])
    result_precisions_flat = np.array(data_pbs.dict_arrays['result_all_precisions']['results_flat'])
    result_dist_bays09_flat = np.array(data_pbs.dict_arrays['result_dist_bays09']['results_flat'])
    result_dist_gorgo11_flat = np.array(data_pbs.dict_arrays['result_dist_gorgo11']['results_flat'])
    result_dist_bays09_emmixt_KL = np.array(data_pbs.dict_arrays['result_dist_bays09_emmixt_KL']['results_flat'])
    result_dist_gorgo11_emmixt_KL = np.array(data_pbs.dict_arrays['result_dist_gorgo11_emmixt_KL']['results_flat'])
    result_parameters_flat = np.array(data_pbs.dict_arrays['result_em_fits']['parameters_flat'])

    rc_scale_space = data_pbs.loaded_data['parameters_uniques']['rc_scale']
    num_repetitions = generator_module.num_repetitions
    parameter_names_sorted = data_pbs.dataset_infos['parameters']

    dataio = DataIO(output_folder=generator_module.pbs_submission_infos['simul_out_dir'] + '/outputs/', label='global_' + dataset_infos['save_output_filename'])

    # Load bays09
    data_bays09 = load_experimental_data.load_data_bays09(fit_mixture_model=True)
    bays09_nitems = data_bays09['data_to_fit']['n_items']
    bays09_em_target = np.nan*np.empty((bays09_nitems.max(), 4))  #kappa, prob_target, prob_nontarget, prob_random
    bays09_em_target[bays09_nitems - 1] = data_bays09['em_fits_nitems_arrays']['mean'].T
    bays09_emmixt_target = bays09_em_target[:, 1:]


    ## Compute some stuff
    result_parameters_flat = result_parameters_flat.flatten()

    result_em_fits_all_avg = utils.nanmean(result_em_fits_flat, axis=-1)
    result_em_kappa_allT = result_em_fits_all_avg[..., 0]
    result_em_emmixt_allT = result_em_fits_all_avg[..., 1:4]

    result_precisions_all_avg = utils.nanmean(result_precisions_flat, axis=-1)

    # Square distance to kappa
    result_dist_bays09_allT_avg = utils.nanmean(result_dist_bays09_flat, axis=-1)
    result_dist_bays09_emmixt_KL_allT_avg = utils.nanmean(result_dist_bays09_emmixt_KL, axis=-1)

    result_dist_bays09_kappa_allT = result_dist_bays09_allT_avg[..., 0]

    # result_dist_bays09_allT_avg = utils.nanmean((result_em_fits_flat[:, :, :4] - bays09_em_target[np.newaxis, :, :, np.newaxis])**2, axis=-1)
    # result_dist_bays09_kappa_sum = np.nansum(result_dist_bays09_allT_avg[:, :, 0], axis=-1)

    # result_dist_bays09_kappa_T1_sum = result_dist_bays09_allT_avg[:, 0, 0]
    # result_dist_bays09_kappa_T25_sum = np.nansum(result_dist_bays09_allT_avg[:, 1:, 0], axis=-1)

    # # Square and KL distance for EM Mixtures
    # result_dist_bays09_emmixt_sum = np.nansum(np.nansum(result_dist_bays09_allT_avg[:, :, 1:], axis=-1), axis=-1)
    # result_dist_bays09_emmixt_T1_sum = np.nansum(result_dist_bays09_allT_avg[:, 0, 1:], axis=-1)
    # result_dist_bays09_emmixt_T25_sum = np.nansum(np.nansum(result_dist_bays09_allT_avg[:, 1:, 1:], axis=-1), axis=-1)


    # result_dist_bays09_emmixt_KL = utils.nanmean(utils.KL_div(result_em_fits_flat[:, :, 1:4], bays09_emmixt_target[np.newaxis, :, :, np.newaxis], axis=-2), axis=-1)   # KL over dimension of mixtures, then mean over repetitions
    # result_dist_bays09_emmixt_KL_sum = np.nansum(result_dist_bays09_emmixt_KL, axis=-1)  # sum over T
    # result_dist_bays09_emmixt_KL_T1_sum = result_dist_bays09_emmixt_KL[:, 0]
    # result_dist_bays09_emmixt_KL_T25_sum = np.nansum(result_dist_bays09_emmixt_KL[:, 1:], axis=-1)


    # result_dist_bays09_both_normalised = result_dist_bays09_emmixt_sum/np.max(result_dist_bays09_emmixt_sum) + result_dist_bays09_kappa_sum/np.max(result_dist_bays09_kappa_sum)

    # # Mask kappa for performance too bad
    # result_dist_bays09_kappa_sum_masked = np.ma.masked_greater(result_dist_bays09_kappa_sum, 2*np.median(result_dist_bays09_kappa_sum))
    # result_dist_bays09_emmixt_KL_sum_masked = np.ma.masked_greater(result_dist_bays09_emmixt_KL_sum, 2*np.median(result_dist_bays09_emmixt_KL_sum))
    # result_dist_bays09_both_normalised_mult_masked = 1-(1. - result_dist_bays09_emmixt_KL_sum/np.max(result_dist_bays09_emmixt_KL_sum))*(1. - result_dist_bays09_kappa_sum_masked/np.max(result_dist_bays09_kappa_sum_masked))

    # Compute optimal rc_scale
    all_args = data_pbs.loaded_data['args_list']
    specific_arg = all_args[0]
    specific_arg['autoset_parameters'] = True
    (_, _, _, sampler) = launchers.init_everything(specific_arg)
    optimal_rc_scale = sampler.random_network.rc_scale[0]

    if plots_all_T:
        # Show Kappa evolution wrt rc_scale
        f, ax = plt.subplots()
        # utils.plot_mean_std_from_samples(result_parameters_flat, np.nansum(result_em_kappa_allT, axis=-1), bins=60, bins_y=150, xlabel='rc_scale', ylabel='EM kappa', title='Kappa, summed T',  ax_handle=ax, show_scatter=False)
        utils.plot_mean_std_from_samples_rolling(result_parameters_flat, np.nansum(result_em_kappa_allT, axis=-1), window=35, xlabel='rc_scale', ylabel='EM kappa', title='Kappa, summed T',  ax_handle=ax, show_scatter=False)
        ax.axvline(x=optimal_rc_scale, color='g', linewidth=2)
        ax.axvline(x=2*optimal_rc_scale, color='r', linewidth=2)
        f.canvas.draw()

        if savefigs:
            dataio.save_current_figure('rcscaleeffect_kappa_summedT_{label}_{unique_id}.pdf')

        # Show Mixt proportions
        f, ax = plt.subplots()
        for i in xrange(3):
            # utils.plot_mean_std_from_samples(result_parameters_flat, np.nansum(result_em_emmixt_allT[..., i], axis=-1), bins=60, bins_y=100, xlabel='rc_scale', ylabel='EM mixt proportions', title='EM mixtures, summed T',  ax_handle=ax, show_scatter=False)
            utils.plot_mean_std_from_samples_rolling(result_parameters_flat, np.nansum(result_em_emmixt_allT[..., i], axis=-1), window=35, xlabel='rc_scale', ylabel='EM mixt proportions', title='EM mixtures, summed T',  ax_handle=ax, show_scatter=False)
        ax.axvline(x=optimal_rc_scale, color='g', linewidth=2)
        ax.axvline(x=2*optimal_rc_scale, color='r', linewidth=2)
        f.canvas.draw()

        if savefigs:
            dataio.save_current_figure('rcscaleeffect_mixtprop_summedT_{label}_{unique_id}.pdf')

        # Show Precision
        f, ax = plt.subplots()
        # utils.plot_mean_std_from_samples(result_parameters_flat, np.nansum(result_precisions_all_avg, axis=-1), bins=60, bins_y=150, xlabel='rc_scale', ylabel='Precision', title='Precision, summed T',  ax_handle=ax, show_scatter=False)
        utils.plot_mean_std_from_samples_rolling(result_parameters_flat, np.nansum(result_precisions_all_avg, axis=-1), window=35, xlabel='rc_scale', ylabel='Precision', title='Precision, summed T',  ax_handle=ax, show_scatter=False)
        ax.axvline(x=optimal_rc_scale, color='g', linewidth=2)
        ax.axvline(x=2*optimal_rc_scale, color='r', linewidth=2)
        f.canvas.draw()

        if savefigs:
            dataio.save_current_figure('rcscaleeffect_precision_summedT_{label}_{unique_id}.pdf')


        plt.close('all')


    if plots_per_T:
        for T_i, T in enumerate(T_space):
            # Show Kappa evolution wrt rc_scale
            f, ax = plt.subplots()
            # utils.plot_mean_std_from_samples(result_parameters_flat, result_em_kappa_allT[:, T_i], bins=40, bins_y=100, xlabel='rc_scale', ylabel='EM kappa', title='Kappa, T %d' % T,  ax_handle=ax, show_scatter=False)
            utils.plot_mean_std_from_samples_rolling(result_parameters_flat, result_em_kappa_allT[:, T_i], window=35, xlabel='rc_scale', ylabel='EM kappa', title='Kappa, T %d' % T,  ax_handle=ax, show_scatter=False)
            ax.axvline(x=optimal_rc_scale, color='g', linewidth=2)
            ax.axvline(x=2*optimal_rc_scale, color='r', linewidth=2)
            f.canvas.draw()

            if savefigs:
                dataio.save_current_figure('rcscaleeffect_kappa_T%d_{label}_{unique_id}.pdf' % T)

            # Show Mixt proportions
            f, ax = plt.subplots()
            for i in xrange(3):
                # utils.plot_mean_std_from_samples(result_parameters_flat, result_em_emmixt_allT[:, T_i, i], bins=40, bins_y=100, xlabel='rc_scale', ylabel='EM mixt proportions', title='EM mixtures, T %d' % T,  ax_handle=ax, show_scatter=False)
                utils.plot_mean_std_from_samples_rolling(result_parameters_flat, result_em_emmixt_allT[:, T_i, i], window=35, xlabel='rc_scale', ylabel='EM mixt proportions', title='EM mixtures, T %d' % T,  ax_handle=ax, show_scatter=False)
            ax.axvline(x=optimal_rc_scale, color='g', linewidth=2)
            ax.axvline(x=2*optimal_rc_scale, color='r', linewidth=2)
            f.canvas.draw()

            if savefigs:
                dataio.save_current_figure('rcscaleeffect_mixtprop_T%d_{label}_{unique_id}.pdf' % T)

            # Show Precision
            f, ax = plt.subplots()
            # utils.plot_mean_std_from_samples(result_parameters_flat, result_precisions_all_avg[:, T_i], bins=40, bins_y=100, xlabel='rc_scale', ylabel='Precision', title='Precision, T %d' % T,  ax_handle=ax, show_scatter=False)
            utils.plot_mean_std_from_samples_rolling(result_parameters_flat, result_precisions_all_avg[:, T_i], window=35, xlabel='rc_scale', ylabel='Precision', title='Precision, T %d' % T,  ax_handle=ax, show_scatter=False)
            ax.axvline(x=optimal_rc_scale, color='g', linewidth=2)
            ax.axvline(x=2*optimal_rc_scale, color='r', linewidth=2)
            f.canvas.draw()

            if savefigs:
                dataio.save_current_figure('rcscaleeffect_precision_T%d_{label}_{unique_id}.pdf' % T)

            plt.close('all')





    # # Interpolate
    # if plots_interpolate:

    #     sigmax_target = 0.9

    #     M_interp_space = np.arange(6, 625, 5)
    #     ratio_interp_space = np.linspace(0.01, 1.0, 50)
    #     # sigmax_interp_space = np.linspace(0.01, 1.0, 50)
    #     sigmax_interp_space = np.array([sigmax_target])
    #     params_crossspace = np.array(utils.cross(M_interp_space, ratio_interp_space, sigmax_interp_space))

    #     interpolated_data = rbf_interpolator(params_crossspace[:, 0], params_crossspace[:, 1], params_crossspace[:, 2]).reshape((M_interp_space.size, ratio_interp_space.size))

    #     utils.pcolor_2d_data(interpolated_data, M_interp_space, ratio_interp_space, 'M', 'ratio', 'interpolated, fixing sigmax= %.2f' % sigmax_target)

    #     points_closeby = ((result_parameters_flat[:, 2] - sigmax_target)**2)< 0.01
    #     plt.figure()
    #     # plt.imshow(interpolated_data, extent=(M_interp_space.min(), M_interp_space.max(), ratio_interp_space.min(), ratio_interp_space.max()))
    #     plt.imshow(interpolated_data)
    #     plt.scatter(result_parameters_flat[points_closeby, 0], result_parameters_flat[points_closeby, 1], s=100, c=result_fitexperiments_bic_avg[points_closeby], marker='o')


    # if plot_per_ratio:
    #     # Plot the evolution of loglike as a function of sigmax, with std shown
    #     for ratio_conj_i, ratio_conj in enumerate(ratio_space):
    #         ax = utils.plot_mean_std_area(sigmax_space, result_log_posterior_mean[ratio_conj_i], result_log_posterior_std[ratio_conj_i])

    #         ax.get_figure().canvas.draw()

    #         if savefigs:
    #             dataio.save_current_figure('results_fitexp_%s_loglike_ratioconj%.2f_{label}_global_{unique_id}.pdf' % (exp_dataset, ratio_conj))



    all_args = data_pbs.loaded_data['args_list']
    variables_to_save = ['parameter_names_sorted']

    if savedata:
        dataio.save_variables_default(locals(), variables_to_save)
        dataio.make_link_output_to_dropbox(dropbox_current_experiment_folder='rcscale_characterisation')


    plt.show()

    return locals()
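# --- Illustrative sketch, not part of the original code ---
# utils.plot_mean_std_from_samples_rolling used above is assumed to sort the
# scattered (rc_scale, value) samples and slide a fixed-size window over them to
# obtain a smooth mean +/- std curve. A minimal standalone version of that statistic:
import numpy as np

def rolling_mean_std_sketch(x, y, window=35):
    order = np.argsort(x)
    x_sorted = np.asarray(x)[order]
    y_sorted = np.asarray(y)[order]
    centers, means, stds = [], [], []
    for start in xrange(x_sorted.size - window + 1):
        window_slice = slice(start, start + window)
        centers.append(np.mean(x_sorted[window_slice]))
        means.append(np.nanmean(y_sorted[window_slice]))
        stds.append(np.nanstd(y_sorted[window_slice]))
    return np.array(centers), np.array(means), np.array(stds)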
def launcher_do_hierarchical_precision_M_sparsity_sigmaweight_feature(args):
    '''
        Compare the evolution of the precision curve as the sparsity, sigma and M change, for a hierarchical code with a feature-based first layer
    '''

    all_parameters = vars(args)

    code_type = 'hierarchical'

    dataio = DataIO(output_folder=args.output_directory, label=args.label)
    # variables_to_save = ['M_space', 'T_space',  'repet_i', 'num_repetitions', 'results_precision_N', 'result_responses', 'result_targets', 'result_nontargets']
    variables_to_save = ['M_space', 'T_space', 'sparsity_space', 'sigma_weights_space', 'repet_i', 'num_repetitions', 'results_precision_M_T']

    save_every = 5
    run_counter = 0

    num_repetitions = all_parameters['num_repetitions']

    # M_space = np.array([all_parameters['M']])
    # M_space = np.array([4*4, 5*5, 7*7, 8*8, 9*9, 10*10, 15*15, 20*20])
    M_space = np.linspace(5, 500, 10)
    sparsity_space = np.linspace(0.01, 1.0, 10)
    sigma_weights_space = np.linspace(0.1, 2.0, 10)
    T_space = np.arange(1, all_parameters['T']+1)

    results_precision_M_T = np.nan*np.empty((M_space.size, sparsity_space.size, sigma_weights_space.size, T_space.size, num_repetitions), dtype=float)
    # result_responses = np.nan*np.empty((M_space.size, sparsity_space.size, sigma_weights_space.size, T_space.size, num_repetitions, all_parameters['N']))
    # result_targets = np.nan*np.empty((M_space.size, sparsity_space.size, sigma_weights_space.size, T_space.size, num_repetitions, all_parameters['N']))
    # result_nontargets = np.nan*np.empty((M_space.size, sparsity_space.size, sigma_weights_space.size, T_space.size, num_repetitions, all_parameters['N'], all_parameters['T']-1))

    all_parameters['type_layer_one'] = 'feature'

    # Show the progress
    search_progress = progress.Progress(T_space.size*M_space.size*sigma_weights_space.size*sparsity_space.size*num_repetitions)

    print M_space
    print sparsity_space
    print sigma_weights_space
    print T_space

    for repet_i in xrange(num_repetitions):
        for m_i, M in enumerate(M_space):
            for s_i, sparsity in enumerate(sparsity_space):
                for sw_i, sigma_weights in enumerate(sigma_weights_space):
                    for t_i, t in enumerate(T_space):
                        # Will estimate the precision

                        print "Precision as function of N, hierarchical network, T: %d/%d, M %d, sparsity %.3f, weights: %.2f, (%d/%d). %.2f%%, %s left - %s" % (t, T_space[-1], M, sparsity, sigma_weights, repet_i+1, num_repetitions, search_progress.percentage(), search_progress.time_remaining_str(), search_progress.eta_str())

                        # Current parameter values
                        all_parameters['M']             = M
                        all_parameters['T']             = t
                        all_parameters['code_type']     = code_type
                        all_parameters['sparsity']      = sparsity
                        all_parameters['sigma_weights'] = sigma_weights

                        ### WORK UNIT
                        (random_network, data_gen, stat_meas, sampler) = launchers.init_everything(all_parameters)

                        if all_parameters['inference_method'] == 'sample':
                            # Sample thetas
                            sampler.sample_theta(num_samples=all_parameters['num_samples'], burn_samples=100, selection_method=all_parameters['selection_method'], selection_num_samples=all_parameters['selection_num_samples'], integrate_tc_out=False, debug=False)
                        elif all_parameters['inference_method'] == 'max_lik':
                            # Just use the ML value for the theta
                            sampler.set_theta_max_likelihood(num_points=150, post_optimise=True)

                        results_precision_M_T[m_i, s_i, sw_i, t_i, repet_i] = sampler.get_precision()
                        print results_precision_M_T[m_i, s_i, sw_i, t_i, repet_i]

                        # (result_responses[m_i, s_i, t_i, repet_i], result_targets[m_i, s_i, t_i, repet_i], result_nontargets[m_i, s_i, t_i, repet_i, :, :t_i]) = sampler.collect_responses()

                        ### DONE WORK UNIT

                        search_progress.increment()

                        if run_counter % save_every == 0 or search_progress.done():
                            dataio.save_variables(variables_to_save, locals())

                        run_counter += 1

    return locals()
Example #23
0
def test_loglike_modelselection():
    '''
        Check if the LL computation is correct for model selection

        Use specific data, generated from a given model. This model should then have max LL.
    '''

    # Set some parameters and let the others default
    experiment_parameters = dict(action_to_do='launcher_do_simple_run',
                                 inference_method='sample',
                                 experiment_id='bays09',
                                 M=100,
                                 N=500,
                                 filter_datapoints_size=500,
                                 filter_datapoints_selection='random',
                                 num_samples=500,
                                 selection_method='last',
                                 sigmax=0.1,
                                 sigma_output=0.5,
                                 renormalize_sigmax=None,
                                 sigmay=0.0001,
                                 code_type='mixed',
                                 slice_width=0.07,
                                 burn_samples=200,
                                 ratio_conj=0.7,
                                 stimuli_generation_recall='random',
                                 autoset_parameters=None,
                                 label='test_fit_experimentallt'
                                 )
    experiment_launcher = experimentlauncher.ExperimentLauncher(run=True, arguments_dict=experiment_parameters)
    experiment_parameters_full = experiment_launcher.args_dict
    sampler = experiment_launcher.all_vars['sampler']

    # Keep its dataset and responses
    stimuli_correct_to_force = sampler.data_gen.stimuli_correct.copy()
    response_to_force = sampler.theta[:, 0].copy()
    LL_target = sampler.compute_loglikelihood()

    experiment_parameters_full['stimuli_to_use'] = stimuli_correct_to_force

    sigmaoutput_space = np.linspace(0.0, 1.0, 10)

    LL_all_new = np.empty(sigmaoutput_space.size)
    LL_all_conv_new = np.empty(sigmaoutput_space.size)

    for sigmaout_i, sigma_output in enumerate(sigmaoutput_space):

        experiment_parameters_full['sigma_output'] = sigma_output

        _, _, _, samplerbis = launchers.init_everything(experiment_parameters_full)

        # Set responses
        samplerbis.set_theta(response_to_force)

        # Compute LL
        LL_all_new[sigmaout_i] = samplerbis.compute_loglikelihood()
        LL_all_conv_new[sigmaout_i] = samplerbis.compute_loglikelihood_convolved_output_noise()


        # Print result
        print LL_all_new[sigmaout_i], LL_all_conv_new[sigmaout_i]

    print LL_target
    print sigmaoutput_space, LL_all_new, LL_all_conv_new
    print sigmaoutput_space[np.argmax(LL_all_new)]
    print sigmaoutput_space[np.argmax(LL_all_conv_new)]

    return locals()
def launcher_do_hierarchical_special_stimuli_varyMMlower(args):
    '''
        Fit Hierarchical model, varying the ratio of M to Mlower
        See how the precision of recall and mixture model parameters evolve
    '''

    print "Doing a piece of work for launcher_do_mixed_special_stimuli"

    try:
        # Convert Argparse.Namespace to dict
        all_parameters = vars(args)
    except TypeError:
        # Assume it's already done
        assert type(args) is dict, "args is neither Namespace nor dict, WHY?"
        all_parameters = args

    print all_parameters

    # Create DataIO
    #  (complete label with current variable state)
    dataio = DataIO(output_folder=all_parameters['output_directory'], label=all_parameters['label'].format(**all_parameters))
    save_every = 1
    run_counter = 0

    # Parameters to vary
    M_space = np.arange(1, all_parameters['M']+1)
    M_lower_space = np.arange(2, all_parameters['M']+1, 2)
    MMlower_all = np.array(cross(M_space, M_lower_space))
    MMlower_valid_space = MMlower_all[np.nonzero(np.sum(MMlower_all, axis=1) == all_parameters['M'])[0]]

    # limit space, not too big...
    MMlower_valid_space = MMlower_valid_space[::5]
    print "MMlower size", MMlower_valid_space.shape[0]

    # Result arrays
    result_all_precisions = np.nan*np.ones((MMlower_valid_space.shape[0], all_parameters['num_repetitions']))
    result_em_fits = np.nan*np.ones((MMlower_valid_space.shape[0], 5, all_parameters['num_repetitions']))  # kappa, mixt_target, mixt_nontarget, mixt_random, ll
    result_em_resp = np.nan*np.ones((MMlower_valid_space.shape[0], 1+all_parameters['T'], all_parameters['N'], all_parameters['num_repetitions']))

    # If desired, will automatically save all Model responses.
    if all_parameters['subaction'] == 'collect_responses':
        result_responses = np.nan*np.ones((MMlower_valid_space.shape[0], all_parameters['N'], all_parameters['num_repetitions']))
        result_target = np.nan*np.ones((MMlower_valid_space.shape[0], all_parameters['N'], all_parameters['num_repetitions']))
        result_nontargets = np.nan*np.ones((MMlower_valid_space.shape[0], all_parameters['N'], all_parameters['T']-1, all_parameters['num_repetitions']))


    search_progress = progress.Progress(MMlower_valid_space.shape[0]*all_parameters['num_repetitions'])

    for repet_i in xrange(all_parameters['num_repetitions']):
        for MMlower_i, MMlower in enumerate(MMlower_valid_space):
            print "%.2f%%, %s left - %s" % (search_progress.percentage(), search_progress.time_remaining_str(), search_progress.eta_str())

            print "Fit for M=%d, Mlower=%d, %d/%d" % (MMlower[0], MMlower[1], repet_i+1, all_parameters['num_repetitions'])

            # Update parameter
            all_parameters['M']             = MMlower[0]
            all_parameters['M_layer_one']   = MMlower[1]

            ### WORK WORK WORK work? ###

            # Generate specific stimuli
            all_parameters['stimuli_generation'] = 'specific_stimuli'
            all_parameters['code_type'] = 'hierarchical'

            # Instantiate
            (random_network, data_gen, stat_meas, sampler) = launchers.init_everything(all_parameters)

            # Sample
            sampler.run_inference(all_parameters)

            # Compute precision
            result_all_precisions[MMlower_i, repet_i] = sampler.get_precision()

            # Fit mixture model
            curr_params_fit = em_circularmixture.fit(*sampler.collect_responses())
            curr_resp = em_circularmixture.compute_responsibilities(*(sampler.collect_responses() + (curr_params_fit,) ))

            print curr_params_fit

            result_em_fits[MMlower_i, :, repet_i] = [curr_params_fit[key] for key in ('kappa', 'mixt_target', 'mixt_nontargets', 'mixt_random', 'train_LL')]
            result_em_resp[MMlower_i, 0, :, repet_i] = curr_resp['target']
            result_em_resp[MMlower_i, 1:-1, :, repet_i] = curr_resp['nontargets'].T
            result_em_resp[MMlower_i, -1, :, repet_i] = curr_resp['random']

            # If needed, store responses
            if all_parameters['subaction'] == 'collect_responses':
                (responses, target, nontarget) = sampler.collect_responses()
                result_responses[MMlower_i, :, repet_i] = responses
                result_target[MMlower_i, :, repet_i] = target
                result_nontargets[MMlower_i, ..., repet_i] = nontarget

                print "collected responses"

            ### /Work ###

            search_progress.increment()
            if run_counter % save_every == 0 or search_progress.done():
                dataio.save_variables_default(locals())
            run_counter += 1

    # Finished
    dataio.save_variables_default(locals())

    print "All finished"
    return locals()
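# --- Illustrative sketch, not part of the original code ---
# cross() used above is assumed to return the cartesian product of its argument
# spaces as rows; the (M, M_layer_one) grid restricted to a fixed total number of
# units could equivalently be built with itertools:
import itertools
import numpy as np

def build_MMlower_grid_sketch(M_total, step_lower=2):
    M_space = np.arange(1, M_total + 1)
    M_lower_space = np.arange(2, M_total + 1, step_lower)
    pairs = np.array(list(itertools.product(M_space, M_lower_space)))
    return pairs[np.sum(pairs, axis=1) == M_total]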
def launcher_do_fit_mixturemodels(args):
    '''
        Run the model for 1..T items, computing:
        - Precision of samples
        - EM mixture model fits
        - Theoretical Fisher Information
        - EM mixture model distances to the experimental datasets currently used for fitting.
    '''

    print "Doing a piece of work for launcher_do_fit_mixturemodels"

    all_parameters = utils.argparse_2_dict(args)
    print all_parameters

    if all_parameters['burn_samples'] + all_parameters['num_samples'] < 200:
        print "WARNING> you do not have enough samples I think!", all_parameters['burn_samples'] + all_parameters['num_samples']

    # Create DataIO
    #  (complete label with current variable state)
    dataio = DataIO.DataIO(output_folder=all_parameters['output_directory'], label=all_parameters['label'].format(**all_parameters))
    save_every = 1
    run_counter = 0

    # Load datasets to compare against
    data_bays2009 = load_experimental_data.load_data_bays09(data_dir=all_parameters['experiment_data_dir'], fit_mixture_model=True)
    bays09_experimental_mixtures_mean = data_bays2009['em_fits_nitems_arrays']['mean']
    # Assume that T_space >= max(T_space_bays09)
    bays09_T_space = np.unique(data_bays2009['n_items'])

    data_gorgo11 = load_experimental_data.load_data_simult(data_dir=all_parameters['experiment_data_dir'], fit_mixture_model=True)
    gorgo11_experimental_emfits_mean = data_gorgo11['em_fits_nitems_arrays']['mean']
    gorgo11_T_space = np.unique(data_gorgo11['n_items'])

    # Parameters to vary
    T_max = all_parameters['T']
    T_space = np.arange(1, T_max+1)
    repetitions_axis = -1

    # Result arrays
    result_all_precisions = np.nan*np.empty((T_space.size, all_parameters['num_repetitions']))
    # result_fi_theo = np.nan*np.empty((T_space.size, all_parameters['num_repetitions']))
    # result_fi_theocov = np.nan*np.empty((T_space.size, all_parameters['num_repetitions']))
    result_em_fits = np.nan*np.empty((T_space.size, 6, all_parameters['num_repetitions']))  # kappa, mixt_target, mixt_nontarget, mixt_random, ll, bic
    # result_em_fits_allnontargets = np.nan*np.empty((T_space.size, 5+(T_max-1), all_parameters['num_repetitions']))  # kappa, mixt_target, mixt_nontarget (T-1), mixt_random, ll, bic
    result_dist_bays09 = np.nan*np.empty((T_space.size, 4, all_parameters['num_repetitions']))  # kappa, mixt_target, mixt_nontarget, mixt_random
    result_dist_gorgo11 = np.nan*np.empty((T_space.size, 4, all_parameters['num_repetitions']))  # kappa, mixt_target, mixt_nontarget, mixt_random
    result_dist_bays09_emmixt_KL = np.nan*np.empty((T_space.size, all_parameters['num_repetitions']))
    result_dist_gorgo11_emmixt_KL = np.nan*np.empty((T_space.size, all_parameters['num_repetitions']))

    # If desired, will automatically save all Model responses.
    if all_parameters['collect_responses']:
        print "--- Collecting all responses..."
        result_responses = np.nan*np.ones((T_space.size, all_parameters['N'], all_parameters['num_repetitions']))
        result_target = np.nan*np.ones((T_space.size, all_parameters['N'], all_parameters['num_repetitions']))
        result_nontargets = np.nan*np.ones((T_space.size, all_parameters['N'], T_max-1, all_parameters['num_repetitions']))

    search_progress = progress.Progress(T_space.size*all_parameters['num_repetitions'])

    for repet_i in xrange(all_parameters['num_repetitions']):
        for T_i, T in enumerate(T_space):
            print "%.2f%%, %s left - %s" % (search_progress.percentage(), search_progress.time_remaining_str(), search_progress.eta_str())

            print "Fit for T=%d, %d/%d" % (T, repet_i+1, all_parameters['num_repetitions'])

            # Update parameter
            all_parameters['T'] = T

            ### WORK WORK WORK work? ###
            # Instantiate
            (_, _, _, sampler) = launchers.init_everything(all_parameters)

            # Sample
            sampler.run_inference(all_parameters)

            # Compute precision
            print "get precision..."
            result_all_precisions[T_i, repet_i] = sampler.get_precision()

            # Fit mixture model
            print "fit mixture model..."
            curr_params_fit = sampler.fit_mixture_model(use_all_targets=False)
            curr_params_fit['mixt_nontargets_sum'] = np.sum(curr_params_fit['mixt_nontargets'])  # needed for the 'mixt_nontargets_sum' key used just below
            result_em_fits[T_i, :, repet_i] = [curr_params_fit[key] for key in ['kappa', 'mixt_target', 'mixt_nontargets_sum', 'mixt_random', 'train_LL', 'bic']]
            # result_em_fits_allnontargets[T_i, :2, repet_i] = [curr_params_fit['kappa'], curr_params_fit['mixt_target']]
            # result_em_fits_allnontargets[T_i, 2:(2+T-1), repet_i] = curr_params_fit['mixt_nontargets']
            # result_em_fits_allnontargets[T_i, -3:, repet_i] = [curr_params_fit[key] for key in ('mixt_random', 'train_LL', 'bic')]

            # Compute fisher info
            # print "compute fisher info"
            # result_fi_theo[T_i, repet_i] = sampler.estimate_fisher_info_theocov(use_theoretical_cov=False)
            # result_fi_theocov[T_i, repet_i] = sampler.estimate_fisher_info_theocov(use_theoretical_cov=True)

            # Compute distances to datasets
            if T in bays09_T_space:
                result_dist_bays09[T_i, :, repet_i] = (bays09_experimental_mixtures_mean[:, bays09_T_space == T].flatten() - result_em_fits[T_i, :4, repet_i])**2.

                result_dist_bays09_emmixt_KL[T_i, repet_i] = utils.KL_div(result_em_fits[T_i, 1:4, repet_i], bays09_experimental_mixtures_mean[1:, bays09_T_space == T].flatten())

            if T in gorgo11_T_space:
                result_dist_gorgo11[T_i, :, repet_i] = (gorgo11_experimental_emfits_mean[:, gorgo11_T_space == T].flatten() - result_em_fits[T_i, :4, repet_i])**2.

                result_dist_gorgo11_emmixt_KL[T_i, repet_i] = utils.KL_div(result_em_fits[T_i, 1:4, repet_i], gorgo11_experimental_emfits_mean[1:, gorgo11_T_space == T].flatten())


            # If needed, store responses
            if all_parameters['collect_responses']:
                (responses, target, nontarget) = sampler.collect_responses()
                result_responses[T_i, :, repet_i] = responses
                result_target[T_i, :, repet_i] = target
                result_nontargets[T_i, :, :T_i, repet_i] = nontarget

                print "collected responses"


            print "CURRENT RESULTS:\n", result_all_precisions[T_i, repet_i], curr_params_fit, np.sum(result_dist_bays09[T_i, :, repet_i]), np.sum(result_dist_gorgo11[T_i, :, repet_i]), "\n"
            ### /Work ###

            search_progress.increment()
            if run_counter % save_every == 0 or search_progress.done():
                dataio.save_variables_default(locals())
            run_counter += 1

    # Finished
    dataio.save_variables_default(locals())

    print "All finished"
    return locals()
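# --- Illustrative sketch, not part of the original code ---
# utils.KL_div used above is assumed to compute a discrete KL divergence between
# the model mixture proportions (target/nontargets/random) and the experimental
# ones; the exact argument order of the real utils.KL_div may differ. A minimal
# version, with a small epsilon guarding against zero proportions:
import numpy as np

def kl_div_mixtures_sketch(p_model, q_experimental, eps=1e-10):
    p = np.asarray(p_model, dtype=float) + eps
    q = np.asarray(q_experimental, dtype=float) + eps
    p /= np.sum(p)
    q /= np.sum(q)
    return np.sum(p*np.log(p/q))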
def launcher_do_mixed_special_stimuli(args):
    '''
        Fit mixed model, varying the ratio_conj
        See how the precision of recall and mixture model parameters evolve
    '''

    print "Doing a piece of work for launcher_do_mixed_special_stimuli"

    try:
        # Convert Argparse.Namespace to dict
        all_parameters = vars(args)
    except TypeError:
        # Assume it's already done
        assert type(args) is dict, "args is neither Namespace nor dict, WHY?"
        all_parameters = args

    print all_parameters

    # Create DataIO
    #  (complete label with current variable state)
    dataio = DataIO(output_folder=all_parameters['output_directory'], label=all_parameters['label'].format(**all_parameters))
    save_every = 1
    run_counter = 0

    # Parameters to vary
    ratio_space = (np.arange(0, all_parameters['M']**0.5)**2.)/all_parameters['M']

    # Result arrays
    result_all_precisions = np.nan*np.ones((ratio_space.size, all_parameters['num_repetitions']))
    result_em_fits = np.nan*np.ones((ratio_space.size, 5, all_parameters['num_repetitions']))  # kappa, mixt_target, mixt_nontarget, mixt_random, ll
    result_em_resp = np.nan*np.ones((ratio_space.size, 1+all_parameters['T'], all_parameters['N'], all_parameters['num_repetitions']))

    # If desired, will automatically save all Model responses.
    if all_parameters['subaction'] == 'collect_responses':
        result_responses = np.nan*np.ones((ratio_space.size, all_parameters['N'], all_parameters['num_repetitions']))
        result_target = np.nan*np.ones((ratio_space.size, all_parameters['N'], all_parameters['num_repetitions']))
        result_nontargets = np.nan*np.ones((ratio_space.size, all_parameters['N'], all_parameters['T']-1, all_parameters['num_repetitions']))


    search_progress = progress.Progress(ratio_space.size*all_parameters['num_repetitions'])

    for repet_i in xrange(all_parameters['num_repetitions']):
        for ratio_i, ratio_conj in enumerate(ratio_space):
            print "%.2f%%, %s left - %s" % (search_progress.percentage(), search_progress.time_remaining_str(), search_progress.eta_str())

            print "Fit for ratio_conj=%.2f, %d/%d" % (ratio_conj, repet_i+1, all_parameters['num_repetitions'])

            # Update parameter
            all_parameters['ratio_conj'] = ratio_conj

            ### WORK WORK WORK work? ###

            # Generate specific stimuli
            all_parameters['stimuli_generation'] = 'specific_stimuli'

            # Instantiate
            (random_network, data_gen, stat_meas, sampler) = launchers.init_everything(all_parameters)

            # Sample
            sampler.run_inference(all_parameters)

            # Compute precision
            result_all_precisions[ratio_i, repet_i] = sampler.get_precision()

            # Fit mixture model
            curr_params_fit = em_circularmixture.fit(*sampler.collect_responses())
            curr_resp = em_circularmixture.compute_responsibilities(*(sampler.collect_responses() + (curr_params_fit,) ))

            result_em_fits[ratio_i, :, repet_i] = [curr_params_fit[key] for key in ('kappa', 'mixt_target', 'mixt_nontargets', 'mixt_random', 'train_LL')]
            result_em_resp[ratio_i, 0, :, repet_i] = curr_resp['target']
            result_em_resp[ratio_i, 1:-1, :, repet_i] = curr_resp['nontargets'].T
            result_em_resp[ratio_i, -1, :, repet_i] = curr_resp['random']

            print result_all_precisions[ratio_i, repet_i], curr_params_fit

            # If needed, store responses
            if all_parameters['subaction'] == 'collect_responses':
                (responses, target, nontarget) = sampler.collect_responses()
                result_responses[ratio_i, :, repet_i] = responses
                result_target[ratio_i, :, repet_i] = target
                result_nontargets[ratio_i, ..., repet_i] = nontarget

                print "collected responses"

            ### /Work ###

            search_progress.increment()
            if run_counter % save_every == 0 or search_progress.done():
                dataio.save_variables_default(locals())
            run_counter += 1

    # Finished
    dataio.save_variables_default(locals())

    print "All finished"
    return locals()


def launcher_do_variability_mixture(args):
    '''
        Compute the posterior in the no-noise case, to see the effect of the conjunctive ratio on the mixture probability.

    '''

    print "Doing a piece of work for launcher_do_variability_mixture"

    try:
        # Convert Argparse.Namespace to dict
        all_parameters = vars(args)
    except TypeError:
        # Assume it's already done
        assert type(args) is dict, "args is neither Namespace nor dict, WHY?"
        all_parameters = args

    # Create DataIO
    #  (complete label with current variable state)
    dataio = DataIO(output_folder=all_parameters['output_directory'], label=all_parameters['label'].format(**all_parameters))


    # Do it for multiple ratios and multiple sigmas
    plt.ion()

    # Fix some parameters
    all_parameters['stimuli_generation'] = 'separated'
    all_parameters['stimuli_generation_recall'] = 'random'
    all_parameters['enforce_first_stimulus'] = False
    all_parameters['num_samples'] = 500
    all_parameters['selection_method'] = 'last'
    all_parameters['code_type'] = 'mixed'
    all_parameters['autoset_parameters'] = True
    all_parameters['M'] = 100
    all_parameters['N'] = 100
    all_parameters['inference_method'] = 'none'
    all_parameters['T'] = 2
    all_parameters['sigmay'] = 0.0000001

    # ratio_space = np.array([0.01, 0.05, 0.1, 0.3, 0.5, 0.7, 0.9])
    ratio_space = np.linspace(0.01, 0.7, 20)

    all_parameters['sigmax'] = 0.05

    num_points = 500

    result_all_posterior = np.nan*np.ones((ratio_space.size, all_parameters['N'], num_points))
    result_all_mixture_params = np.nan*np.ones((ratio_space.size, all_parameters['N'], 3))
    result_all_bimodality_tests = np.nan*np.ones((ratio_space.size, all_parameters['N'], 2))
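    # Column 0 of result_all_mixture_params is the mixture proportion (plotted
    # below); result_all_bimodality_tests stores the bimodality coefficient and
    # Ashman's D, in that order.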

    search_progress = progress.Progress(ratio_space.size*all_parameters['N'])
    save_every = 10
    print_every = 10
    run_counter = 0
    ax_handle = None


    all_angles = np.linspace(-np.pi, np.pi, num_points)
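    # Regular grid of num_points recall angles over [-pi, pi] on which the
    # posterior is evaluated below.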

    for ratio_i, ratio_conj in enumerate(ratio_space):

        all_parameters['ratio_conj'] = ratio_conj
        (random_network, data_gen, stat_meas, sampler) = launchers.init_everything(all_parameters)

        ### WORK WORK WORK work? ###

        print "Average posterior..."

        for n in xrange(all_parameters['N']):
            if run_counter % print_every == 0:
                print "%.2f%% %s/%s" % (search_progress.percentage(), search_progress.time_remaining_str(), search_progress.eta_str())

            result_all_posterior[ratio_i, n] = sampler.compute_likelihood_fullspace(n=n, all_angles=all_angles, num_points=num_points, should_exponentiate=True, remove_mean=True)[:, -1].T
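            # (The [:, -1] slice above presumably selects the last, i.e. cued,
            # index of the full-space likelihood, giving the exponentiated,
            # mean-removed posterior over the recall angle for datapoint n.)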

            result_all_mixture_params[ratio_i, n] = fit_gaussian_mixture_fixedmeans(all_angles, result_all_posterior[ratio_i, n], fixed_means=data_gen.stimuli_correct[n, :, -1], normalise=True, return_fitted_data=False, should_plot=False)

            result_all_bimodality_tests[ratio_i, n] = (bimodality_coefficient(all_angles, result_all_posterior[ratio_i, n]),
                                                       ashman_d(all_angles, result_all_posterior[ratio_i, n])
                                                       )
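            # Standard definitions, which bimodality_coefficient / ashman_d
            # presumably implement: the bimodality coefficient is
            # (skewness^2 + 1) / kurtosis, and Ashman's D is
            # sqrt(2) * |mu_1 - mu_2| / sqrt(sigma_1^2 + sigma_2^2),
            # with D > 2 usually taken as evidence of bimodality.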

            ### /Work ###
            search_progress.increment()
            # if run_counter % save_every == 0 or search_progress.done():
                # dataio.save_variables_default(locals())


        print result_all_bimodality_tests

    plt.figure()
    plt.plot(ratio_space, np.mean(result_all_bimodality_tests[..., 0], axis=1))
    plt.title('Bimodality coefficient')

    plt.figure()
    plt.plot(ratio_space, np.mean(result_all_bimodality_tests[..., 1], axis=1))
    plt.title('Ashman D')

    plt.figure()
    plt.plot(ratio_space, np.mean(result_all_mixture_params[..., 0], axis=1))
    plt.title('Mixture proportion')

    # plt.figure()
    # plt.plot(np.mean(np.abs(result_all_mixture_params[..., 1] - result_all_mixture_params[..., 3]), axis=1))
    # plt.title('|mu_1 - mu_2|')

    plt.figure()
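    # Responsibility-weighted average of the two fixed means,
    # alpha * mu_1 + (1 - alpha) * mu_2, averaged over datapoints.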
    plt.plot(ratio_space, np.mean(result_all_mixture_params[..., 0]*data_gen.stimuli_correct[:, 0, -1] + (1.-result_all_mixture_params[..., 0])*data_gen.stimuli_correct[:, 1, -1], axis=1))
    plt.title('alpha mu_1 + (1 - alpha) mu_2')

    return locals()


def launcher_do_error_distributions(args):
    '''
        Collect responses for error distribution plots (used in generator/reloader_error_distribution_*.py)

        Do it for T items.

        Similar to the Bays 2009 setup; used in the paper.
    '''

    print "Doing a piece of work for launcher_do_error_distributions"

    try:
        # Convert Argparse.Namespace to dict
        all_parameters = vars(args)
    except TypeError:
        # Assume it's already done
        assert type(args) is dict, "args is neither Namespace nor dict, WHY?"
        all_parameters = args

    print all_parameters

    # Create DataIO
    #  (complete label with current variable state)
    dataio = DataIO(output_folder=all_parameters['output_directory'], label=all_parameters['label'].format(**all_parameters))
    save_every = 1
    run_counter = 0

    # Result arrays
    result_responses = np.nan*np.ones((all_parameters['N'], all_parameters['num_repetitions']))
    result_target = np.nan*np.ones((all_parameters['N'], all_parameters['num_repetitions']))
    result_nontargets = np.nan*np.ones((all_parameters['N'], all_parameters['T']-1, all_parameters['num_repetitions']))
    result_em_fits = np.nan*np.ones((5, all_parameters['num_repetitions']))  # kappa, mixt_target, mixt_nontarget, mixt_random, ll
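    # The five rows of result_em_fits map onto the mixture-fit keys read out
    # below: kappa, mixt_target, mixt_nontargets, mixt_random, train_LL.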


    search_progress = progress.Progress(all_parameters['num_repetitions'])

    for repet_i in xrange(all_parameters['num_repetitions']):
        print "%.2f%%, %s left - %s" % (search_progress.percentage(), search_progress.time_remaining_str(), search_progress.eta_str())

        print "Fit for T=%d, %d/%d" % (all_parameters['T'], repet_i+1, all_parameters['num_repetitions'])

        # No parameter to update here: T is taken directly from all_parameters.

        ### WORK WORK WORK work? ###

        # Instantiate
        (_, _, _, sampler) = launchers.init_everything(all_parameters)

        # Sample
        sampler.run_inference(all_parameters)

        # Collect and store responses
        (responses, target, nontarget) = sampler.collect_responses()
        result_responses[:, repet_i] = responses
        result_target[:, repet_i] = target
        result_nontargets[..., repet_i] = nontarget

        # Fit mixture model
        curr_params_fit = em_circularmixture.fit(*sampler.collect_responses())
        result_em_fits[..., repet_i] = [curr_params_fit[key] for key in ('kappa', 'mixt_target', 'mixt_nontargets', 'mixt_random', 'train_LL')]

        ### /Work ###

        search_progress.increment()
        if run_counter % save_every == 0 or search_progress.done():
            dataio.save_variables_default(locals())
        run_counter += 1

    # Finished
    dataio.save_variables_default(locals())

    print "All finished"
    return locals()
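# Usage sketch (values are hypothetical and other parameters may be required):
# these launchers are normally dispatched through
# experimentlauncher.ExperimentLauncher via the `action_to_do` argument, e.g.
#
#   experiment_parameters = dict(
#       action_to_do='launcher_do_error_distributions',
#       label='error_distributions_sketch',
#       output_directory='.',
#       T=3, N=200, M=200, num_repetitions=5,
#       sigmax=0.15, sigmay=0.0001,
#       code_type='mixed', ratio_conj=0.5,
#       inference_method='sample', num_samples=500,
#       selection_method='last',
#   )
#   experimentlauncher.ExperimentLauncher(run=True, arguments_dict=experiment_parameters)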