def launcher_do_noise_output_effect_withplots_live(args):
    '''
        Just like launcher_do_noise_output_effect_withplots, but does the plots live while the simulation is running.
    '''

    all_parameters = utils.argparse_2_dict(args)

    all_parameters['do_plots_during_simulation'] = True

    return launcher_do_noise_output_effect_withplots(all_parameters)
def launcher_do_receptivesize_effect_withplots_live(args):
    '''
        Just like launcher_do_receptivesize_effect, but does the plots as well while the simulation is running.
    '''

    all_parameters = utils.argparse_2_dict(args)

    plotting_parameters = dict(axes={})

    if all_parameters['plot_while_running']:
        # Define the callback function.
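        # Callback protocol: the sub-launcher pops 'plots_during_simulation_callback'
        # from its parameters and calls function(locals(), parameters) after each work unit.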
        all_parameters['plots_during_simulation_callback'] = dict(function=receptivesize_effect_plots, parameters=plotting_parameters)

        other_launcher_results = launcher_do_receptivesize_effect(all_parameters)
    else:
        # Run launcher_do_receptivesize_effect, will do the plots afterwards
        other_launcher_results = launcher_do_receptivesize_effect(args)

        # Do the plots
        receptivesize_effect_plots(other_launcher_results, plotting_parameters)


    return other_launcher_results
def launcher_do_fitexperiment_allmetrics(args):
    '''
        Given a single experiment_id, runs the model on all T in the experimental data.
        Computes several metrics (LL, BIC) and can additionally sample from the model and check the
        Mixture model summary statistics fits.

        If inference_method is not 'none', also fits an EM mixture model and gets the precision and the Fisher information.
    '''

    print "Doing a piece of work for launcher_do_fitexperimentsinglet"

    all_parameters = utils.argparse_2_dict(args)
    print all_parameters

    if all_parameters['burn_samples'] + all_parameters['num_samples'] < 200:
        print "WARNING> you do not have enough samples I think!", all_parameters['burn_samples'] + all_parameters['num_samples']

    # Create DataIO
    #  (complete label with current variable state)
    dataio = DataIO.DataIO(
        output_folder=all_parameters['output_directory'],
        label=all_parameters['label'].format(**all_parameters))
    save_every = 1
    run_counter = 0

    # Result arrays
    # sizes depend on the experiment.
    all_outputs_data = dict()
    T_space = None

    search_progress = progress.Progress(all_parameters['num_repetitions'])
    for repet_i in xrange(all_parameters['num_repetitions']):
        print "\n\n%d/%d | %.2f%%, %s left - %s" % (
            repet_i + 1, all_parameters['num_repetitions'],
            search_progress.percentage(), search_progress.time_remaining_str(),
            search_progress.eta_str())

        ### WORK WORK WORK work? ###

        # Let's build a FitExperimentAllT
        fit_exp = FitExperimentAllT(all_parameters)

        # Setup and evaluate some statistics
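        # Passed to fit_exp.apply_fct_datasets_allT() below; 'self' is expected to be the
        # FitExperimentAllT instance (hence the self.sampler accesses).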
        def compute_everything(self, parameters):
            results = dict()

            print ">> Computing LL all N..."
            results['result_ll_n'] = self.sampler.compute_loglikelihood_N()

            print ">> Computing LL sum..."
            results['result_ll_sum'] = np.nansum(results['result_ll_n'])
            print results['result_ll_sum']

            print ">> Computing LL median..."
            results['result_ll_median'] = np.nanmedian(results['result_ll_n'])
            print results['result_ll_median']

            print ">> Computing BIC..."
            results['result_bic'] = self.sampler.compute_bic(
                K=parameters['bic_K'], LL=results['result_ll_sum'])

            print ">> Computing LL90/92/95/97..."
            results['result_ll90_sum'] = (
                self.sampler.compute_loglikelihood_top90percent(
                    all_loglikelihoods=results['result_ll_n']))
            results['result_ll92_sum'] = (
                self.sampler.compute_loglikelihood_top_p_percent(
                    0.92, all_loglikelihoods=results['result_ll_n']))
            results['result_ll95_sum'] = (
                self.sampler.compute_loglikelihood_top_p_percent(
                    0.95, all_loglikelihoods=results['result_ll_n']))
            results['result_ll97_sum'] = (
                self.sampler.compute_loglikelihood_top_p_percent(
                    0.97, all_loglikelihoods=results['result_ll_n']))

            # If sampling_method is not none, try to get em_fits and others
            if parameters['inference_method'] != 'none':
                print ">> Sampling and fitting mixt model / precision / FI ..."

                # Sample
                print " sampling..."
                self.sampler.run_inference(parameters)

                # Compute precision
                print " get precision..."
                results['result_precision'] = self.sampler.get_precision()

                # Fit mixture model
                print " fit mixture model..."
                curr_params_fit = self.sampler.fit_mixture_model(
                    use_all_targets=False)
                results['result_em_fits'] = np.array([
                    curr_params_fit[key]
                    for key in [
                        'kappa', 'mixt_target', 'mixt_nontargets_sum',
                        'mixt_random', 'train_LL', 'bic'
                    ]
                ])

                # Compute distances to data mixture model
                emfits_distances = self.compute_dist_experimental_em_fits_currentT(
                    results['result_em_fits'])
                results['result_emfit_mse'] = emfits_distances['all_mse']
                results['result_emfit_mse_scaled'] = emfits_distances[
                    'mse_scaled']
                results['result_emfit_memfid_mse'] = emfits_distances[
                    'memfidel_mse']
                results['result_emfit_mixt_kl'] = emfits_distances['mixt_kl']

                # Compute fisher info
                # print " compute fisher info"
                # results['result_fi_theo'] = (
                #     self.sampler.estimate_fisher_info_theocov(
                #         use_theoretical_cov=False))
                # results['result_fi_theocov'] = (
                #     self.sampler.estimate_fisher_info_theocov(
                #         use_theoretical_cov=True))

            return results

        res_listdicts = fit_exp.apply_fct_datasets_allT(
            dict(fct=compute_everything, parameters=all_parameters))

        # Aggregate: for each metric, append one array of per-T values for this repetition
        for key in res_listdicts[0]:
            all_outputs_data.setdefault(key, []).append(
                np.array([res[key] for res in res_listdicts]))

        # print "CURRENT RESULTS:"
        # print res_listdicts

        ### /Work ###

        T_space = fit_exp.T_space

        search_progress.increment()
        if run_counter % save_every == 0 or search_progress.done():
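            # Snapshot all locals, plus the metric arrays flattened to top level so DataIO can save each under its own name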
            data_to_save = locals()
            data_to_save.update(all_outputs_data)
            dataio.save_variables_default(data_to_save)
        run_counter += 1

    # Convert results to arrays
    # Move the repetition axis to the last dimension (the convention used throughout)
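    # e.g. ndim == 3: transpose(np.roll([0, 1, 2], -1)) == transpose(1, 2, 0),
    # turning shape (num_repetitions, T, ...) into (T, ..., num_repetitions)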
    for key in res_listdicts[0]:
        all_outputs_data[key] = np.array(all_outputs_data[key])
        all_outputs_data[key] = all_outputs_data[key].transpose(
            np.roll(np.arange(all_outputs_data[key].ndim), -1))

    ### /Work ###

    data_to_save = locals()
    data_to_save.update(all_outputs_data)
    dataio.save_variables_default(data_to_save)

    ### Plots ###

    print "All finished"
    return locals()
def launcher_do_fit_mixturemodel_dualrecall(args):
    '''
        Run the model for T items, trying to fit
        the DualRecall dataset, which has two conditions.

        Get:
        - Precision
        - EM mixture model fits
        - Theoretical Fisher Information
        - EM Mixture model distances
    '''

    print "Doing a piece of work for launcher_do_fit_mixturemodel_dualrecall"

    all_parameters = utils.argparse_2_dict(args)
    print all_parameters

    if all_parameters['burn_samples'] + all_parameters['num_samples'] < 200:
        print "WARNING> you do not have enough samples I think!", all_parameters['burn_samples'] + all_parameters['num_samples']


    # Create DataIO
    #  (complete label with current variable state)
    dataio = DataIO.DataIO(output_folder=all_parameters['output_directory'], label=all_parameters['label'].format(**all_parameters))
    save_every = 1
    run_counter = 0


    # Load datasets to compare against
    data_dualrecall = load_experimental_data.load_data_dualrecall(data_dir=all_parameters['experiment_data_dir'], fit_mixture_model=True)
    dualrecall_T_space = data_dualrecall['data_to_fit']['n_items']

    dualrecall_experimental_angle_emfits_mean = data_dualrecall['em_fits_angle_nitems_arrays']['mean']
    dualrecall_experimental_colour_emfits_mean = data_dualrecall['em_fits_colour_nitems_arrays']['mean']

    # Parameters to vary
    repetitions_axis = -1

    # Result arrays
    result_all_precisions = np.nan*np.empty((all_parameters['num_repetitions']))
    result_fi_theo = np.nan*np.empty((all_parameters['num_repetitions']))
    result_fi_theocov = np.nan*np.empty((all_parameters['num_repetitions']))
    result_em_fits = np.nan*np.empty((6, all_parameters['num_repetitions']))  # kappa, mixt_target, mixt_nontarget, mixt_random, ll, bic
    result_dist_dualrecall_angle = np.nan*np.empty((4, all_parameters['num_repetitions']))  # kappa, mixt_target, mixt_nontarget, mixt_random
    result_dist_dualrecall_angle_emmixt_KL = np.nan*np.empty((all_parameters['num_repetitions']))
    result_dist_dualrecall_colour = np.nan*np.empty((4, all_parameters['num_repetitions']))  # kappa, mixt_target, mixt_nontarget, mixt_random
    result_dist_dualrecall_colour_emmixt_KL = np.nan*np.empty((all_parameters['num_repetitions']))

    # If desired, will automatically save all Model responses.
    if all_parameters['collect_responses']:
        print "--- Collecting all responses..."
        result_responses = np.nan*np.ones((all_parameters['N'], all_parameters['num_repetitions']))
        result_target = np.nan*np.ones((all_parameters['N'], all_parameters['num_repetitions']))
        result_nontargets = np.nan*np.ones((all_parameters['N'], all_parameters['T'] - 1, all_parameters['num_repetitions']))

    search_progress = progress.Progress(all_parameters['num_repetitions'])

    for repet_i in xrange(all_parameters['num_repetitions']):
        print "%.2f%%, %s left - %s" % (search_progress.percentage(), search_progress.time_remaining_str(), search_progress.eta_str())

        print "Fit for T=%d, %d/%d" % (all_parameters['T'], repet_i+1, all_parameters['num_repetitions'])

        ## Update parameter

        ### WORK WORK WORK work? ###
        # Instantiate
        (_, _, _, sampler) = launchers.init_everything(all_parameters)

        # Sample
        sampler.run_inference(all_parameters)

        # Compute precision
        print "get precision..."
        result_all_precisions[repet_i] = sampler.get_precision()

        # Fit mixture model
        print "fit mixture model..."
        curr_params_fit = sampler.fit_mixture_model(use_all_targets=False)
        # curr_params_fit['mixt_nontargets_sum'] = np.sum(curr_params_fit['mixt_nontargets'])
        result_em_fits[:, repet_i] = [curr_params_fit[key] for key in ['kappa', 'mixt_target', 'mixt_nontargets_sum', 'mixt_random', 'train_LL', 'bic']]

        # Compute fisher info
        print "compute fisher info"
        result_fi_theo[repet_i] = sampler.estimate_fisher_info_theocov(use_theoretical_cov=False)
        result_fi_theocov[repet_i] = sampler.estimate_fisher_info_theocov(use_theoretical_cov=True)

        # Compute distances to datasets
        if all_parameters['T'] in dualrecall_T_space:
            # Angle trials
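            # Squared error on (kappa, mixt_target, mixt_nontarget, mixt_random); KL divergence only on the three mixture weights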
            result_dist_dualrecall_angle[:, repet_i] = (dualrecall_experimental_angle_emfits_mean[:, dualrecall_T_space == all_parameters['T']].flatten() - result_em_fits[:4, repet_i])**2.
            result_dist_dualrecall_angle_emmixt_KL[repet_i] = utils.KL_div(result_em_fits[1:4, repet_i], dualrecall_experimental_angle_emfits_mean[1:, dualrecall_T_space==all_parameters['T']].flatten())

            # Colour trials
            result_dist_dualrecall_colour[:, repet_i] = (dualrecall_experimental_colour_emfits_mean[:, dualrecall_T_space == all_parameters['T']].flatten() - result_em_fits[:4, repet_i])**2.
            result_dist_dualrecall_colour_emmixt_KL[repet_i] = utils.KL_div(result_em_fits[1:4, repet_i], dualrecall_experimental_colour_emfits_mean[1:, dualrecall_T_space==all_parameters['T']].flatten())

        # If needed, store responses
        if all_parameters['collect_responses']:
            (responses, target, nontarget) = sampler.collect_responses()
            result_responses[:, repet_i] = responses
            result_target[:, repet_i] = target
            result_nontargets[..., repet_i] = nontarget

            print "collected responses"


        print "CURRENT RESULTS:\n", result_all_precisions[repet_i], curr_params_fit, result_fi_theo[repet_i], result_fi_theocov[repet_i], np.sum(result_dist_dualrecall_angle[:, repet_i]), np.sum(result_dist_dualrecall_colour[:, repet_i]), "\n"
        ### /Work ###

        search_progress.increment()
        if run_counter % save_every == 0 or search_progress.done():
            dataio.save_variables_default(locals())
        run_counter += 1

    # Finished
    dataio.save_variables_default(locals())

    print "All finished"
    return locals()
def launcher_do_fit_mixturemodels(args):
    '''
        Run the model for 1..T items, computing:
        - Precision of samples
        - EM mixture model fits
        - Theoretical Fisher Information
        - EM Mixture model distances to set of currently working datasets.
    '''

    print "Doing a piece of work for launcher_do_fit_mixturemodels"

    all_parameters = utils.argparse_2_dict(args)
    print all_parameters

    if all_parameters['burn_samples'] + all_parameters['num_samples'] < 200:
        print "WARNING> you do not have enough samples I think!", all_parameters['burn_samples'] + all_parameters['num_samples']

    # Create DataIO
    #  (complete label with current variable state)
    dataio = DataIO.DataIO(output_folder=all_parameters['output_directory'], label=all_parameters['label'].format(**all_parameters))
    save_every = 1
    run_counter = 0

    # Load datasets to compare against
    data_bays2009 = load_experimental_data.load_data_bays09(data_dir=all_parameters['experiment_data_dir'], fit_mixture_model=True)
    bays09_experimental_mixtures_mean = data_bays2009['em_fits_nitems_arrays']['mean']
    # Assumes max(T_space) >= max(bays09_T_space)
    bays09_T_space = np.unique(data_bays2009['n_items'])

    data_gorgo11 = load_experimental_data.load_data_simult(data_dir=all_parameters['experiment_data_dir'], fit_mixture_model=True)
    gorgo11_experimental_emfits_mean = data_gorgo11['em_fits_nitems_arrays']['mean']
    gorgo11_T_space = np.unique(data_gorgo11['n_items'])

    # Parameters to vary
    T_max = all_parameters['T']
    T_space = np.arange(1, T_max+1)
    repetitions_axis = -1

    # Result arrays
    result_all_precisions = np.nan*np.empty((T_space.size, all_parameters['num_repetitions']))
    # result_fi_theo = np.nan*np.empty((T_space.size, all_parameters['num_repetitions']))
    # result_fi_theocov = np.nan*np.empty((T_space.size, all_parameters['num_repetitions']))
    result_em_fits = np.nan*np.empty((T_space.size, 6, all_parameters['num_repetitions']))  # kappa, mixt_target, mixt_nontarget, mixt_random, ll, bic
    # result_em_fits_allnontargets = np.nan*np.empty((T_space.size, 5+(T_max-1), all_parameters['num_repetitions']))  # kappa, mixt_target, mixt_nontarget (T-1), mixt_random, ll, bic
    result_dist_bays09 = np.nan*np.empty((T_space.size, 4, all_parameters['num_repetitions']))  # kappa, mixt_target, mixt_nontarget, mixt_random
    result_dist_gorgo11 = np.nan*np.empty((T_space.size, 4, all_parameters['num_repetitions']))  # kappa, mixt_target, mixt_nontarget, mixt_random
    result_dist_bays09_emmixt_KL = np.nan*np.empty((T_space.size, all_parameters['num_repetitions']))
    result_dist_gorgo11_emmixt_KL = np.nan*np.empty((T_space.size, all_parameters['num_repetitions']))

    # If desired, will automatically save all Model responses.
    if all_parameters['collect_responses']:
        print "--- Collecting all responses..."
        result_responses = np.nan*np.ones((T_space.size, all_parameters['N'], all_parameters['num_repetitions']))
        result_target = np.nan*np.ones((T_space.size, all_parameters['N'], all_parameters['num_repetitions']))
        result_nontargets = np.nan*np.ones((T_space.size, all_parameters['N'], T_max-1, all_parameters['num_repetitions']))

    search_progress = progress.Progress(T_space.size*all_parameters['num_repetitions'])

    for repet_i in xrange(all_parameters['num_repetitions']):
        for T_i, T in enumerate(T_space):
            print "%.2f%%, %s left - %s" % (search_progress.percentage(), search_progress.time_remaining_str(), search_progress.eta_str())

            print "Fit for T=%d, %d/%d" % (T, repet_i+1, all_parameters['num_repetitions'])

            # Update parameter
            all_parameters['T'] = T

            ### WORK WORK WORK work? ###
            # Instantiate
            (_, _, _, sampler) = launchers.init_everything(all_parameters)

            # Sample
            sampler.run_inference(all_parameters)

            # Compute precision
            print "get precision..."
            result_all_precisions[T_i, repet_i] = sampler.get_precision()

            # Fit mixture model
            print "fit mixture model..."
            curr_params_fit = sampler.fit_mixture_model(use_all_targets=False)
            # curr_params_fit['mixt_nontargets_sum'] = np.sum(curr_params_fit['mixt_nontargets'])
            result_em_fits[T_i, :, repet_i] = [curr_params_fit[key] for key in ['kappa', 'mixt_target', 'mixt_nontargets_sum', 'mixt_random', 'train_LL', 'bic']]
            # result_em_fits_allnontargets[T_i, :2, repet_i] = [curr_params_fit['kappa'], curr_params_fit['mixt_target']]
            # result_em_fits_allnontargets[T_i, 2:(2+T-1), repet_i] = curr_params_fit['mixt_nontargets']
            # result_em_fits_allnontargets[T_i, -3:, repet_i] = [curr_params_fit[key] for key in ('mixt_random', 'train_LL', 'bic')]

            # Compute fisher info
            # print "compute fisher info"
            # result_fi_theo[T_i, repet_i] = sampler.estimate_fisher_info_theocov(use_theoretical_cov=False)
            # result_fi_theocov[T_i, repet_i] = sampler.estimate_fisher_info_theocov(use_theoretical_cov=True)

            # Compute distances to datasets
            if T in bays09_T_space:
                result_dist_bays09[T_i, :, repet_i] = (bays09_experimental_mixtures_mean[:, bays09_T_space == T].flatten() - result_em_fits[T_i, :4, repet_i])**2.

                result_dist_bays09_emmixt_KL[T_i, repet_i] = utils.KL_div(result_em_fits[T_i, 1:4, repet_i], bays09_experimental_mixtures_mean[1:, bays09_T_space == T].flatten())

            if T in gorgo11_T_space:
                result_dist_gorgo11[T_i, :, repet_i] = (gorgo11_experimental_emfits_mean[:, gorgo11_T_space == T].flatten() - result_em_fits[T_i, :4, repet_i])**2.

                result_dist_gorgo11_emmixt_KL[T_i, repet_i] = utils.KL_div(result_em_fits[T_i, 1:4, repet_i], gorgo11_experimental_emfits_mean[1:, gorgo11_T_space == T].flatten())


            # If needed, store responses
            if all_parameters['collect_responses']:
                (responses, target, nontarget) = sampler.collect_responses()
                result_responses[T_i, :, repet_i] = responses
                result_target[T_i, :, repet_i] = target
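                # T_i == T - 1 here, so :T_i fills exactly the T-1 nontarget slots for this T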
                result_nontargets[T_i, :, :T_i, repet_i] = nontarget

                print "collected responses"


            print "CURRENT RESULTS:\n", result_all_precisions[T_i, repet_i], curr_params_fit, np.sum(result_dist_bays09[T_i, :, repet_i]), np.sum(result_dist_gorgo11[T_i, :, repet_i]), "\n"
            ### /Work ###

            search_progress.increment()
            if run_counter % save_every == 0 or search_progress.done():
                dataio.save_variables_default(locals())
            run_counter += 1

    # Finished
    dataio.save_variables_default(locals())

    print "All finished"
    return locals()
def launcher_do_fit_mixturemodels_sequential_alltrecall(args):
    '''
        Run the model for 1..T items sequentially, for all possible trecall/T.
        Compute:
        - Precision of samples
        - EM mixture model fits. Both independent and collapsed model.
        - Theoretical Fisher Information
        - EM Mixture model distances to set of currently working datasets.
    '''

    print "Doing a piece of work for launcher_do_fit_mixturemodels_sequential_alltrecall"

    all_parameters = utils.argparse_2_dict(args)
    print all_parameters

    if all_parameters['burn_samples'] + all_parameters['num_samples'] < 200:
        print "WARNING> you do not have enough samples I think!", all_parameters['burn_samples'] + all_parameters['num_samples']

    # Create DataIO
    #  (complete label with current variable state)
    dataio = DataIO.DataIO(output_folder=all_parameters['output_directory'], label=all_parameters['label'].format(**all_parameters))
    save_every = 1
    run_counter = 0

    # Load dataset to compare against
    data_gorgo11_sequ = load_experimental_data.load_data_gorgo11_sequential(data_dir=all_parameters['experiment_data_dir'], fit_mixture_model=True)
    gorgo11_sequ_T_space = np.unique(data_gorgo11_sequ['n_items'])


    # Parameters to vary
    T_max = all_parameters['T']
    T_space = np.arange(1, T_max+1)
    repetitions_axis = -1

    # Result arrays
    result_all_precisions = np.nan*np.empty((T_space.size, T_space.size, all_parameters['num_repetitions']))
    result_fi_theo = np.nan*np.empty((T_space.size, T_space.size, all_parameters['num_repetitions']))
    result_fi_theocov = np.nan*np.empty((T_space.size, T_space.size, all_parameters['num_repetitions']))
    result_em_fits = np.nan*np.empty((T_space.size, T_space.size, 6, all_parameters['num_repetitions']))  # kappa, mixt_target, mixt_nontarget, mixt_random, ll, bic
    result_em_fits_collapsed_tr = np.nan*np.empty((T_space.size, T_space.size, 4, all_parameters['num_repetitions']))  # kappa_tr, mixt_target_tr, mixt_nontarget_tr, mixt_random_tr
    result_em_fits_collapsed_summary = np.nan*np.empty((5, all_parameters['num_repetitions']))  # bic, train_LL (currently unset), kappa_theta (3 parameters)

    result_dist_gorgo11_sequ = np.nan*np.empty((T_space.size, T_space.size, 4, all_parameters['num_repetitions']))  # kappa, mixt_target, mixt_nontarget, mixt_random
    result_dist_gorgo11_sequ_emmixt_KL = np.nan*np.empty((T_space.size, T_space.size, all_parameters['num_repetitions']))

    result_dist_gorgo11_sequ_collapsed = np.nan*np.empty((T_space.size, T_space.size, 4, all_parameters['num_repetitions']))
    result_dist_gorgo11_sequ_collapsed_emmixt_KL = np.nan*np.empty((T_space.size, T_space.size, all_parameters['num_repetitions']))

    gorgo11_sequ_collapsed_mixtmod_mean = data_gorgo11_sequ['collapsed_em_fits_doublepowerlaw_array']


    # If desired, will automatically save all Model responses.
    if all_parameters['collect_responses']:
        print "--- Collecting all responses..."
        result_responses = np.nan*np.empty((T_space.size, T_space.size, all_parameters['N'], all_parameters['num_repetitions']))
        result_target = np.nan*np.empty((T_space.size, T_space.size, all_parameters['N'], all_parameters['num_repetitions']))
        result_nontargets = np.nan*np.empty((T_space.size, T_space.size, all_parameters['N'], T_max-1, all_parameters['num_repetitions']))

    search_progress = progress.Progress(T_space.size*(T_space.size + 1)/2.*all_parameters['num_repetitions'])
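    # One unit per (T, trecall) pair: sum_{T=1}^{T_max} T = T_max*(T_max+1)/2 conditions per repetition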

    for repet_i in xrange(all_parameters['num_repetitions']):
        for T_i, T in enumerate(T_space):
            for trecall_i, trecall in enumerate(np.arange(T, 0, -1)):
                # Invert the trecall indexing to stay consistent: trecall_i == 0 is the last item,
                # but trecall itself still means the actual time of recall.
                print "%.2f%%, %s left - %s" % (search_progress.percentage(), search_progress.time_remaining_str(), search_progress.eta_str())
                print "Fit for T=%d, tr=%d, %d/%d" % (T, trecall, repet_i+1, all_parameters['num_repetitions'])

                # Update parameter
                all_parameters['T'] = T
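                # fixed_cued_feature_time is 0-indexed, hence trecall - 1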
                all_parameters['fixed_cued_feature_time'] = trecall - 1

                ### WORK WORK WORK work? ###
                # Instantiate
                (_, _, _, sampler) = launchers.init_everything(all_parameters)

                # Sample
                sampler.run_inference(all_parameters)

                # Compute precision
                print "get precision..."
                result_all_precisions[T_i, trecall_i, repet_i] = sampler.get_precision()

                # Fit mixture model, independent
                print "fit mixture model..."
                curr_params_fit = sampler.fit_mixture_model(use_all_targets=False)
                result_em_fits[T_i, trecall_i, :, repet_i] = [curr_params_fit[key] for key in ['kappa', 'mixt_target', 'mixt_nontargets_sum', 'mixt_random', 'train_LL', 'bic']]

                # Compute fisher info
                print "compute fisher info"
                result_fi_theo[T_i, trecall_i, repet_i] = sampler.estimate_fisher_info_theocov(use_theoretical_cov=False)
                result_fi_theocov[T_i, trecall_i, repet_i] = sampler.estimate_fisher_info_theocov(use_theoretical_cov=True)

                # Compute distances to datasets (this is for the non-collapsed stuff, not the best)
                if T in gorgo11_sequ_T_space:
                    gorgo11_sequ_mixtures_mean = data_gorgo11_sequ['em_fits_nitems_trecall_arrays'][gorgo11_sequ_T_space==T, trecall_i, :4].flatten()

                    result_dist_gorgo11_sequ[T_i, trecall_i, :, repet_i] = (gorgo11_sequ_mixtures_mean - result_em_fits[T_i, trecall_i, :4, repet_i])**2.
                    result_dist_gorgo11_sequ_emmixt_KL[T_i, trecall_i, repet_i] = utils.KL_div(result_em_fits[T_i, trecall_i, 1:4, repet_i], gorgo11_sequ_mixtures_mean[1:])


                # If needed, store responses
                if all_parameters['collect_responses']:
                    print "collect responses"
                    (responses, target, nontarget) = sampler.collect_responses()
                    result_responses[T_i, trecall_i, :, repet_i] = responses
                    result_target[T_i, trecall_i, :, repet_i] = target
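                    # :T_i == :(T - 1) nontarget slots, same trick as in launcher_do_fit_mixturemodels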
                    result_nontargets[T_i, trecall_i, :, :T_i, repet_i] = nontarget


                print "CURRENT RESULTS:\n", result_all_precisions[T_i, trecall_i, repet_i], curr_params_fit, result_fi_theo[T_i, trecall_i, repet_i], result_fi_theocov[T_i, trecall_i, repet_i], np.sum(result_dist_gorgo11_sequ[T_i, trecall_i, :, repet_i]), np.sum(result_dist_gorgo11_sequ_emmixt_KL[T_i, trecall_i, repet_i]), "\n"
                ### /Work ###

                search_progress.increment()
                if run_counter % save_every == 0 or search_progress.done():
                    dataio.save_variables_default(locals())
                run_counter += 1

        # Fit Collapsed mixture model
        # TODO check dimensionality...
        print 'Fitting Collapsed double powerlaw mixture model...'
        params_fit = em_circularmixture_parametrickappa_doublepowerlaw.fit(T_space, result_responses[..., repet_i], result_target[..., repet_i], result_nontargets[..., repet_i], debug=False)

        # First store the parameters that depend on T/trecall
        for i, key in enumerate(['kappa', 'mixt_target_tr', 'mixt_nontargets_tr', 'mixt_random_tr']):
            result_em_fits_collapsed_tr[..., i, repet_i] = params_fit[key]

        # Then the ones that do not, only one per full collapsed fit.
        result_em_fits_collapsed_summary[0, repet_i] = params_fit['bic']
        # result_em_fits_collapsed_summary[1, repet_i] = params_fit['train_LL']
        result_em_fits_collapsed_summary[2:, repet_i] = params_fit['kappa_theta']

        # Compute distances to dataset for collapsed model
        result_dist_gorgo11_sequ_collapsed[..., repet_i] = (gorgo11_sequ_collapsed_mixtmod_mean - result_em_fits_collapsed_tr[..., repet_i])**2.
        result_dist_gorgo11_sequ_collapsed_emmixt_KL[..., repet_i] = utils.KL_div(result_em_fits_collapsed_tr[..., 1:4, repet_i], gorgo11_sequ_collapsed_mixtmod_mean[..., 1:], axis=-1)


    # Finished
    dataio.save_variables_default(locals())

    print "All finished"
    return locals()
def launcher_do_noise_output_effect_allT(args):
    '''
        Run the model for 1..T items, varying sigma_output
    '''

    print "Doing a piece of work for launcher_do_noise_output_effect_allT"

    all_parameters = utils.argparse_2_dict(args)
    print all_parameters

    if all_parameters['burn_samples'] + all_parameters['num_samples'] < 200:
        print "WARNING> you do not have enough samples I think!", all_parameters['burn_samples'] + all_parameters['num_samples']

    if 'plots_during_simulation_callback' in all_parameters:
        plots_during_simulation_callback = all_parameters['plots_during_simulation_callback']
        del all_parameters['plots_during_simulation_callback']
    else:
        plots_during_simulation_callback = None

    # Create DataIO
    #  (complete label with current variable state)
    dataio = DataIO.DataIO(output_folder=all_parameters['output_directory'], label=all_parameters['label'].format(**all_parameters))
    save_every = 1
    run_counter = 0

    # Parameters to vary
    T_max = all_parameters['T']
    T_space = np.arange(1, T_max+1)
    repetitions_axis = -1

    # Output noise levels to sweep
    precision_sigmaoutput = 20
    sigmaoutput_space = np.linspace(0.0, 0.5, precision_sigmaoutput)

    # Result arrays
    result_all_precisions = np.nan*np.ones((sigmaoutput_space.size, T_max, all_parameters['num_repetitions']))
    result_em_fits = np.nan*np.ones((sigmaoutput_space.size, T_max, 6, all_parameters['num_repetitions']))  # kappa, mixt_target, mixt_nontarget, mixt_random, ll, bic

    search_progress = progress.Progress(sigmaoutput_space.size*T_max*all_parameters['num_repetitions'])

    for repet_i in xrange(all_parameters['num_repetitions']):
        for sigmaoutput_i, sigma_output in enumerate(sigmaoutput_space):
            for T_i, T in enumerate(T_space):
                print "%.2f%%, %s left - %s" % (search_progress.percentage(), search_progress.time_remaining_str(), search_progress.eta_str())

                print "Fit for sigma_output=%.3f, T %d, %d/%d" % (sigma_output, T, repet_i+1, all_parameters['num_repetitions'])

                # Update parameter
                all_parameters['sigma_output'] = sigma_output
                all_parameters['T'] = T

                ### WORK WORK WORK work? ###

                # Fix some parameters
                # all_parameters['stimuli_generation'] = 'separated'
                # all_parameters['slice_width'] = np.pi/64.

                # Instantiate
                (_, _, _, sampler) = launchers.init_everything(all_parameters)

                # Sample
                sampler.run_inference(all_parameters)

                # Compute precision
                print "get precision..."
                result_all_precisions[sigmaoutput_i, T_i, repet_i] = sampler.get_precision()

                # Fit mixture model
                print "fit mixture model..."
                curr_params_fit = sampler.fit_mixture_model(use_all_targets=False)
                result_em_fits[sigmaoutput_i, T_i, :, repet_i] = [curr_params_fit[key] for key in ('kappa', 'mixt_target', 'mixt_nontargets_sum', 'mixt_random', 'train_LL', 'bic')]

                print result_all_precisions[sigmaoutput_i, T_i, repet_i], curr_params_fit

                ## Run the callback function, if any
                if plots_during_simulation_callback:
                    print "Doing plots..."
                    try:
                        # Stay safe: if plotting fails, the simulation must continue
                        plots_during_simulation_callback['function'](locals(), plots_during_simulation_callback['parameters'])
                        print "plots done."
                    except Exception:
                        print "error during plotting callback function", plots_during_simulation_callback['function'], plots_during_simulation_callback['parameters']
                        traceback.print_exc()

                ### /Work ###
                search_progress.increment()
                if run_counter % save_every == 0 or search_progress.done():
                    dataio.save_variables_default(locals())
                run_counter += 1

    # Finished
    dataio.save_variables_default(locals())

    print "All finished"
    return locals()
def launcher_do_noise_output_effect_withplots(args):
    '''
        Just like launcher_do_noise_output_effect, but does plots as well
    '''

    all_parameters = utils.argparse_2_dict(args)

    if all_parameters['code_type'] == 'hierarchical':
        # Use ratio_conj for plotting/titles
        if all_parameters['ratio_hierarchical'] is not None:
            all_parameters['ratio_conj'] = all_parameters['ratio_hierarchical']

    plotting_parameters = dict(axes=dict(ax_sigmaoutput_kappa=None, ax_sigmaoutput_mixtures=None))

    ### Now do the plots
    def do_sigma_output_plot(variables_launcher_running, plotting_parameters):
        dataio = variables_launcher_running['dataio']
        sigmaoutput_space = variables_launcher_running['sigmaoutput_space']

        result_em_fits_mean = utils.nanmean(variables_launcher_running['result_em_fits'], axis=-1)
        result_em_fits_std = utils.nanstd(variables_launcher_running['result_em_fits'], axis=-1)

        plt.ion()

        # Memory curve kappa
        def sigmaoutput_plot_kappa(sigmaoutput_space, result_em_fits_mean, result_em_fits_std=None, exp_name='', ax=None):

            if ax is not None:
                plt.figure(ax.get_figure().number)
                ax.hold(False)

            ax = utils.plot_mean_std_area(sigmaoutput_space, result_em_fits_mean[..., 0], result_em_fits_std[..., 0], xlabel='sigma output', ylabel='Memory fidelity', linewidth=3, fmt='o-', markersize=8, label='Noise output effect', ax_handle=ax)

            ax.hold(True)

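            # Two-pass formatting: the first .format() fills the model parameters; {{exp_name}}
            # survives as {exp_name} and is filled by the second .format().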
            ax.set_title("{{exp_name}} {T} {M} {ratio_conj:.2f} {sigmax:.3f} {sigmay:.2f}".format(**variables_launcher_running['all_parameters']).format(exp_name=exp_name))
            ax.legend()
            # ax.set_xlim([0.9, T_space_exp.max()+0.1])
            # ax.set_xticks(range(1, T_space_exp.max()+1))
            # ax.set_xticklabels(range(1, T_space_exp.max()+1))

            ax.get_figure().canvas.draw()

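            # Mixed formatting: .format() fills the parameters, the remaining %s then takes exp_name;
            # {{label}} and {{unique_id}} survive as {label}/{unique_id}, left for dataio.save_current_figure to fill.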
            dataio.save_current_figure('noiseoutput_kappa_%s_T{T}_M{M}_ratio{ratio_conj}_sigmax{sigmax}_sigmay{sigmay}_{{label}}_{{unique_id}}.pdf'.format(**variables_launcher_running['all_parameters']) % (exp_name))

            return ax

        # Plot EM Mixtures proportions
        def sigmaoutput_plot_mixtures(sigmaoutput_space, result_em_fits_mean, result_em_fits_std, exp_name='', ax=None):

            if ax is None:
                _, ax = plt.subplots()

            plt.figure(ax.get_figure().number)
            ax.hold(False)

            # mixture probabilities
            print result_em_fits_mean[..., 1]

            result_em_fits_mean[np.isnan(result_em_fits_mean)] = 0.0
            result_em_fits_std[np.isnan(result_em_fits_std)] = 0.0

            utils.plot_mean_std_area(sigmaoutput_space, result_em_fits_mean[..., 1], result_em_fits_std[..., 1], xlabel='sigma output', ylabel="Mixture probabilities", ax_handle=ax, linewidth=3, fmt='o-', markersize=5, label='Target')
            ax.hold(True)
            utils.plot_mean_std_area(sigmaoutput_space, result_em_fits_mean[..., 2], result_em_fits_std[..., 2], xlabel='sigma output', ylabel="Mixture probabilities", ax_handle=ax, linewidth=3, fmt='o-', markersize=5, label='Nontarget')
            utils.plot_mean_std_area(sigmaoutput_space, result_em_fits_mean[..., 3], result_em_fits_std[..., 3], xlabel='sigma output', ylabel="Mixture probabilities", ax_handle=ax, linewidth=3, fmt='o-', markersize=5, label='Random')

            ax.legend(prop={'size':15})

            ax.set_title("{{exp_name}} {T} {M} {ratio_conj:.2f} {sigmax:.3f} {sigmay:.2f}".format(**variables_launcher_running['all_parameters']).format(exp_name=exp_name))

            ax.set_ylim([0.0, 1.1])
            ax.get_figure().canvas.draw()

            dataio.save_current_figure('memorycurves_emfits_%s_T{T}_M{M}_ratio{ratio_conj}_sigmax{sigmax}_sigmay{sigmay}_{{label}}_{{unique_id}}.pdf'.format(**variables_launcher_running['all_parameters']) % (exp_name))

            return ax

        # Do plots
        plotting_parameters['axes']['ax_sigmaoutput_kappa'] = sigmaoutput_plot_kappa(sigmaoutput_space, result_em_fits_mean, result_em_fits_std, exp_name='kappa', ax=plotting_parameters['axes']['ax_sigmaoutput_kappa'])
        plotting_parameters['axes']['ax_sigmaoutput_mixtures'] = sigmaoutput_plot_mixtures(sigmaoutput_space, result_em_fits_mean, result_em_fits_std, exp_name='mixt probs', ax=plotting_parameters['axes']['ax_sigmaoutput_mixtures'])


    if all_parameters.get('do_plots_during_simulation', False):
        # Define the callback function.
        all_parameters['plots_during_simulation_callback'] = dict(function=do_sigma_output_plot, parameters=plotting_parameters)
        # Run the launcher_do_noise_output_effect, plots are done during the runs automatically
        other_launcher_results = launcher_do_noise_output_effect(all_parameters)
    else:
        # Run the launcher_do_noise_output_effect, will do plots later
        other_launcher_results = launcher_do_noise_output_effect(args)

        # Do the plots
        do_sigma_output_plot(other_launcher_results, plotting_parameters)

    # Return the output of the other launcher.
    return other_launcher_results
def launcher_check_fisher_fit_1obj_2016(args):
  print "Doing a piece of work for launcher_check_fisher_fit_1obj_2016"

  all_parameters = utils.argparse_2_dict(args)
  print all_parameters

  if all_parameters['burn_samples'] + all_parameters['num_samples'] < 200:
    print "WARNING> you do not have enough samples I think!", all_parameters['burn_samples'] + all_parameters['num_samples']

  if 'plots_during_simulation_callback' in all_parameters:
    plots_during_simulation_callback = all_parameters[
        'plots_during_simulation_callback']
    del all_parameters['plots_during_simulation_callback']
  else:
    plots_during_simulation_callback = None

  # Create DataIO
  #  (complete label with current variable state)
  dataio = DataIO.DataIO(
      output_folder=all_parameters['output_directory'],
      label=all_parameters['label'].format(**all_parameters))
  save_every = 1
  run_counter = 0

  # Result arrays
  result_all_precisions = np.nan * np.empty(
      (all_parameters['num_repetitions']), dtype=float)
  result_FI_rc_curv = np.nan * np.empty(
      (all_parameters['N'], all_parameters['num_repetitions']), dtype=float)
  result_FI_rc_theo = np.nan * np.empty(
      (all_parameters['N'], all_parameters['num_repetitions']), dtype=float)
  result_FI_rc_theocov = np.nan * np.empty(
      (all_parameters['N'], all_parameters['num_repetitions']), dtype=float)
  result_FI_rc_theo_circulant = np.nan * np.empty(
      (all_parameters['N'], all_parameters['num_repetitions']), dtype=float)
  result_FI_rc_theo_largeN = np.nan * np.empty(
      (all_parameters['num_repetitions']), dtype=float)
  result_marginal_inv_FI = np.nan * np.ones(
      (2, all_parameters['num_repetitions']))  # rows: inv_FI, inv_FI_std
  result_marginal_FI = np.nan * np.ones(
      (2, all_parameters['num_repetitions']))  # rows: FI, FI_std

  result_em_fits = np.nan * np.empty(
      (6, all_parameters['num_repetitions']))  # kappa, mixt_target, mixt_nontargets_sum, mixt_random, train_LL, bic

  search_progress = progress.Progress(all_parameters['num_repetitions'])

  for repet_i in xrange(all_parameters['num_repetitions']):
    print "%.2f%%, %s left - %s" % (search_progress.percentage(),
                                    search_progress.time_remaining_str(),
                                    search_progress.eta_str())

    print "Fisher Info check, rep %d/%d" % (repet_i + 1,
                                            all_parameters['num_repetitions'])

    ### WORK WORK WORK work? ###

    # Instantiate
    (_, _, _, sampler) = launchers.init_everything(all_parameters)

    # Sample
    sampler.run_inference(all_parameters)

    # Compute precision
    print "get precision..."
    result_all_precisions[repet_i] = sampler.get_precision()

    # Theoretical Fisher info
    if all_parameters['code_type'] != 'hierarchical':
      print "theoretical FI"
      result_FI_rc_theo[:, repet_i] = (
          sampler.estimate_fisher_info_theocov(use_theoretical_cov=False))
      result_FI_rc_theocov[:, repet_i] = (
          sampler.estimate_fisher_info_theocov(use_theoretical_cov=True))
      result_FI_rc_theo_largeN[repet_i] = (
          sampler.estimate_fisher_info_theocov_largen(use_theoretical_cov=True)
      )
      result_FI_rc_theo_circulant[:, repet_i] = (
          sampler.estimate_fisher_info_circulant())
    # Fisher Info from curvature
    print "Compute fisher from curvature"
    fi_curv_dict = sampler.estimate_fisher_info_from_posterior_avg(
        num_points=500, full_stats=True)
    result_FI_rc_curv[:, repet_i] = fi_curv_dict['all']

    # Fit mixture model
    print "fit mixture model..."
    curr_params_fit = sampler.fit_mixture_model(use_all_targets=False)
    curr_params_fit['mixt_nontargets_sum'] = np.sum(
        curr_params_fit['mixt_nontargets'])
    result_em_fits[..., repet_i] = [
        curr_params_fit[key]
        for key in ('kappa', 'mixt_target', 'mixt_nontargets_sum',
                    'mixt_random', 'train_LL', 'bic')
    ]

    # Compute marginal inverse fisher info
    print "compute marginal inverse fisher info"
    marginal_fi_dict = (
        sampler.estimate_marginal_inverse_fisher_info_montecarlo())
    result_marginal_inv_FI[:, repet_i] = [
        marginal_fi_dict[key] for key in ('inv_FI', 'inv_FI_std')
    ]
    result_marginal_FI[:, repet_i] = [
        marginal_fi_dict[key] for key in ('FI', 'FI_std')
    ]

    ## Run the callback function, if any
    if plots_during_simulation_callback:
      print "Doing plots..."
      try:
        # Stay safe: if plotting fails, the simulation must continue
        plots_during_simulation_callback['function'](
            locals(), plots_during_simulation_callback['parameters'])
        print "plots done."
      except Exception:
        print "error during plotting callback function", plots_during_simulation_callback[
            'function'], plots_during_simulation_callback['parameters']

    ### /Work ###
    search_progress.increment()
    if run_counter % save_every == 0 or search_progress.done():
      dataio.save_variables_default(locals())
    run_counter += 1

  # Finished
  dataio.save_variables_default(locals())

  print "All finished"
  return locals()
def launcher_do_check_scaling_ratio_with_M(args):
    '''
        Reviewer 3 asked whether the proportion of conjunctive units needed to achieve a given precision varies with M.

        Check it.
    '''

    print "Doing a piece of work for launcher_do_check_scaling_ratio_with_M"

    all_parameters = utils.argparse_2_dict(args)
    print all_parameters

    if all_parameters['burn_samples'] + all_parameters['num_samples'] < 200:
        print "WARNING> you do not have enough samples I think!", all_parameters['burn_samples'] + all_parameters['num_samples']

    if 'plots_during_simulation_callback' in all_parameters:
        plots_during_simulation_callback = all_parameters['plots_during_simulation_callback']
        del all_parameters['plots_during_simulation_callback']
    else:
        plots_during_simulation_callback = None

    # Create DataIO
    #  (complete label with current variable state)
    dataio = DataIO.DataIO(output_folder=all_parameters['output_directory'], label=all_parameters['label'].format(**all_parameters))
    save_every = 1
    run_counter = 0

    # Fix some parameters
    all_parameters['autoset_parameters'] = True

    # Parameters to vary
    nb_M_space = 10
    M_max = 800
    M_min = 20
    M_space = np.arange(M_min, M_max, np.ceil((M_max - M_min)/float(nb_M_space)), dtype=int)
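    # e.g. M_min=20, M_max=800, nb_M_space=10: step ceil(780/10) = 78, so M in {20, 98, ..., 722}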
    nb_ratio_space = 10
    ratio_space = np.linspace(0.0001, 1.0, nb_ratio_space)

    # Result arrays
    result_all_precisions = np.nan*np.ones((M_space.size, ratio_space.size, all_parameters['num_repetitions']))
    result_em_fits = np.nan*np.ones((M_space.size, ratio_space.size, 5, all_parameters['num_repetitions']))  # kappa, mixt_target, mixt_nontarget, mixt_random, ll

    search_progress = progress.Progress(M_space.size*ratio_space.size*all_parameters['num_repetitions'])

    for repet_i in xrange(all_parameters['num_repetitions']):
        for M_i, M in enumerate(M_space):
            for ratio_i, ratio in enumerate(ratio_space):
                print "%.2f%%, %s left - %s" % (search_progress.percentage(), search_progress.time_remaining_str(), search_progress.eta_str())

                print "Fit for M=%d, ratio=%.3f  %d/%d" % (M, ratio, repet_i+1, all_parameters['num_repetitions'])

                # Update parameter
                all_parameters['M'] = M
                all_parameters['ratio_conj'] = ratio

                ### WORK WORK WORK work? ###


                # Guard: curr_params_fit is printed after the try/except even when the work fails
                curr_params_fit = None
                try:
                    # Instantiate
                    (_, _, _, sampler) = launchers.init_everything(all_parameters)

                    # Sample
                    sampler.run_inference(all_parameters)

                    # Compute precision
                    print "get precision..."
                    result_all_precisions[M_i, ratio_i, repet_i] = sampler.get_precision()

                    # Fit mixture model
                    print "fit mixture model..."
                    curr_params_fit = sampler.fit_mixture_model(use_all_targets=False)
                    result_em_fits[M_i, ratio_i, :, repet_i] = [curr_params_fit[key] for key in ('kappa', 'mixt_target', 'mixt_nontargets_sum', 'mixt_random', 'train_LL')]

                except Exception:
                    # oh well...
                    print "something failed here, so sad"
                    traceback.print_exc()

                print result_all_precisions[M_i, ratio_i, repet_i], curr_params_fit

                ## Run the callback function, if any
                if plots_during_simulation_callback:
                    print "Doing plots..."
                    try:
                        # Stay safe: if plotting fails, the simulation must continue
                        plots_during_simulation_callback['function'](locals(), plots_during_simulation_callback['parameters'])
                        print "plots done."
                    except Exception as e:
                        print "error during plotting callback function", plots_during_simulation_callback['function'], plots_during_simulation_callback['parameters']
                        print e
                        traceback.print_exc()

                ### /Work ###
                search_progress.increment()
                if run_counter % save_every == 0 or search_progress.done():
                    dataio.save_variables_default(locals())
                run_counter += 1

    # Finished
    dataio.save_variables_default(locals())

    print "All finished"
    return locals()
def launcher_do_receptivesize_effect(args):
    '''
        Run the model for 1 item, varying the receptive size scale.
        Compute:
        - Precision of samples
        - EM mixture model fits
        - Marginal Inverse Fisher Information
    '''

    print "Doing a piece of work for launcher_do_receptivesize_effect"

    all_parameters = utils.argparse_2_dict(args)
    print all_parameters

    if all_parameters['burn_samples'] + all_parameters['num_samples'] < 200:
        print "WARNING> you do not have enough samples I think!", all_parameters['burn_samples'] + all_parameters['num_samples']

    if 'plots_during_simulation_callback' in all_parameters:
        plots_during_simulation_callback = all_parameters['plots_during_simulation_callback']
        del all_parameters['plots_during_simulation_callback']
    else:
        plots_during_simulation_callback = None

    # Create DataIO
    #  (complete label with current variable state)
    dataio = DataIO.DataIO(output_folder=all_parameters['output_directory'], label=all_parameters['label'].format(**all_parameters))
    save_every = 1
    run_counter = 0

    # Fix some parameters
    all_parameters['autoset_parameters'] = False
    all_parameters['feat_ratio'] = -1.  # hack to automatically set the ratio

    # Parameters to vary
    rcscale_space = np.linspace(0.0001, 40., 30)

    # Result arrays
    result_all_precisions = np.nan*np.ones((rcscale_space.size, all_parameters['num_repetitions']))
    result_marginal_inv_fi = np.nan*np.ones((rcscale_space.size, 4, all_parameters['num_repetitions']))  # inv_FI, inv_FI_std, FI, FI_std
    result_em_fits = np.nan*np.ones((rcscale_space.size, 5, all_parameters['num_repetitions']))  # kappa, mixt_target, mixt_nontarget, mixt_random, ll

    search_progress = progress.Progress(rcscale_space.size*all_parameters['num_repetitions'])

    for repet_i in xrange(all_parameters['num_repetitions']):
        for rc_scale_i, rc_scale in enumerate(rcscale_space):
            print "%.2f%%, %s left - %s" % (search_progress.percentage(), search_progress.time_remaining_str(), search_progress.eta_str())

            print "Fit for rc_scale=%.2f, %d/%d" % (rc_scale, repet_i+1, all_parameters['num_repetitions'])

            # Update parameter
            all_parameters['rc_scale'] = rc_scale

            ### WORK WORK WORK work? ###

            # Instantiate
            (_, _, _, sampler) = launchers.init_everything(all_parameters)

            # Sample
            sampler.run_inference(all_parameters)

            # Compute precision
            print "get precision..."
            result_all_precisions[rc_scale_i, repet_i] = sampler.get_precision()

            # Fit mixture model
            print "fit mixture model..."
            curr_params_fit = sampler.fit_mixture_model(use_all_targets=False)
            result_em_fits[rc_scale_i, :, repet_i] = [curr_params_fit[key] for key in ('kappa', 'mixt_target', 'mixt_nontargets_sum', 'mixt_random', 'train_LL')]

            # Compute marginal inverse fisher info
            print "compute marginal inverse fisher info"
            marginal_fi_dict = sampler.estimate_marginal_inverse_fisher_info_montecarlo()
            result_marginal_inv_fi[rc_scale_i, :, repet_i] = [marginal_fi_dict[key] for key in ('inv_FI', 'inv_FI_std', 'FI', 'FI_std')]


            print result_all_precisions[rc_scale_i, repet_i], curr_params_fit, marginal_fi_dict

            ## Run the callback function, if any
            if plots_during_simulation_callback:
                print "Doing plots..."
                try:
                    # Stay safe: if plotting fails, the simulation must continue
                    plots_during_simulation_callback['function'](locals(), plots_during_simulation_callback['parameters'])
                    print "plots done."
                except Exception as e:
                    print "error during plotting callback function", plots_during_simulation_callback['function'], plots_during_simulation_callback['parameters']
                    print e
                    traceback.print_exc()

            ### /Work ###
            search_progress.increment()
            if run_counter % save_every == 0 or search_progress.done():
                dataio.save_variables_default(locals())
            run_counter += 1

    # Finished
    dataio.save_variables_default(locals())

    print "All finished"
    return locals()