def test_simple():
    '''Unit test: sample data from a mixture of one Von Mises and random
    perturbations, then fit the model and check that everything works.
    '''
    import em_circularmixture

    show_checks = False
    N = 200
    Tmax = 5
    T_space = np.arange(1, Tmax+1)
    kappa_theta = np.array([10., -0.7, -0.3])

    angles_nontargets = utils.sample_angle((Tmax, Tmax, Tmax-1))
    targets = np.zeros((Tmax, Tmax, N))
    nontargets = np.ones((Tmax, Tmax, N, Tmax-1))*angles_nontargets[:, :, np.newaxis, :]
    responses = np.zeros((Tmax, Tmax, N))

    # Filter impossible trecalls (cannot recall later than the number of items)
    ind_filt = np.triu_indices(Tmax, 1)
    targets[ind_filt] = np.nan
    nontargets[ind_filt] = np.nan
    responses[ind_filt] = np.nan

    # Correct nontargets, just to be sure
    for T_i, T in enumerate(T_space):
        for trecall_i, trecall in enumerate(T_space):
            nontargets[T_i, trecall_i, :, (T-1):] = np.nan

    for T_i, T in enumerate(T_space):
        for trecall_i, trecall in enumerate(T_space):
            if trecall <= T:
                kappa_target = compute_kappa(T, trecall, kappa_theta)
                em_fit_target = dict(kappa=kappa_target, mixt_target=0.75, mixt_nontargets=0.15, mixt_random=0.1)

                # Sample from the Von Mises mixture
                responses[T_i, trecall_i] = em_circularmixture.sample_from_fit(em_fit_target, targets[T_i, trecall_i], nontargets[T_i, trecall_i, :, :(T - 1)])

                print "T: {T:d}, trecall: {trecall:d}".format(T=T, trecall=trecall)

                if show_checks:
                    em_fit = em_circularmixture.fit(responses[T_i, trecall_i], targets[T_i, trecall_i], nontargets[T_i, trecall_i, :, :(T - 1)])

                    print "True:   kappa={kappa:.5}, pt={mixt_target:.3}, pnt={mixt_nontargets:.3}, pr={mixt_random:.3}".format(**em_fit_target)
                    print "Fitted: kappa={kappa:.5}, pt={mixt_target:.3}, pnt={mixt_nontargets:.3}, pr={mixt_random:.3}".format(**em_fit)

    # Now try the full fit with alpha/beta/gamma
    # result_dict = fit(T_space, responses, targets, nontargets, debug=True)

    return T_space, responses, targets, nontargets
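# NOTE: compute_kappa(T, trecall, kappa_theta) is called by test_simple() above
# but is not defined in this file. A minimal sketch of a plausible form,
# ASSUMING a power law in both set size T and recall time trecall (consistent
# with the three-element kappa_theta = [scale, T exponent, trecall exponent]
# used above); the real implementation may differ:
def compute_kappa_sketch(T, trecall, kappa_theta):
    '''Hypothetical power-law concentration: theta0 * T**theta1 * trecall**theta2.'''
    return kappa_theta[0] * T**kappa_theta[1] * trecall**kappa_theta[2]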
def test_bays09like():
    '''Uses kappa and mixture probabilities from Bays 2009.'''
    import em_circularmixture

    N = 2000
    T_space = np.array([1, 2, 4, 6])
    Tnum = T_space.size
    Tmax = T_space.max()

    kappa_space = np.array([19.76349326, 11.2619971, 9.22001848, 8.30524648])
    probtarget_space = np.array([0.98688956, 0.92068596, 0.71474023, 0.5596124])
    probnontarget_space = np.array([0., 0.02853913, 0.10499085, 0.28098455])
    probrandom_space = np.array([0.01311044, 0.05077492, 0.18026892, 0.15940305])

    beta, alpha = utils.fit_powerlaw(T_space, kappa_space)

    angles_nontargets = utils.sample_angle((Tnum, Tmax-1))
    targets = np.zeros((Tnum, N))
    nontargets = np.ones((Tnum, N, Tmax-1))*angles_nontargets[:, np.newaxis, :]
    responses = np.zeros((Tnum, N))

    for K_i, K in enumerate(T_space):
        nontargets[K_i, :, (K-1):] = np.nan

    for T_i, T in enumerate(T_space):
        kappa_target = alpha*T**beta
        em_fit_target = dict(kappa=kappa_target, alpha=alpha, beta=beta, mixt_target=probtarget_space[T_i], mixt_nontargets=probnontarget_space[T_i], mixt_random=probrandom_space[T_i])

        # Sample from the Von Mises mixture. Use T-1 nontargets here, matching
        # the fit below (the original sliced with :T_i, which disagrees with
        # :(T-1) for this non-contiguous T_space).
        responses[T_i] = em_circularmixture.sample_from_fit(em_fit_target, targets[T_i], nontargets[T_i, :, :(T-1)])

        em_fit = em_circularmixture.fit(responses[T_i], targets[T_i], nontargets[T_i, :, :(T-1)])
        print "True:   kappa={kappa:.5}, pt={mixt_target:.3}, pnt={mixt_nontargets:.3}, pr={mixt_random:.3}".format(**em_fit_target)
        print "Fitted: kappa={kappa:.5}, pt={mixt_target:.3}, pnt={mixt_nontargets:.3}, pr={mixt_random:.3}".format(**em_fit)

    # Now try the full fit with alpha/beta
    # em_fit = fit(responses, targets, nontargets)

    return T_space, responses, targets, nontargets
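# For reference: utils.fit_powerlaw(), used above, returns (beta, alpha) such
# that kappa is approximately alpha*T**beta. A minimal sketch of one standard
# way to implement it, ASSUMING a plain log-log linear regression (the utils
# version may be more robust):
def fit_powerlaw_sketch(x, y):
    '''Fit y = alpha * x**beta via linear regression in log-log space.'''
    beta, log_alpha = np.polyfit(np.log(x), np.log(y), 1)
    return beta, np.exp(log_alpha)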
def test_simple():
    '''Unit test: sample data from a mixture of one Von Mises and random
    perturbations, then fit the model and check that everything works.
    '''
    import em_circularmixture

    N = 1000
    Tmax = 5
    T_space = np.arange(1, Tmax+1)
    alpha = 9.8
    beta = -0.58

    angles_nontargets = utils.sample_angle((Tmax, Tmax-1))
    targets = np.zeros((Tmax, N))
    nontargets = np.ones((Tmax, N, Tmax-1))*angles_nontargets[:, np.newaxis, :]
    responses = np.zeros((Tmax, N))

    # Correct nontargets, just to be sure
    for K_i, K in enumerate(T_space):
        nontargets[K_i, :, (K-1):] = np.nan

    for K in xrange(Tmax):
        kappa_target = alpha*(K+1.0)**beta
        em_fit_target = dict(kappa=kappa_target, alpha=alpha, beta=beta, mixt_target=0.7, mixt_nontargets=0.2, mixt_random=0.1)

        # Sample from the Von Mises mixture
        responses[K] = em_circularmixture.sample_from_fit(em_fit_target, targets[K], nontargets[K, :, :K])

        em_fit = em_circularmixture.fit(responses[K], targets[K], nontargets[K, :, :K])
        print "True:   kappa={kappa:.5}, pt={mixt_target:.3}, pnt={mixt_nontargets:.3}, pr={mixt_random:.3}".format(**em_fit_target)
        print "Fitted: kappa={kappa:.5}, pt={mixt_target:.3}, pnt={mixt_nontargets:.3}, pr={mixt_random:.3}".format(**em_fit)

    # Now try the full fit with alpha/beta
    # em_fit = fit(responses, targets, nontargets)

    return T_space, responses, targets, nontargets
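# For reference: em_circularmixture.sample_from_fit(), used by the tests above,
# draws responses from the target/nontargets/random mixture. A minimal sketch
# of the sampling logic, ASSUMING the standard parametrisation with a shared
# kappa and the total nontarget probability split evenly across nontargets;
# the actual module implementation may differ in details:
def sample_from_fit_sketch(em_fit, targets, nontargets):
    '''Draw one response per trial from the fitted circular mixture.'''
    N = targets.size
    K = nontargets.shape[1]
    # Component probabilities: target, each nontarget, random.
    probs = np.r_[em_fit['mixt_target'],
                  np.ones(K)*em_fit['mixt_nontargets']/max(K, 1),
                  em_fit['mixt_random']]
    probs /= probs.sum()

    responses = np.empty(N)
    for n in xrange(N):
        component = np.random.choice(probs.size, p=probs)
        if component == 0:
            # Von Mises around the target
            responses[n] = targets[n] + spst.vonmises.rvs(em_fit['kappa'])
        elif component <= K:
            # Von Mises around one of the nontargets (assumes valid columns only)
            responses[n] = nontargets[n, component - 1] + spst.vonmises.rvs(em_fit['kappa'])
        else:
            # Uniform random response on the circle
            responses[n] = np.random.uniform(-np.pi, np.pi)

    return utils.wrap_angles(responses)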
def plots_misbinding_logposterior(data_pbs, generator_module=None):
    '''Reload 3D volume runs from PBS and plot them.'''

    #### SETUP
    #
    savedata = True
    savefigs = True

    plot_logpost = False
    plot_error = False
    plot_mixtmodel = False
    #
    #### /SETUP

    print "Order parameters: ", generator_module.dict_parameters_range.keys()

    result_all_log_posterior = np.squeeze(data_pbs.dict_arrays['result_all_log_posterior']['results'])
    result_all_thetas = np.squeeze(data_pbs.dict_arrays['result_all_thetas']['results'])

    M_space = data_pbs.loaded_data['parameters_uniques']['M']
    M_lower_space = data_pbs.loaded_data['parameters_uniques']['M_layer_one']
    ratio_space = M_space/M_lower_space.astype(float)

    print M_space, M_lower_space, ratio_space
    print result_all_log_posterior.shape, result_all_thetas.shape

    N = result_all_thetas.shape[-1]

    result_prob_wrong = np.zeros((ratio_space.size, N))
    result_em_fits = np.empty((ratio_space.size, 5))*np.nan  # kappa, mixt_target, mixt_nontargets, mixt_random, train_LL

    fixed_means = [-np.pi*0.6, np.pi*0.6]
    all_angles = np.linspace(-np.pi, np.pi, result_all_log_posterior.shape[-1])

    dataio = DataIO(output_folder=generator_module.pbs_submission_infos['simul_out_dir'] + '/outputs/', label='global_' + dataset_infos['save_output_filename'])

    plt.rcParams['font.size'] = 18

    if plot_logpost:
        for ratio_conj_i, ratio_conj in enumerate(ratio_space):
            # ax = plot_mean_std_area(all_angles, nanmean(result_all_log_posterior[ratio_conj_i], axis=0), nanstd(result_all_log_posterior[ratio_conj_i], axis=0))
            # ax.set_xlim((-np.pi, np.pi))
            # ax.set_xticks((-np.pi, -np.pi / 2, 0, np.pi / 2., np.pi))
            # ax.set_xticklabels((r'$-\pi$', r'$-\frac{\pi}{2}$', r'$0$', r'$\frac{\pi}{2}$', r'$\pi$'))
            # ax.set_yticks(())
            # ax.get_figure().canvas.draw()
            # if savefigs:
            #     dataio.save_current_figure('results_misbinding_logpost_ratioconj%.2f_{label}_global_{unique_id}.pdf' % ratio_conj)

            # Compute the probability of answering wrongly
            # (from fitting a mixture distribution onto the posterior)
            for n in xrange(result_all_log_posterior.shape[1]):
                result_prob_wrong[ratio_conj_i, n], _, _ = fit_gaussian_mixture_fixedmeans(all_angles, np.exp(result_all_log_posterior[ratio_conj_i, n]), fixed_means=fixed_means, normalise=True, return_fitted_data=False, should_plot=False)

        # ax = plot_mean_std_area(ratio_space, nanmean(result_prob_wrong, axis=-1), nanstd(result_prob_wrong, axis=-1))
        plt.figure()
        plt.plot(ratio_space, nanmean(result_prob_wrong, axis=-1))
        # ax.get_figure().canvas.draw()

        if savefigs:
            dataio.save_current_figure('results_misbinding_probwrongpost_allratioconj_{label}_global_{unique_id}.pdf')

    if plot_error:
        ## Compute standard deviation/precision from samples and plot as a function of ratio_conj
        stats = compute_mean_std_circular_data(wrap_angles(result_all_thetas - fixed_means[1]).T)

        f = plt.figure()
        plt.plot(ratio_space, stats['std'])
        plt.ylabel('Standard deviation [rad]')

        if savefigs:
            dataio.save_current_figure('results_misbinding_stddev_allratioconj_{label}_global_{unique_id}.pdf')

        f = plt.figure()
        plt.plot(ratio_space, compute_angle_precision_from_std(stats['std'], square_precision=False), linewidth=2)
        plt.ylabel('Precision [$1/rad$]')
        plt.xlabel('Proportion of conjunctive units')
        plt.grid()

        if savefigs:
            dataio.save_current_figure('results_misbinding_precision_allratioconj_{label}_global_{unique_id}.pdf')

        ## Compute the probability of misbinding
        # 1) Just count samples below threshold / total samples
        # 2) Fit a mixture model, average over mixture probabilities
        prob_smaller0 = np.sum(result_all_thetas <= 1, axis=1)/float(result_all_thetas.shape[1])

        em_centers = np.zeros((ratio_space.size, 2))
        em_covs = np.zeros((ratio_space.size, 2))
        em_pk = np.zeros((ratio_space.size, 2))
        em_ll = np.zeros(ratio_space.size)
        for ratio_conj_i, ratio_conj in enumerate(ratio_space):
            cen_lst, cov_lst, em_pk[ratio_conj_i], em_ll[ratio_conj_i] = pygmm.em(result_all_thetas[ratio_conj_i, np.newaxis].T, K=2, max_iter=400, init_kw={'cluster_init': 'fixed', 'fixed_means': fixed_means})
            em_centers[ratio_conj_i] = np.array(cen_lst).flatten()
            em_covs[ratio_conj_i] = np.array(cov_lst).flatten()

        # print em_centers
        # print em_covs
        # print em_pk

        f = plt.figure()
        plt.plot(ratio_space, prob_smaller0)
        plt.ylabel('Misbound proportion')
        if savefigs:
            dataio.save_current_figure('results_misbinding_countsmaller0_allratioconj_{label}_global_{unique_id}.pdf')

        f = plt.figure()
        plt.plot(ratio_space, np.max(em_pk, axis=-1), 'g', linewidth=2)
        plt.ylabel('Mixture proportion, correct')
        plt.xlabel('Proportion of conjunctive units')
        plt.grid()
        if savefigs:
            dataio.save_current_figure('results_misbinding_emmixture_allratioconj_{label}_global_{unique_id}.pdf')

        # Put everything on one figure
        f = plt.figure(figsize=(10, 6))
        norm_for_plot = lambda x: (x - np.min(x))/np.max((x - np.min(x)))
        plt.plot(ratio_space, norm_for_plot(stats['std']), ratio_space, norm_for_plot(compute_angle_precision_from_std(stats['std'], square_precision=False)), ratio_space, norm_for_plot(prob_smaller0), ratio_space, norm_for_plot(em_pk[:, 1]), ratio_space, norm_for_plot(em_pk[:, 0]))
        plt.legend(('Std dev', 'Precision', 'Prob smaller 1', 'Mixture proportion correct', 'Mixture proportion misbinding'))
        # plt.plot(ratio_space, norm_for_plot(compute_angle_precision_from_std(stats['std'], square_precision=False)), ratio_space, norm_for_plot(em_pk[:, 1]), linewidth=2)
        # plt.legend(('Precision', 'Mixture proportion correct'), loc='best')
        plt.grid()
        if savefigs:
            dataio.save_current_figure('results_misbinding_allmetrics_allratioconj_{label}_global_{unique_id}.pdf')

    if plot_mixtmodel:
        # Fit Paul's model
        target_angle = np.ones(N)*fixed_means[1]
        nontarget_angles = np.ones((N, 1))*fixed_means[0]

        for ratio_conj_i, ratio_conj in enumerate(ratio_space):
            print "Ratio: ", ratio_conj

            responses = result_all_thetas[ratio_conj_i]

            curr_params_fit = em_circularmixture.fit(responses, target_angle, nontarget_angles)
            result_em_fits[ratio_conj_i] = [curr_params_fit[key] for key in ('kappa', 'mixt_target', 'mixt_nontargets', 'mixt_random', 'train_LL')]

            print curr_params_fit

        if False:
            f, ax = plt.subplots()
            ax2 = ax.twinx()

            # Left axis, kappa
            ax = plot_mean_std_area(ratio_space, result_em_fits[:, 0], 0*result_em_fits[:, 0], xlabel='Proportion of conjunctive units', ylabel="Inverse variance $[rad^{-2}]$", ax_handle=ax, linewidth=3, fmt='o-', markersize=8, label='Fitted kappa', color='k')

            # Right axis, mixture probabilities
            plot_mean_std_area(ratio_space, result_em_fits[:, 1], 0*result_em_fits[:, 1], xlabel='Proportion of conjunctive units', ylabel="Mixture probabilities", ax_handle=ax2, linewidth=3, fmt='o-', markersize=8, label='Target')
            plot_mean_std_area(ratio_space, result_em_fits[:, 2], 0*result_em_fits[:, 2], xlabel='Proportion of conjunctive units', ylabel="Mixture probabilities", ax_handle=ax2, linewidth=3, fmt='o-', markersize=8, label='Nontarget')
            plot_mean_std_area(ratio_space, result_em_fits[:, 3], 0*result_em_fits[:, 3], xlabel='Proportion of conjunctive units', ylabel="Mixture probabilities", ax_handle=ax2, linewidth=3, fmt='o-', markersize=8, label='Random')

            lines, labels = ax.get_legend_handles_labels()
            lines2, labels2 = ax2.get_legend_handles_labels()
            ax.legend(lines + lines2, labels + labels2, fontsize=12, loc='right')

            # ax.set_xlim([0.9, 5.1])
            # ax.set_xticks(range(1, 6))
            # ax.set_xticklabels(range(1, 6))
            plt.grid()

            f.canvas.draw()

        if True:
            # Mixture probabilities
            ax = plot_mean_std_area(ratio_space, result_em_fits[:, 1], 0*result_em_fits[:, 1], xlabel='Proportion of conjunctive units', ylabel="Mixture probabilities", linewidth=3, fmt='-', markersize=8, label='Target')
            plot_mean_std_area(ratio_space, result_em_fits[:, 2], 0*result_em_fits[:, 2], xlabel='Proportion of conjunctive units', ylabel="Mixture probabilities", ax_handle=ax, linewidth=3, fmt='-', markersize=8, label='Nontarget')
            plot_mean_std_area(ratio_space, result_em_fits[:, 3], 0*result_em_fits[:, 3], xlabel='Proportion of conjunctive units', ylabel="Mixture probabilities", ax_handle=ax, linewidth=3, fmt='-', markersize=8, label='Random')

            ax.legend(loc='right')
            # ax.set_xlim([0.9, 5.1])
            # ax.set_xticks(range(1, 6))
            # ax.set_xticklabels(range(1, 6))
            plt.grid()

            if savefigs:
                dataio.save_current_figure('results_misbinding_emmixture_allratioconj_{label}_global_{unique_id}.pdf')

    # plt.figure()
    # plt.plot(ratio_MMlower, results_filtered_smoothed/np.max(results_filtered_smoothed, axis=0), linewidth=2)
    # plt.plot(ratio_MMlower[np.argmax(results_filtered_smoothed, axis=0)], np.ones(results_filtered_smoothed.shape[-1]), 'ro', markersize=10)
    # plt.grid()
    # plt.ylim((0., 1.1))
    # plt.subplots_adjust(right=0.8)
    # plt.legend(['%d item' % i + 's'*(i > 1) for i in xrange(1, T+1)], loc='center right', bbox_to_anchor=(1.3, 0.5))
    # plt.xticks(np.linspace(0, 1.0, 5))

    all_args = data_pbs.loaded_data['args_list']

    variables_to_save = []

    if savedata:
        dataio.save_variables_default(locals(), variables_to_save)
        dataio.make_link_output_to_dropbox(dropbox_current_experiment_folder='misbindings')

    plt.show()

    return locals()
def fit_mixture_model(self):
    N = self.dataset['probe'].size

    # Initialize empty arrays and dicts
    self.dataset['em_fits'] = dict(
        kappa=np.empty(N),
        mixt_target=np.empty(N),
        mixt_nontargets=np.empty(N),
        mixt_nontargets_sum=np.empty(N),
        mixt_random=np.empty(N),
        resp_target=np.empty(N),
        resp_nontarget=np.empty(N),
        resp_random=np.empty(N),
        train_LL=np.empty(N),
        test_LL=np.empty(N),
        K=np.empty(N),
        bic=np.empty(N),
        aic=np.empty(N),
    )
    for key in self.dataset['em_fits']:
        self.dataset['em_fits'][key].fill(np.nan)

    self.dataset['target'] = np.empty(N)
    self.dataset['em_fits_subjects_nitems'] = dict()
    for subject in np.unique(self.dataset['subject']):
        self.dataset['em_fits_subjects_nitems'][subject] = dict()
    self.dataset['em_fits_nitems'] = dict(mean=dict(), std=dict(), values=dict())

    # Compute mixture model fits per n_items and per subject
    for n_items in np.unique(self.dataset['n_items']):
        for subject in np.unique(self.dataset['subject']):
            ids_filter = (self.dataset['subject'] == subject).flatten() & \
                (self.dataset['n_items'] == n_items).flatten()

            # (argument order fixed: the format string expects n_items first, then subject)
            print "Fit mixture model, %d items, subject %d, %d datapoints" % (n_items, subject, np.sum(ids_filter))

            self.dataset['target'][ids_filter] = self.dataset['item_angle'][ids_filter, 0]

            params_fit = em_circmixtmodel.fit(
                self.dataset['response'][ids_filter, 0],
                self.dataset['item_angle'][ids_filter, 0],
                self.dataset['item_angle'][ids_filter, 1:]
            )
            params_fit['mixt_nontargets_sum'] = np.sum(
                params_fit['mixt_nontargets']
            )
            resp = em_circmixtmodel.compute_responsibilities(
                self.dataset['response'][ids_filter, 0],
                self.dataset['item_angle'][ids_filter, 0],
                self.dataset['item_angle'][ids_filter, 1:],
                params_fit
            )

            # Copy all data
            for k, v in params_fit.iteritems():
                self.dataset['em_fits'][k][ids_filter] = v

            self.dataset['em_fits']['resp_target'][ids_filter] = \
                resp['target']
            self.dataset['em_fits']['resp_nontarget'][ids_filter] = \
                np.sum(resp['nontargets'], axis=1)
            self.dataset['em_fits']['resp_random'][ids_filter] = \
                resp['random']

            self.dataset['em_fits_subjects_nitems'][subject][n_items] = params_fit

        ## Now compute mean/std of em_fits per n_items
        self.dataset['em_fits_nitems']['mean'][n_items] = dict()
        self.dataset['em_fits_nitems']['std'][n_items] = dict()
        self.dataset['em_fits_nitems']['values'][n_items] = dict()

        # Need to extract the values for a subject/nitems pair, for all keys
        # of em_fits. Annoying dictionary indexing needed.
        emfits_keys = params_fit.keys()
        for key in emfits_keys:
            values_allsubjects = [self.dataset['em_fits_subjects_nitems'][subject][n_items][key] for subject in np.unique(self.dataset['subject'])]

            self.dataset['em_fits_nitems']['mean'][n_items][key] = np.mean(values_allsubjects)
            self.dataset['em_fits_nitems']['std'][n_items][key] = np.std(values_allsubjects)
            self.dataset['em_fits_nitems']['values'][n_items][key] = values_allsubjects

    ## Construct array versions of the em_fits_nitems mixture proportions, for convenience
    self.construct_arrays_em_fits()
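# For reference: em_circmixtmodel.compute_responsibilities(), used above,
# returns for each datapoint the posterior probability of each mixture
# component (the EM E-step). A minimal module-level sketch of that computation,
# ASSUMING the usual target/nontargets/random parametrisation with a shared
# kappa; the actual module may differ in details:
def compute_responsibilities_sketch(responses, targets, nontargets, params):
    '''E-step: posterior component probabilities per response.'''
    K = nontargets.shape[1]
    # Weighted Von Mises likelihoods around the target and each nontarget,
    # plus the uniform random component.
    lik_target = params['mixt_target']*spst.vonmises.pdf(responses - targets, params['kappa'])
    lik_nontargets = (params['mixt_nontargets']/max(K, 1))*spst.vonmises.pdf(responses[:, np.newaxis] - nontargets, params['kappa'])
    lik_random = params['mixt_random']/(2.*np.pi)*np.ones(responses.size)

    norm = lik_target + np.nansum(lik_nontargets, axis=1) + lik_random
    return dict(target=lik_target/norm,
                nontargets=lik_nontargets/norm[:, np.newaxis],
                random=lik_random/norm)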
def launcher_do_hierarchical_special_stimuli_varyMMlower(args):
    '''Fit the hierarchical model, varying the ratio of M to Mlower.
    See how the precision of recall and the mixture model parameters evolve.
    '''
    print "Doing a piece of work for launcher_do_hierarchical_special_stimuli_varyMMlower"

    try:
        # Convert Argparse.Namespace to dict
        all_parameters = vars(args)
    except TypeError:
        # Assume it's already done
        assert type(args) is dict, "args is neither Namespace nor dict, WHY?"
        all_parameters = args

    print all_parameters

    # Create DataIO
    # (complete label with current variable state)
    dataio = DataIO(output_folder=all_parameters['output_directory'], label=all_parameters['label'].format(**all_parameters))
    save_every = 1
    run_counter = 0

    # Parameters to vary
    M_space = np.arange(1, all_parameters['M']+1)
    M_lower_space = np.arange(2, all_parameters['M']+1, 2)
    MMlower_all = np.array(cross(M_space, M_lower_space))
    MMlower_valid_space = MMlower_all[np.nonzero(np.sum(MMlower_all, axis=1) == all_parameters['M'])[0]]

    # Limit the space, not too big...
    MMlower_valid_space = MMlower_valid_space[::5]

    print "MMlower size", MMlower_valid_space.shape[0]

    # Result arrays
    result_all_precisions = np.nan*np.ones((MMlower_valid_space.shape[0], all_parameters['num_repetitions']))
    result_em_fits = np.nan*np.ones((MMlower_valid_space.shape[0], 5, all_parameters['num_repetitions']))  # kappa, mixt_target, mixt_nontarget, mixt_random, ll
    result_em_resp = np.nan*np.ones((MMlower_valid_space.shape[0], 1+all_parameters['T'], all_parameters['N'], all_parameters['num_repetitions']))

    # If desired, will automatically save all Model responses.
    if all_parameters['subaction'] == 'collect_responses':
        result_responses = np.nan*np.ones((MMlower_valid_space.shape[0], all_parameters['N'], all_parameters['num_repetitions']))
        result_target = np.nan*np.ones((MMlower_valid_space.shape[0], all_parameters['N'], all_parameters['num_repetitions']))
        result_nontargets = np.nan*np.ones((MMlower_valid_space.shape[0], all_parameters['N'], all_parameters['T']-1, all_parameters['num_repetitions']))

    search_progress = progress.Progress(MMlower_valid_space.shape[0]*all_parameters['num_repetitions'])

    for repet_i in xrange(all_parameters['num_repetitions']):
        for MMlower_i, MMlower in enumerate(MMlower_valid_space):
            print "%.2f%%, %s left - %s" % (search_progress.percentage(), search_progress.time_remaining_str(), search_progress.eta_str())
            print "Fit for M=%d, Mlower=%d, %d/%d" % (MMlower[0], MMlower[1], repet_i+1, all_parameters['num_repetitions'])

            # Update parameters
            all_parameters['M'] = MMlower[0]
            all_parameters['M_layer_one'] = MMlower[1]

            ### WORK WORK WORK work? ###

            # Generate specific stimuli
            all_parameters['stimuli_generation'] = 'specific_stimuli'
            all_parameters['code_type'] = 'hierarchical'

            # Instantiate
            (random_network, data_gen, stat_meas, sampler) = launchers.init_everything(all_parameters)

            # Sample
            sampler.run_inference(all_parameters)

            # Compute precision
            result_all_precisions[MMlower_i, repet_i] = sampler.get_precision()

            # Fit mixture model
            curr_params_fit = em_circularmixture.fit(*sampler.collect_responses())
            curr_resp = em_circularmixture.compute_responsibilities(*(sampler.collect_responses() + (curr_params_fit, )))
            print curr_params_fit

            result_em_fits[MMlower_i, :, repet_i] = [curr_params_fit[key] for key in ('kappa', 'mixt_target', 'mixt_nontargets', 'mixt_random', 'train_LL')]
            result_em_resp[MMlower_i, 0, :, repet_i] = curr_resp['target']
            result_em_resp[MMlower_i, 1:-1, :, repet_i] = curr_resp['nontargets'].T
            result_em_resp[MMlower_i, -1, :, repet_i] = curr_resp['random']

            # If needed, store responses
            if all_parameters['subaction'] == 'collect_responses':
                (responses, target, nontarget) = sampler.collect_responses()
                result_responses[MMlower_i, :, repet_i] = responses
                result_target[MMlower_i, :, repet_i] = target
                result_nontargets[MMlower_i, ..., repet_i] = nontarget

                print "collected responses"

            ### /Work ###

            search_progress.increment()
            if run_counter % save_every == 0 or search_progress.done():
                dataio.save_variables_default(locals())

            run_counter += 1

    # Finished
    dataio.save_variables_default(locals())

    print "All finished"
    return locals()
def launcher_do_mixed_special_stimuli(args):
    '''Fit the mixed model, varying ratio_conj.
    See how the precision of recall and the mixture model parameters evolve.
    '''
    print "Doing a piece of work for launcher_do_mixed_special_stimuli"

    try:
        # Convert Argparse.Namespace to dict
        all_parameters = vars(args)
    except TypeError:
        # Assume it's already done
        assert type(args) is dict, "args is neither Namespace nor dict, WHY?"
        all_parameters = args

    print all_parameters

    # Create DataIO
    # (complete label with current variable state)
    dataio = DataIO(output_folder=all_parameters['output_directory'], label=all_parameters['label'].format(**all_parameters))
    save_every = 1
    run_counter = 0

    # Parameters to vary
    ratio_space = (np.arange(0, all_parameters['M']**0.5)**2.)/all_parameters['M']

    # Result arrays
    result_all_precisions = np.nan*np.ones((ratio_space.size, all_parameters['num_repetitions']))
    result_em_fits = np.nan*np.ones((ratio_space.size, 5, all_parameters['num_repetitions']))  # kappa, mixt_target, mixt_nontarget, mixt_random, ll
    result_em_resp = np.nan*np.ones((ratio_space.size, 1+all_parameters['T'], all_parameters['N'], all_parameters['num_repetitions']))

    # If desired, will automatically save all Model responses.
    if all_parameters['subaction'] == 'collect_responses':
        result_responses = np.nan*np.ones((ratio_space.size, all_parameters['N'], all_parameters['num_repetitions']))
        result_target = np.nan*np.ones((ratio_space.size, all_parameters['N'], all_parameters['num_repetitions']))
        result_nontargets = np.nan*np.ones((ratio_space.size, all_parameters['N'], all_parameters['T']-1, all_parameters['num_repetitions']))

    search_progress = progress.Progress(ratio_space.size*all_parameters['num_repetitions'])

    for repet_i in xrange(all_parameters['num_repetitions']):
        for ratio_i, ratio_conj in enumerate(ratio_space):
            print "%.2f%%, %s left - %s" % (search_progress.percentage(), search_progress.time_remaining_str(), search_progress.eta_str())
            print "Fit for ratio_conj=%.2f, %d/%d" % (ratio_conj, repet_i+1, all_parameters['num_repetitions'])

            # Update parameter
            all_parameters['ratio_conj'] = ratio_conj

            ### WORK WORK WORK work? ###

            # Generate specific stimuli
            all_parameters['stimuli_generation'] = 'specific_stimuli'

            # Instantiate
            (random_network, data_gen, stat_meas, sampler) = launchers.init_everything(all_parameters)

            # Sample
            sampler.run_inference(all_parameters)

            # Compute precision
            result_all_precisions[ratio_i, repet_i] = sampler.get_precision()

            # Fit mixture model
            curr_params_fit = em_circularmixture.fit(*sampler.collect_responses())
            curr_resp = em_circularmixture.compute_responsibilities(*(sampler.collect_responses() + (curr_params_fit, )))

            result_em_fits[ratio_i, :, repet_i] = [curr_params_fit[key] for key in ('kappa', 'mixt_target', 'mixt_nontargets', 'mixt_random', 'train_LL')]
            result_em_resp[ratio_i, 0, :, repet_i] = curr_resp['target']
            result_em_resp[ratio_i, 1:-1, :, repet_i] = curr_resp['nontargets'].T
            result_em_resp[ratio_i, -1, :, repet_i] = curr_resp['random']

            print result_all_precisions[ratio_i, repet_i], curr_params_fit

            # If needed, store responses
            if all_parameters['subaction'] == 'collect_responses':
                (responses, target, nontarget) = sampler.collect_responses()
                result_responses[ratio_i, :, repet_i] = responses
                result_target[ratio_i, :, repet_i] = target
                result_nontargets[ratio_i, ..., repet_i] = nontarget

                print "collected responses"

            ### /Work ###

            search_progress.increment()
            if run_counter % save_every == 0 or search_progress.done():
                dataio.save_variables_default(locals())

            run_counter += 1

    # Finished
    dataio.save_variables_default(locals())

    print "All finished"
    return locals()
def plots_misbinding_logposterior(data_pbs, generator_module=None):
    '''Reload 3D volume runs from PBS and plot them.'''

    #### SETUP
    #
    savedata = False
    savefigs = True

    plot_logpost = False
    plot_error = False
    plot_mixtmodel = True
    plot_hist_responses_fisherinfo = True
    compute_plot_bootstrap = False
    compute_fisher_info_perratioconj = True

    # mixturemodel_to_use = 'original'
    mixturemodel_to_use = 'allitems'
    # mixturemodel_to_use = 'allitems_kappafi'

    caching_fisherinfo_filename = os.path.join(generator_module.pbs_submission_infos['simul_out_dir'], 'cache_fisherinfo.pickle')
    #
    #### /SETUP

    print "Order parameters: ", generator_module.dict_parameters_range.keys()

    result_all_log_posterior = np.squeeze(data_pbs.dict_arrays['result_all_log_posterior']['results'])
    result_all_thetas = np.squeeze(data_pbs.dict_arrays['result_all_thetas']['results'])

    ratio_space = data_pbs.loaded_data['parameters_uniques']['ratio_conj']

    print ratio_space
    print result_all_log_posterior.shape

    N = result_all_thetas.shape[-1]

    result_prob_wrong = np.zeros((ratio_space.size, N))
    result_em_fits = np.empty((ratio_space.size, 6))*np.nan  # kappa, mixt_target, mixt_nontargets, mixt_random, train_LL, bic

    all_args = data_pbs.loaded_data['args_list']

    fixed_means = [-np.pi*0.6, np.pi*0.6]
    all_angles = np.linspace(-np.pi, np.pi, result_all_log_posterior.shape[-1])

    dataio = DataIO(output_folder=generator_module.pbs_submission_infos['simul_out_dir'] + '/outputs/', label='global_' + dataset_infos['save_output_filename'])

    plt.rcParams['font.size'] = 18

    if plot_hist_responses_fisherinfo:
        # From cache
        if caching_fisherinfo_filename is not None:
            if os.path.exists(caching_fisherinfo_filename):
                # Got file, open it and try to use its contents
                try:
                    with open(caching_fisherinfo_filename, 'r') as file_in:
                        # Load and assign values
                        cached_data = pickle.load(file_in)
                        result_fisherinfo_ratio = cached_data['result_fisherinfo_ratio']
                        compute_fisher_info_perratioconj = False
                except IOError:
                    print "Error while loading ", caching_fisherinfo_filename, "falling back to computing the Fisher Info"

        if compute_fisher_info_perratioconj:
            # We did not save the Fisher info, but need it if we want to fit the
            # mixture model with fixed kappa. So recompute it using the args_dicts.
            result_fisherinfo_ratio = np.empty(ratio_space.shape)

            # Invert the all_args_i -> ratio_conj direction
            parameters_indirections = data_pbs.loaded_data['parameters_dataset_index']

            for ratio_conj_i, ratio_conj in enumerate(ratio_space):
                # Get the index of the first dataset with the current ratio_conj
                # (no need for the others, I think)
                arg_index = parameters_indirections[(ratio_conj,)][0]

                # Now using this dataset, reconstruct a RandomFactorialNetwork
                # and compute the Fisher info
                curr_args = all_args[arg_index]

                curr_args['stimuli_generation'] = lambda T: np.linspace(-np.pi*0.6, np.pi*0.6, T)

                (random_network, data_gen, stat_meas, sampler) = launchers.init_everything(curr_args)

                # Theoretical Fisher info
                result_fisherinfo_ratio[ratio_conj_i] = sampler.estimate_fisher_info_theocov()

                del curr_args['stimuli_generation']

            # Save everything to a file, for faster later plotting
            if caching_fisherinfo_filename is not None:
                try:
                    with open(caching_fisherinfo_filename, 'w') as filecache_out:
                        data_cache = dict(result_fisherinfo_ratio=result_fisherinfo_ratio)
                        pickle.dump(data_cache, filecache_out, protocol=2)
                except IOError:
                    print "Error writing out to caching file ", caching_fisherinfo_filename

        # Now plot. Do histograms of responses (around -pi*0.6 and pi*0.6), add the
        # Von Mises derived from the theoretical FI on top, and vertical lines for
        # the correct target/nontarget angles.
        for ratio_conj_i, ratio_conj in enumerate(ratio_space):
            # Histogram
            ax = utils.hist_angular_data(result_all_thetas[ratio_conj_i], bins=100, title='ratio %.2f, fi %.0f' % (ratio_conj, result_fisherinfo_ratio[ratio_conj_i]))
            bar_heights, _, _ = utils.histogram_binspace(result_all_thetas[ratio_conj_i], bins=100, norm='density')

            # Add Fisher info prediction on top
            x = np.linspace(-np.pi, np.pi, 1000)
            if result_fisherinfo_ratio[ratio_conj_i] < 700:
                # Von Mises PDF
                utils.plot_vonmises_pdf(x, utils.stddev_to_kappa(1./result_fisherinfo_ratio[ratio_conj_i]**0.5), mu=fixed_means[-1], ax_handle=ax, linewidth=3, color='r', scale=np.max(bar_heights), fmt='-')
            else:
                # Switch to a Gaussian instead
                utils.plot_normal_pdf(x, mu=fixed_means[-1], std=1./result_fisherinfo_ratio[ratio_conj_i]**0.5, ax_handle=ax, linewidth=3, color='r', scale=np.max(bar_heights), fmt='-')

            # ax.set_xticks([])
            # ax.set_yticks([])

            # Add vertical lines at the correct target/nontarget angles
            ax.axvline(x=fixed_means[0], color='g', linewidth=2)
            ax.axvline(x=fixed_means[1], color='r', linewidth=2)

            ax.get_figure().canvas.draw()

            if savefigs:
                # plt.tight_layout()
                dataio.save_current_figure('results_misbinding_histresponses_vonmisespdf_ratioconj%.2f{label}_{unique_id}.pdf' % (ratio_conj))

    if plot_logpost:
        for ratio_conj_i, ratio_conj in enumerate(ratio_space):
            # ax = utils.plot_mean_std_area(all_angles, nanmean(result_all_log_posterior[ratio_conj_i], axis=0), nanstd(result_all_log_posterior[ratio_conj_i], axis=0))
            # ax.set_xlim((-np.pi, np.pi))
            # ax.set_xticks((-np.pi, -np.pi / 2, 0, np.pi / 2., np.pi))
            # ax.set_xticklabels((r'$-\pi$', r'$-\frac{\pi}{2}$', r'$0$', r'$\frac{\pi}{2}$', r'$\pi$'))
            # ax.set_yticks(())
            # ax.get_figure().canvas.draw()
            # if savefigs:
            #     dataio.save_current_figure('results_misbinding_logpost_ratioconj%.2f_{label}_global_{unique_id}.pdf' % ratio_conj)

            # Compute the probability of answering wrongly
            # (from fitting a mixture distribution onto the posterior)
            for n in xrange(result_all_log_posterior.shape[1]):
                result_prob_wrong[ratio_conj_i, n], _, _ = utils.fit_gaussian_mixture_fixedmeans(all_angles, np.exp(result_all_log_posterior[ratio_conj_i, n]), fixed_means=fixed_means, normalise=True, return_fitted_data=False, should_plot=False)

        # ax = utils.plot_mean_std_area(ratio_space, nanmean(result_prob_wrong, axis=-1), nanstd(result_prob_wrong, axis=-1))
        plt.figure()
        plt.plot(ratio_space, utils.nanmean(result_prob_wrong, axis=-1))
        # ax.get_figure().canvas.draw()

        if savefigs:
            dataio.save_current_figure('results_misbinding_probwrongpost_allratioconj_{label}_global_{unique_id}.pdf')

    if plot_error:
        ## Compute standard deviation/precision from samples and plot as a function of ratio_conj
        stats = utils.compute_mean_std_circular_data(utils.wrap_angles(result_all_thetas - fixed_means[1]).T)

        f = plt.figure()
        plt.plot(ratio_space, stats['std'])
        plt.ylabel('Standard deviation [rad]')

        if savefigs:
            dataio.save_current_figure('results_misbinding_stddev_allratioconj_{label}_global_{unique_id}.pdf')

        f = plt.figure()
        plt.plot(ratio_space, utils.compute_angle_precision_from_std(stats['std'], square_precision=False), linewidth=2)
        plt.ylabel('Precision [$1/rad$]')
        plt.xlabel('Proportion of conjunctive units')
        plt.grid()

        if savefigs:
            dataio.save_current_figure('results_misbinding_precision_allratioconj_{label}_global_{unique_id}.pdf')

        ## Compute the probability of misbinding
        # 1) Just count samples below threshold / total samples
        # 2) Fit a mixture model, average over mixture probabilities
        prob_smaller0 = np.sum(result_all_thetas <= 1, axis=1)/float(result_all_thetas.shape[1])

        em_centers = np.zeros((ratio_space.size, 2))
        em_covs = np.zeros((ratio_space.size, 2))
        em_pk = np.zeros((ratio_space.size, 2))
        em_ll = np.zeros(ratio_space.size)
        for ratio_conj_i, ratio_conj in enumerate(ratio_space):
            cen_lst, cov_lst, em_pk[ratio_conj_i], em_ll[ratio_conj_i] = pygmm.em(result_all_thetas[ratio_conj_i, np.newaxis].T, K=2, max_iter=400, init_kw={'cluster_init': 'fixed', 'fixed_means': fixed_means})
            em_centers[ratio_conj_i] = np.array(cen_lst).flatten()
            em_covs[ratio_conj_i] = np.array(cov_lst).flatten()

        # print em_centers
        # print em_covs
        # print em_pk

        f = plt.figure()
        plt.plot(ratio_space, prob_smaller0)
        plt.ylabel('Misbound proportion')
        if savefigs:
            dataio.save_current_figure('results_misbinding_countsmaller0_allratioconj_{label}_global_{unique_id}.pdf')

        f = plt.figure()
        plt.plot(ratio_space, np.max(em_pk, axis=-1), 'g', linewidth=2)
        plt.ylabel('Mixture proportion, correct')
        plt.xlabel('Proportion of conjunctive units')
        plt.grid()
        if savefigs:
            dataio.save_current_figure('results_misbinding_emmixture_allratioconj_{label}_global_{unique_id}.pdf')

        # Put everything on one figure
        f = plt.figure(figsize=(10, 6))
        norm_for_plot = lambda x: (x - np.min(x))/np.max((x - np.min(x)))
        plt.plot(ratio_space, norm_for_plot(stats['std']), ratio_space, norm_for_plot(utils.compute_angle_precision_from_std(stats['std'], square_precision=False)), ratio_space, norm_for_plot(prob_smaller0), ratio_space, norm_for_plot(em_pk[:, 1]), ratio_space, norm_for_plot(em_pk[:, 0]))
        plt.legend(('Std dev', 'Precision', 'Prob smaller 1', 'Mixture proportion correct', 'Mixture proportion misbinding'))
        # plt.plot(ratio_space, norm_for_plot(compute_angle_precision_from_std(stats['std'], square_precision=False)), ratio_space, norm_for_plot(em_pk[:, 1]), linewidth=2)
        # plt.legend(('Precision', 'Mixture proportion correct'), loc='best')
        plt.grid()
        if savefigs:
            dataio.save_current_figure('results_misbinding_allmetrics_allratioconj_{label}_global_{unique_id}.pdf')

    if plot_mixtmodel:
        # Fit Paul's model
        target_angle = np.ones(N)*fixed_means[1]
        nontarget_angles = np.ones((N, 1))*fixed_means[0]

        for ratio_conj_i, ratio_conj in enumerate(ratio_space):
            print "Ratio: ", ratio_conj

            responses = result_all_thetas[ratio_conj_i]

            if mixturemodel_to_use == 'allitems_kappafi':
                curr_params_fit = em_circularmixture_allitems_kappafi.fit(responses, target_angle, nontarget_angles, kappa=result_fisherinfo_ratio[ratio_conj_i])
            elif mixturemodel_to_use == 'allitems':
                curr_params_fit = em_circularmixture_allitems_uniquekappa.fit(responses, target_angle, nontarget_angles)
            else:
                curr_params_fit = em_circularmixture.fit(responses, target_angle, nontarget_angles)

            result_em_fits[ratio_conj_i] = [curr_params_fit['kappa'], curr_params_fit['mixt_target']] + utils.arrnum_to_list(curr_params_fit['mixt_nontargets']) + [curr_params_fit[key] for key in ('mixt_random', 'train_LL', 'bic')]

            print curr_params_fit

        if False:
            f, ax = plt.subplots()
            ax2 = ax.twinx()

            # Left axis, kappa
            ax = utils.plot_mean_std_area(ratio_space, result_em_fits[:, 0], 0*result_em_fits[:, 0], xlabel='Proportion of conjunctive units', ylabel="Inverse variance $[rad^{-2}]$", ax_handle=ax, linewidth=3, fmt='o-', markersize=8, label='Fitted kappa', color='k')

            # Right axis, mixture probabilities
            utils.plot_mean_std_area(ratio_space, result_em_fits[:, 1], 0*result_em_fits[:, 1], xlabel='Proportion of conjunctive units', ylabel="Mixture probabilities", ax_handle=ax2, linewidth=3, fmt='o-', markersize=8, label='Target')
            utils.plot_mean_std_area(ratio_space, result_em_fits[:, 2], 0*result_em_fits[:, 2], xlabel='Proportion of conjunctive units', ylabel="Mixture probabilities", ax_handle=ax2, linewidth=3, fmt='o-', markersize=8, label='Nontarget')
            utils.plot_mean_std_area(ratio_space, result_em_fits[:, 3], 0*result_em_fits[:, 3], xlabel='Proportion of conjunctive units', ylabel="Mixture probabilities", ax_handle=ax2, linewidth=3, fmt='o-', markersize=8, label='Random')

            lines, labels = ax.get_legend_handles_labels()
            lines2, labels2 = ax2.get_legend_handles_labels()
            ax.legend(lines + lines2, labels + labels2, fontsize=12, loc='right')

            # ax.set_xlim([0.9, 5.1])
            # ax.set_xticks(range(1, 6))
            # ax.set_xticklabels(range(1, 6))
            plt.grid()

            f.canvas.draw()

        if True:
            # Mixture probabilities
            ax = utils.plot_mean_std_area(ratio_space, result_em_fits[:, 1], 0*result_em_fits[:, 1], xlabel='Proportion of conjunctive units', ylabel="Mixture probabilities", linewidth=3, fmt='-', markersize=8, label='Target')
            utils.plot_mean_std_area(ratio_space, result_em_fits[:, 2], 0*result_em_fits[:, 2], xlabel='Proportion of conjunctive units', ylabel="Mixture probabilities", ax_handle=ax, linewidth=3, fmt='-', markersize=8, label='Nontarget')
            utils.plot_mean_std_area(ratio_space, result_em_fits[:, 3], 0*result_em_fits[:, 3], xlabel='Proportion of conjunctive units', ylabel="Mixture probabilities", ax_handle=ax, linewidth=3, fmt='-', markersize=8, label='Random')

            ax.legend(loc='right')
            # ax.set_xlim([0.9, 5.1])
            # ax.set_xticks(range(1, 6))
            # ax.set_xticklabels(range(1, 6))
            plt.grid()

            if savefigs:
                dataio.save_current_figure('results_misbinding_emmixture_allratioconj_{label}_global_{unique_id}.pdf')

        if True:
            # Kappa
            # ax = utils.plot_mean_std_area(ratio_space, result_em_fits[:, 0], 0*result_em_fits[:, 0], xlabel='Proportion of conjunctive units', ylabel="$\kappa [rad^{-2}]$", linewidth=3, fmt='-', markersize=8, label='Kappa')
            ax = utils.plot_mean_std_area(ratio_space, utils.kappa_to_stddev(result_em_fits[:, 0]), 0*result_em_fits[:, 2], xlabel='Proportion of conjunctive units', ylabel="Standard deviation [rad]", linewidth=3, fmt='-', markersize=8, label='Mixture model $\kappa$')

            # Add theoretical Fisher info
            ax = utils.plot_mean_std_area(ratio_space, utils.kappa_to_stddev(result_fisherinfo_ratio), 0*result_em_fits[:, 2], xlabel='Proportion of conjunctive units', ylabel="Standard deviation [rad]", linewidth=3, fmt='-', markersize=8, label='Fisher Information', ax_handle=ax)

            ax.legend(loc='best')
            # ax.set_xlim([0.9, 5.1])
            # ax.set_xticks(range(1, 6))
            # ax.set_xticklabels(range(1, 6))
            plt.grid()

            if savefigs:
                dataio.save_current_figure('results_misbinding_kappa_allratioconj_{label}_global_{unique_id}.pdf')

    if compute_plot_bootstrap:
        ## Compute the bootstrap p-value for each ratio
        # Uses the bootstrap CDF from mixed runs, not the exact current ones;
        # not sure if that is a good idea.
        bootstrap_to_load = 1
        if bootstrap_to_load == 1:
            cache_bootstrap_fn = os.path.join(generator_module.pbs_submission_infos['simul_out_dir'], 'outputs', 'cache_bootstrap_mixed_from_bootstrapnontargets.pickle')
            bootstrap_ecdf_sum_label = 'bootstrap_ecdf_allitems_sum_sigmax_T'
            bootstrap_ecdf_all_label = 'bootstrap_ecdf_allitems_all_sigmax_T'
        elif bootstrap_to_load == 2:
            cache_bootstrap_fn = os.path.join(generator_module.pbs_submission_infos['simul_out_dir'], 'outputs', 'cache_bootstrap_misbinding_mixed.pickle')
            bootstrap_ecdf_sum_label = 'bootstrap_ecdf_allitems_sum_ratioconj'
            bootstrap_ecdf_all_label = 'bootstrap_ecdf_allitems_all_ratioconj'

        try:
            with open(cache_bootstrap_fn, 'r') as file_in:
                # Load and assign values
                cached_data = pickle.load(file_in)
                assert bootstrap_ecdf_sum_label in cached_data
                assert bootstrap_ecdf_all_label in cached_data
                should_fit_bootstrap = False
        except IOError:
            print "Error while loading ", cache_bootstrap_fn

        # Select the ECDF to use
        if bootstrap_to_load == 1:
            sigmax_i = 3  # corresponds to sigmax = 2, input here.
            T_i = 1  # two possible targets here.
            bootstrap_ecdf_sum_used = cached_data[bootstrap_ecdf_sum_label][sigmax_i][T_i]['ecdf']
            bootstrap_ecdf_all_used = cached_data[bootstrap_ecdf_all_label][sigmax_i][T_i]['ecdf']
        elif bootstrap_to_load == 2:
            ratio_conj_i = 4
            bootstrap_ecdf_sum_used = cached_data[bootstrap_ecdf_sum_label][ratio_conj_i]['ecdf']
            bootstrap_ecdf_all_used = cached_data[bootstrap_ecdf_all_label][ratio_conj_i]['ecdf']

        result_pvalue_bootstrap_sum = np.empty(ratio_space.size)*np.nan
        result_pvalue_bootstrap_all = np.empty((ratio_space.size, nontarget_angles.shape[-1]))*np.nan

        for ratio_conj_i, ratio_conj in enumerate(ratio_space):
            print "Ratio: ", ratio_conj

            responses = result_all_thetas[ratio_conj_i]

            bootstrap_allitems_nontargets_allitems_uniquekappa = em_circularmixture_allitems_uniquekappa.bootstrap_nontarget_stat(responses, target_angle, nontarget_angles, sumnontargets_bootstrap_ecdf=bootstrap_ecdf_sum_used, allnontargets_bootstrap_ecdf=bootstrap_ecdf_all_used)

            result_pvalue_bootstrap_sum[ratio_conj_i] = bootstrap_allitems_nontargets_allitems_uniquekappa['p_value']
            result_pvalue_bootstrap_all[ratio_conj_i] = bootstrap_allitems_nontargets_allitems_uniquekappa['allnontarget_p_value']

        ## Plots
        # f, ax = plt.subplots()
        # ax.plot(ratio_space, result_pvalue_bootstrap_all, linewidth=2)
        # if savefigs:
        #     dataio.save_current_figure("pvalue_bootstrap_all_ratioconj_{label}_{unique_id}.pdf")

        f, ax = plt.subplots()
        ax.plot(ratio_space, result_pvalue_bootstrap_sum, linewidth=2)
        plt.grid()
        if savefigs:
            dataio.save_current_figure("pvalue_bootstrap_sum_ratioconj_{label}_{unique_id}.pdf")

    # plt.figure()
    # plt.plot(ratio_MMlower, results_filtered_smoothed/np.max(results_filtered_smoothed, axis=0), linewidth=2)
    # plt.plot(ratio_MMlower[np.argmax(results_filtered_smoothed, axis=0)], np.ones(results_filtered_smoothed.shape[-1]), 'ro', markersize=10)
    # plt.grid()
    # plt.ylim((0., 1.1))
    # plt.subplots_adjust(right=0.8)
    # plt.legend(['%d item' % i + 's'*(i > 1) for i in xrange(1, T+1)], loc='center right', bbox_to_anchor=(1.3, 0.5))
    # plt.xticks(np.linspace(0, 1.0, 5))

    variables_to_save = ['target_angle', 'nontarget_angles']

    if savedata:
        dataio.save_variables_default(locals(), variables_to_save)
        dataio.make_link_output_to_dropbox(dropbox_current_experiment_folder='misbindings')

    plt.show()

    return locals()
def check_precision_sensitivity_determ():
    '''Construct a situation with one Von Mises component and one random
    component, and see how the random component affects the basic precision
    estimator we use elsewhere.
    '''
    N = 1000
    kappa_space = np.array([3., 10., 20.])
    # kappa_space = np.array([3.])
    nb_repeats = 20
    ratio_to_kappa = False
    savefigs = True

    precision_nb_samples = 101
    N_rnd_space = np.linspace(0, N/2, precision_nb_samples).astype(int)

    precision_all = np.zeros((N_rnd_space.size, nb_repeats))
    kappa_estimated_all = np.zeros((N_rnd_space.size, nb_repeats))
    precision_squared_all = np.zeros((N_rnd_space.size, nb_repeats))
    kappa_mixtmodel_all = np.zeros((N_rnd_space.size, nb_repeats))
    mixtmodel_all = np.zeros((N_rnd_space.size, nb_repeats, 2))

    dataio = DataIO.DataIO()

    target_samples = np.zeros(N)

    for kappa in kappa_space:
        true_kappa = kappa*np.ones(N_rnd_space.size)

        # First sample everything as Von Mises
        samples_all = spst.vonmises.rvs(kappa, size=(N_rnd_space.size, nb_repeats, N))

        for repeat in progress.ProgressDisplay(xrange(nb_repeats)):
            for i, N_rnd in enumerate(N_rnd_space):
                samples = samples_all[i, repeat]

                # Then set N_rnd of them to random values in [-pi, pi]
                samples[np.random.randint(N, size=N_rnd)] = utils.sample_angle(N_rnd)

                # Estimate precision from those samples
                precision_all[i, repeat] = utils.compute_precision_samples(samples, square_precision=False, remove_chance_level=False)
                precision_squared_all[i, repeat] = utils.compute_precision_samples(samples, square_precision=True)

                # Convert circular std dev back to kappa
                kappa_estimated_all[i, repeat] = utils.stddev_to_kappa(1./precision_all[i, repeat])

                # Fit mixture model
                params_fit = em_circularmixture.fit(samples, target_samples)
                kappa_mixtmodel_all[i, repeat] = params_fit['kappa']
                mixtmodel_all[i, repeat] = params_fit['mixt_target'], params_fit['mixt_random']

                print "%d/%d N_rnd: %d, Kappa: %.3f, precision: %.3f, kappa_tilde: %.3f, precision^2: %.3f, kappa_mixtmod: %.3f" % (repeat, nb_repeats, N_rnd, kappa, precision_all[i, repeat], kappa_estimated_all[i, repeat], precision_squared_all[i, repeat], kappa_mixtmodel_all[i, repeat])

        if ratio_to_kappa:
            precision_all /= kappa
            precision_squared_all /= kappa
            kappa_estimated_all /= kappa
            true_kappa /= kappa

        f, ax = plt.subplots()
        ax.plot(N_rnd_space/float(N), true_kappa, 'k-', linewidth=3, label='Kappa_true')
        utils.plot_mean_std_area(N_rnd_space/float(N), np.mean(precision_all, axis=-1), np.std(precision_all, axis=-1), ax_handle=ax, label='precision')
        utils.plot_mean_std_area(N_rnd_space/float(N), np.mean(precision_squared_all, axis=-1), np.std(precision_squared_all, axis=-1), ax_handle=ax, label='precision^2')
        utils.plot_mean_std_area(N_rnd_space/float(N), np.mean(kappa_estimated_all, axis=-1), np.std(kappa_estimated_all, axis=-1), ax_handle=ax, label='kappa_tilde')
        utils.plot_mean_std_area(N_rnd_space/float(N), np.mean(kappa_mixtmodel_all, axis=-1), np.std(kappa_mixtmodel_all, axis=-1), ax_handle=ax, label='kappa mixt model')
        ax.legend()
        ax.set_title('Effect of random samples on precision. kappa: %.2f. ratiokappa %s' % (kappa, ratio_to_kappa))
        ax.set_xlabel('Proportion random samples. N tot %d' % N)
        ax.set_ylabel('Kappa/precision (not same units)')
        f.canvas.draw()

        if savefigs:
            dataio.save_current_figure("precision_sensitivity_kappa%dN%d_{unique_id}.pdf" % (kappa, N))

        # Do another plot, with kappa and mixt_target/mixt_random.
        # Use the left/right axes separately.
        f, ax = plt.subplots()
        ax2 = ax.twinx()

        # Left axis, kappa
        ax.plot(N_rnd_space/float(N), true_kappa, 'k-', linewidth=3, label='kappa true')
        utils.plot_mean_std_area(N_rnd_space/float(N), np.mean(kappa_mixtmodel_all, axis=-1), np.std(kappa_mixtmodel_all, axis=-1), ax_handle=ax, label='kappa')

        # Right axis, mixture probabilities
        utils.plot_mean_std_area(N_rnd_space/float(N), np.mean(mixtmodel_all[..., 0], axis=-1), np.std(mixtmodel_all[..., 0], axis=-1), ax_handle=ax2, label='mixt target', color='r')
        utils.plot_mean_std_area(N_rnd_space/float(N), np.mean(mixtmodel_all[..., 1], axis=-1), np.std(mixtmodel_all[..., 1], axis=-1), ax_handle=ax2, label='mixt random', color='g')

        ax.set_title('Mixture model parameters evolution. kappa: %.2f, ratiokappa %s' % (kappa, ratio_to_kappa))
        ax.set_xlabel('Proportion random samples. N tot %d' % N)
        ax.set_ylabel('Kappa')
        ax2.set_ylabel('Mixture proportions')

        lines, labels = ax.get_legend_handles_labels()
        lines2, labels2 = ax2.get_legend_handles_labels()
        ax.legend(lines + lines2, labels + labels2)

        if savefigs:
            dataio.save_current_figure("precision_sensitivity_mixtmodel_kappa%dN%d_{unique_id}.pdf" % (kappa, N))

    return locals()
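# For reference: utils.compute_precision_samples(), used above, is essentially
# the inverse circular standard deviation of the samples. A minimal sketch,
# ASSUMING the standard resultant-length definition of circular std (the
# utils version additionally supports removing the chance level):
def compute_precision_samples_sketch(samples, square_precision=False):
    '''Precision = 1/circular_std (or 1/circular_std**2).'''
    # Mean resultant length R of the sample angles
    R = np.abs(np.mean(np.exp(1j*samples)))
    circ_std = np.sqrt(-2.*np.log(R))
    return 1./circ_std**2 if square_precision else 1./circ_std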
def compute_bootstrap_samples(dataset, nb_bootstrap_samples, angle_space):
    responses_resampled = np.empty(
        (np.unique(dataset['n_items']).size, nb_bootstrap_samples),
        dtype=np.object)
    error_nontargets_resampled = np.empty(
        (np.unique(dataset['n_items']).size, nb_bootstrap_samples),
        dtype=np.object)
    error_targets_resampled = np.empty(
        (np.unique(dataset['n_items']).size, nb_bootstrap_samples),
        dtype=np.object)
    hist_cnts_nontarget_bootstraps_nitems = np.empty(
        (np.unique(dataset['n_items']).size, nb_bootstrap_samples,
         angle_space.size - 1))*np.nan
    hist_cnts_target_bootstraps_nitems = np.empty(
        (np.unique(dataset['n_items']).size, nb_bootstrap_samples,
         angle_space.size - 1))*np.nan

    bootstrap_data = {
        'responses_resampled': responses_resampled,
        'error_nontargets_resampled': error_nontargets_resampled,
        'error_targets_resampled': error_targets_resampled,
        'hist_cnts_nontarget_bootstraps_nitems': hist_cnts_nontarget_bootstraps_nitems,
        'hist_cnts_target_bootstraps_nitems': hist_cnts_target_bootstraps_nitems,
    }

    for n_items_i, n_items in enumerate(np.unique(dataset['n_items'])):
        # Data collapsed across subjects
        ids_filtered = (dataset['n_items'] == n_items).flatten()

        if n_items > 1:
            # Get random bootstrap nontargets
            bootstrap_nontargets = utils.sample_angle(
                dataset['item_angle'][ids_filtered, 1:n_items].shape + (nb_bootstrap_samples, ))

            # Compute associated EM fits
            # bootstrap_results = []
            for bootstrap_i in progress.ProgressDisplay(np.arange(nb_bootstrap_samples), display=progress.SINGLE_LINE):
                em_fit = em_circularmixture.fit(
                    dataset['response'][ids_filtered, 0],
                    dataset['item_angle'][ids_filtered, 0],
                    bootstrap_nontargets[..., bootstrap_i])
                # bootstrap_results.append(em_fit)

                # Resample responses from the EM fit
                responses_resampled[n_items_i, bootstrap_i] = (
                    em_circularmixture.sample_from_fit(
                        em_fit,
                        dataset['item_angle'][ids_filtered, 0],
                        bootstrap_nontargets[..., bootstrap_i]))

                # Compute the errors
                error_nontargets_resampled[n_items_i, bootstrap_i] = (
                    utils.wrap_angles(
                        responses_resampled[n_items_i, bootstrap_i][:, np.newaxis]
                        - bootstrap_nontargets[..., bootstrap_i]))
                error_targets_resampled[n_items_i, bootstrap_i] = (
                    utils.wrap_angles(
                        responses_resampled[n_items_i, bootstrap_i]
                        - dataset['item_angle'][ids_filtered, 0]))

                # Bin everything
                (hist_cnts_nontarget_bootstraps_nitems[n_items_i, bootstrap_i], _, _) = (
                    utils.histogram_binspace(
                        utils.dropnan(
                            error_nontargets_resampled[n_items_i, bootstrap_i]),
                        bins=angle_space, norm='density'))
                (hist_cnts_target_bootstraps_nitems[n_items_i, bootstrap_i], _, _) = (
                    utils.histogram_binspace(
                        utils.dropnan(
                            error_targets_resampled[n_items_i, bootstrap_i]),
                        bins=angle_space, norm='density'))

    return bootstrap_data
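# A hedged usage sketch for compute_bootstrap_samples() above. The `dataset`
# dict is assumed to carry the fields the function reads ('n_items',
# 'response', 'item_angle'); angle_space provides the histogram bin edges
# (angle_space.size - 1 bins, matching the arrays allocated inside):
def example_bootstrap_usage(dataset):
    angle_space = np.linspace(-np.pi, np.pi, 52)  # 51 density-normalised bins
    bootstrap_data = compute_bootstrap_samples(dataset, nb_bootstrap_samples=200, angle_space=angle_space)
    # Average the nontarget-error histograms across bootstrap samples, per n_items
    return np.nanmean(bootstrap_data['hist_cnts_nontarget_bootstraps_nitems'], axis=1)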
def launcher_do_error_distributions_allT(args):
    '''Compute histograms of error distributions, and histograms of bias
    towards nontargets. Do it for t=1...T items. Reproduces the Bays 2009
    style of figure, used in the paper.
    '''
    print "Doing a piece of work for launcher_do_error_distributions_allT"

    try:
        # Convert Argparse.Namespace to dict
        all_parameters = vars(args)
    except TypeError:
        # Assume it's already done
        assert type(args) is dict, "args is neither Namespace nor dict, WHY?"
        all_parameters = args

    print all_parameters

    # Create DataIO
    # (complete label with current variable state)
    dataio = DataIO(output_folder=all_parameters['output_directory'], label=all_parameters['label'].format(**all_parameters))
    save_every = 1
    run_counter = 0
    bins = 51

    # Parameters to vary
    T_all = all_parameters['T']
    T_space = np.arange(1, T_all+1)

    # Result arrays
    result_responses = np.nan*np.ones((T_space.size, all_parameters['N'], all_parameters['num_repetitions']))
    result_target = np.nan*np.ones((T_space.size, all_parameters['N'], all_parameters['num_repetitions']))
    result_nontargets = np.nan*np.ones((T_space.size, all_parameters['N'], T_all-1, all_parameters['num_repetitions']))
    result_em_fits = np.nan*np.ones((T_space.size, 5, all_parameters['num_repetitions']))  # kappa, mixt_target, mixt_nontarget, mixt_random, ll

    search_progress = progress.Progress(T_space.size*all_parameters['num_repetitions'])

    for repet_i in xrange(all_parameters['num_repetitions']):
        for T_i, T in enumerate(T_space):
            print "%.2f%%, %s left - %s" % (search_progress.percentage(), search_progress.time_remaining_str(), search_progress.eta_str())
            print "Fit for T=%d, %d/%d" % (T, repet_i+1, all_parameters['num_repetitions'])

            # Update parameter
            all_parameters['T'] = T

            ### WORK WORK WORK work? ###

            # Instantiate
            (_, _, _, sampler) = launchers.init_everything(all_parameters)

            # Sample
            sampler.run_inference(all_parameters)

            # Collect and store responses
            (responses, target, nontarget) = sampler.collect_responses()
            result_responses[T_i, :, repet_i] = responses
            result_target[T_i, :, repet_i] = target
            result_nontargets[T_i, :, :T_i, repet_i] = nontarget[:, :T_i]

            # Fit mixture model
            curr_params_fit = em_circularmixture.fit(*sampler.collect_responses())
            result_em_fits[T_i, :, repet_i] = [curr_params_fit[key] for key in ('kappa', 'mixt_target', 'mixt_nontargets', 'mixt_random', 'train_LL')]

            # Do plots
            sampler.plot_histogram_errors(bins=bins)
            dataio.save_current_figure('papertheo_histogram_errorsM%dsigmax%.2fT%d_{label}_{unique_id}.pdf' % tuple([all_parameters[key] for key in ('M', 'sigmax', 'T')]))

            if T > 1:
                sampler.plot_histogram_bias_nontarget(dataio=dataio)

            ### /Work ###

            search_progress.increment()
            if run_counter % save_every == 0 or search_progress.done():
                dataio.save_variables_default(locals())

            run_counter += 1

    # Finished
    dataio.save_variables_default(locals())

    print "All finished"
    return locals()
def launcher_do_error_distributions(args):
    '''Collect responses for error distribution plots
    (used in generator/reloader_error_distribution_*.py).
    Do it for T items. Reproduces the Bays 2009 style of figure, used in the paper.
    '''
    print "Doing a piece of work for launcher_do_error_distributions"

    try:
        # Convert Argparse.Namespace to dict
        all_parameters = vars(args)
    except TypeError:
        # Assume it's already done
        assert type(args) is dict, "args is neither Namespace nor dict, WHY?"
        all_parameters = args

    print all_parameters

    # Create DataIO
    # (complete label with current variable state)
    dataio = DataIO(output_folder=all_parameters['output_directory'], label=all_parameters['label'].format(**all_parameters))
    save_every = 1
    run_counter = 0

    # Result arrays
    result_responses = np.nan*np.ones((all_parameters['N'], all_parameters['num_repetitions']))
    result_target = np.nan*np.ones((all_parameters['N'], all_parameters['num_repetitions']))
    result_nontargets = np.nan*np.ones((all_parameters['N'], all_parameters['T']-1, all_parameters['num_repetitions']))
    result_em_fits = np.nan*np.ones((5, all_parameters['num_repetitions']))  # kappa, mixt_target, mixt_nontarget, mixt_random, ll

    search_progress = progress.Progress(all_parameters['num_repetitions'])

    for repet_i in xrange(all_parameters['num_repetitions']):
        print "%.2f%%, %s left - %s" % (search_progress.percentage(), search_progress.time_remaining_str(), search_progress.eta_str())
        print "Fit for T=%d, %d/%d" % (all_parameters['T'], repet_i+1, all_parameters['num_repetitions'])

        ### WORK WORK WORK work? ###

        # Instantiate
        (_, _, _, sampler) = launchers.init_everything(all_parameters)

        # Sample
        sampler.run_inference(all_parameters)

        # Collect and store responses
        (responses, target, nontarget) = sampler.collect_responses()
        result_responses[:, repet_i] = responses
        result_target[:, repet_i] = target
        result_nontargets[..., repet_i] = nontarget

        # Fit mixture model
        curr_params_fit = em_circularmixture.fit(*sampler.collect_responses())
        result_em_fits[..., repet_i] = [curr_params_fit[key] for key in ('kappa', 'mixt_target', 'mixt_nontargets', 'mixt_random', 'train_LL')]

        ### /Work ###

        search_progress.increment()
        if run_counter % save_every == 0 or search_progress.done():
            dataio.save_variables_default(locals())

        run_counter += 1

    # Finished
    dataio.save_variables_default(locals())

    print "All finished"
    return locals()