def bootstrap_nontarget_stat(responses, target, nontargets=np.array([[]]), sumnontargets_bootstrap_ecdf=None, allnontargets_bootstrap_ecdf=None, nb_bootstrap_samples=100, resample_responses=False, resample_targets=False):
    '''
        Performs a bootstrap evaluation of the nontarget mixture proportion distribution.

        Use it to construct a test for the existence of misbinding errors.
    '''

    if sumnontargets_bootstrap_ecdf is None and allnontargets_bootstrap_ecdf is None:
        # Get samples
        if resample_responses:
            bootstrap_responses = utils.sample_angle((responses.size, nb_bootstrap_samples))
        if resample_targets:
            bootstrap_targets = utils.sample_angle((responses.size, nb_bootstrap_samples))
        bootstrap_nontargets = utils.sample_angle((nontargets.shape[0], nontargets.shape[1], nb_bootstrap_samples))

        bootstrap_results = []
        for i in progress.ProgressDisplay(np.arange(nb_bootstrap_samples), display=progress.SINGLE_LINE):
            if resample_responses and resample_targets:
                em_fit = fit(bootstrap_responses[..., i], bootstrap_targets[..., i], bootstrap_nontargets[..., i])
            elif resample_responses and not resample_targets:
                em_fit = fit(bootstrap_responses[..., i], target, bootstrap_nontargets[..., i])
            elif not resample_responses and resample_targets:
                em_fit = fit(responses, bootstrap_targets[..., i], bootstrap_nontargets[..., i])
            elif not resample_responses and not resample_targets:
                em_fit = fit(responses, target, bootstrap_nontargets[..., i])
            else:
                raise ValueError('Unexpected combination of resampling flags: %d %d' % (resample_responses, resample_targets))

            bootstrap_results.append(em_fit)

        if resample_targets:
            if nontargets.shape[1] > 0:
                sumnontargets_bootstrap_samples = np.array([np.nansum(bootstr_res['mixt_nontargets']) for bootstr_res in bootstrap_results] + [bootstr_res['mixt_target'] for bootstr_res in bootstrap_results])
            else:
                sumnontargets_bootstrap_samples = np.array([bootstr_res['mixt_target'] for bootstr_res in bootstrap_results])
        else:
            sumnontargets_bootstrap_samples = np.array([np.sum(bootstr_res['mixt_nontargets']) for bootstr_res in bootstrap_results])

        allnontargets_bootstrap_samples = np.array([bootstr_res['mixt_nontargets'] for bootstr_res in bootstrap_results]).flatten()

        # Estimate the empirical CDFs
        sumnontargets_bootstrap_ecdf = stmodsdist.empirical_distribution.ECDF(sumnontargets_bootstrap_samples)
        allnontargets_bootstrap_ecdf = stmodsdist.empirical_distribution.ECDF(allnontargets_bootstrap_samples)
    else:
        allnontargets_bootstrap_samples = None
        sumnontargets_bootstrap_samples = None
        bootstrap_results = None

    # Compute the p-value for the current em_fit under the empirical CDFs
    p_value_sum_bootstrap = np.nan
    p_value_all_bootstrap = np.nan

    em_fit = fit(responses, target, nontargets)
    if sumnontargets_bootstrap_ecdf is not None:
        p_value_sum_bootstrap = 1. - sumnontargets_bootstrap_ecdf(np.sum(em_fit['mixt_nontargets']))
    if allnontargets_bootstrap_ecdf is not None:
        p_value_all_bootstrap = 1. - allnontargets_bootstrap_ecdf(em_fit['mixt_nontargets'])

    return dict(p_value=p_value_sum_bootstrap, nontarget_ecdf=sumnontargets_bootstrap_ecdf, em_fit=em_fit, nontarget_bootstrap_samples=sumnontargets_bootstrap_samples, bootstrap_results_all=bootstrap_results, allnontarget_bootstrap_samples=allnontargets_bootstrap_samples, allnontarget_ecdf=allnontargets_bootstrap_ecdf, allnontarget_p_value=p_value_all_bootstrap)
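# Minimal usage sketch (illustrative only, not part of the original module). It assumes the
# module-level `fit`, `utils` and `spst` used above are importable, and shows how the ECDFs
# returned by bootstrap_nontarget_stat could be reused so the null distribution is only
# estimated once. Names prefixed with `_example` are hypothetical.
def _example_bootstrap_usage():
    N = 200
    target = np.zeros(N)
    nontargets = np.ones((N, 1))*(np.pi/2.)
    responses = spst.vonmises.rvs(8.0, size=N)

    # First call estimates the bootstrap ECDFs and returns a p-value
    boot = bootstrap_nontarget_stat(responses, target, nontargets, nb_bootstrap_samples=50)
    print(boot['p_value'])

    # Later calls can reuse the ECDFs instead of re-running the bootstrap
    boot_again = bootstrap_nontarget_stat(responses, target, nontargets, sumnontargets_bootstrap_ecdf=boot['nontarget_ecdf'], allnontargets_bootstrap_ecdf=boot['allnontarget_ecdf'])
    print(boot_again['p_value'])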
def test_bootstrap_nontargets():
    '''
        Check how the bootstrapped test for misbinding errors behaves.
    '''

    # Negative example
    N = 300
    nb_nontargets = 1
    kappa = 5.0

    target = np.zeros(N)
    nontargets = utils.wrap_angles(np.linspace(0.0, 2*np.pi, nb_nontargets + 1, endpoint=False)[1:])*np.ones((N, nb_nontargets))

    responses = spst.vonmises.rvs(kappa, size=(N))
    responses[np.random.randint(N, size=N/3)] = utils.sample_angle(N/3)

    # em_fit = fit(responses, target, nontargets)
    bootstrap_results = bootstrap_nontarget_stat(responses, target, nontargets, nb_bootstrap_samples=100)
    print bootstrap_results

    assert bootstrap_results['p_value'] > 0.05, "No misbinding here, should not reject H0"

    # Positive example
    N = 1000
    N_nontarget = N/5
    N_rnd = N/10
    angles_nontargets = np.array([-np.pi/3 - 1., 0.5 + np.pi/2.])
    K = angles_nontargets.size

    target = np.zeros(N)
    nontargets = np.ones((N, K))*angles_nontargets
    kappa = np.array([10.0])

    # Sample from Von Mises
    responses = spst.vonmises.rvs(kappa, size=(N))

    # Randomly displace some points to their nontarget location (should still be VonMises(kappa))
    for k in xrange(K):
        curr_rand_indices = np.random.randint(N, size=N_nontarget/K)
        # Sample exactly as many values as there are displaced points
        responses[curr_rand_indices] = spst.vonmises.rvs(kappa, size=curr_rand_indices.size)
        responses[curr_rand_indices] += angles_nontargets[k]

    # Force some points to be random
    responses[np.random.randint(N, size=N_rnd)] = utils.sample_angle(N_rnd)

    bootstrap_results = bootstrap_nontarget_stat(responses, target, nontargets, nb_bootstrap_samples=100)

    assert np.any(bootstrap_results['p_value'] < 0.10), "Clear misbinding, should have rejected H0"
def compute_sample_inverse_FI(self, inv_cov_stim, items_thetas=None, nitems=0, min_distance=0.17):
    '''
        Compute one sample estimate of the inverse Fisher Information for nitems items.

        These samples can then be averaged together in a Monte Carlo scheme to provide a
        better estimate of the inverse Fisher Information for K items.

        Assumes the first item is fixed at (0, 0) and samples the others uniformly.
    '''

    if items_thetas is None:
        # target_item = utils.sample_angle(2)
        # target_item[0] = 0
        target_item = np.zeros(2)
        all_items = [target_item]

        # Add extra items
        for item_i in xrange(nitems - 1):
            new_item = utils.sample_angle(size=2)
            while not utils.enforce_distance_set(new_item, all_items, min_distance):
                new_item = utils.sample_angle(size=2)

            all_items.append(new_item)

        items_thetas = np.array(all_items)
    else:
        nitems = items_thetas.shape[0]

    # Compute all derivatives
    deriv_mu = np.zeros((2*nitems, self.M))
    for i in xrange(nitems):
        deriv_mu[2*i] = self.get_derivative_network_response(derivative_feature_target=0, stimulus_input=items_thetas[i])
        deriv_mu[2*i + 1] = self.get_derivative_network_response(derivative_feature_target=1, stimulus_input=items_thetas[i])

    deriv_mu[np.isnan(deriv_mu)] = 0.0

    # Compute the Fisher information matrix
    FI_nobj = np.dot(deriv_mu, np.dot(inv_cov_stim, deriv_mu.T))

    try:
        inv_FI_nobj = np.linalg.inv(FI_nobj)
    except np.linalg.linalg.LinAlgError:
        inv_FI_nobj = np.nan*np.empty(FI_nobj.shape)

    return inv_FI_nobj[0, 0], FI_nobj[0, 0]
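# Illustrative Monte Carlo averaging sketch (not part of the original class). The docstring
# above says single-sample estimates are meant to be averaged; assuming `network` is an
# instance exposing compute_sample_inverse_FI and `inv_cov_stim` has been precomputed, one
# such average could look like this. All names here are hypothetical.
def _example_montecarlo_inverse_FI(network, inv_cov_stim, nitems=3, nb_samples=100):
    inv_FI_samples = np.empty(nb_samples)
    FI_samples = np.empty(nb_samples)
    for i in xrange(nb_samples):
        # Each call draws a new random item configuration
        inv_FI_samples[i], FI_samples[i] = network.compute_sample_inverse_FI(inv_cov_stim, nitems=nitems)

    # Average over random item configurations, ignoring singular-matrix NaNs
    return np.nanmean(inv_FI_samples), np.nanmean(FI_samples)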
def bootstrap_nontarget_stat(responses, target, nontargets=np.array([[]]), nontarget_bootstrap_ecdf=None, nb_bootstrap_samples=100, resample_responses=False, resample_targets=False):
    '''
        Performs a bootstrap evaluation of the nontarget mixture proportion distribution.

        Use it to construct a test for the existence of misbinding errors.
    '''

    nontarget_bootstrap_samples = None
    bootstrap_results = []

    if nontarget_bootstrap_ecdf is None:
        # Get samples
        if resample_responses:
            bootstrap_responses = utils.sample_angle((responses.size, nb_bootstrap_samples))
        if resample_targets:
            bootstrap_targets = utils.sample_angle((responses.size, nb_bootstrap_samples))
        bootstrap_nontargets = utils.sample_angle((nontargets.shape[0], nontargets.shape[1], nb_bootstrap_samples))

        for i in progress.ProgressDisplay(np.arange(nb_bootstrap_samples), display=progress.SINGLE_LINE):
            if resample_responses and resample_targets:
                em_fit = fit(bootstrap_responses[..., i], bootstrap_targets[..., i], bootstrap_nontargets[..., i])
            elif resample_responses and not resample_targets:
                em_fit = fit(bootstrap_responses[..., i], target, bootstrap_nontargets[..., i])
            elif not resample_responses and resample_targets:
                em_fit = fit(responses, bootstrap_targets[..., i], bootstrap_nontargets[..., i])
            elif not resample_responses and not resample_targets:
                em_fit = fit(responses, target, bootstrap_nontargets[..., i])
            else:
                raise ValueError('Unexpected combination of resampling flags: %d %d' % (resample_responses, resample_targets))

            bootstrap_results.append(em_fit)

        nontarget_bootstrap_samples = np.array([bootstr_res['mixt_nontargets'] for bootstr_res in bootstrap_results])

        # Estimate the empirical CDF
        nontarget_bootstrap_ecdf = stmodsdist.empirical_distribution.ECDF(nontarget_bootstrap_samples)

    # Compute the p-value for the provided data
    em_fit = fit(responses, target, nontargets)
    p_value_bootstrap = 1. - nontarget_bootstrap_ecdf(em_fit['mixt_nontargets'])

    return dict(p_value=p_value_bootstrap, nontarget_ecdf=nontarget_bootstrap_ecdf, em_fit=em_fit, nontarget_bootstrap_samples=nontarget_bootstrap_samples, bootstrap_results_all=bootstrap_results)
def test():
    '''
        Does a unit test: samples data from a mixture of one Von Mises and random
        perturbations, then fits the model and checks that everything works.
    '''

    N = 5000
    N_rnd = N/5

    target = np.zeros(N)

    # kappa_space = np.array([1.0, 5.0, 20., 100, 3000, 5000., 0.5])
    kappa_space = np.array([5.0])
    kappa_fitted = np.zeros(kappa_space.size)

    for kappa_i, kappa in enumerate(kappa_space):
        print kappa

        responses = spst.vonmises.rvs(kappa, size=(N))
        responses[np.random.randint(N, size=N_rnd)] = utils.sample_angle(N_rnd)

        em_fit = fit(responses, target, debug=False)
        kappa_fitted[kappa_i] = em_fit['kappa']

        print em_fit

    # Check if the estimated kappa is within 20% of the target one
    print kappa_fitted, kappa_fitted/kappa_space
    assert np.all(np.abs(kappa_fitted/kappa_space - 1.0) < 0.20)
def sample_from_fit(em_fit_result_dict, targets, nontargets):
    '''
        Get N samples from the mixture model defined by em_fit_result_dict.
    '''

    N = targets.size
    K = nontargets.shape[1]

    # Pre-sample items on target
    responses = spst.vonmises.rvs(em_fit_result_dict['kappa'], size=(N))

    # Randomly flip some to nontargets or the random component, depending on a random coin toss
    # (classical cumulative probability trick)
    samples_rand_N = np.random.random((N, 1))
    probs_components = np.r_[np.array([em_fit_result_dict['mixt_target']]), np.array([em_fit_result_dict['mixt_nontargets']]*K)/K, em_fit_result_dict['mixt_random']]
    cumprobs_components = np.cumsum(probs_components)

    samples_components = samples_rand_N < cumprobs_components

    # Move the targets
    responses += samples_components[:, 0]*targets
    samples_components *= ~samples_components[:, 0][:, np.newaxis]

    # Move the nontargets
    for k in xrange(K):
        responses += samples_components[:, k+1]*nontargets[:, k]
        samples_components *= ~samples_components[:, k+1][:, np.newaxis]

    # Resample the random ones uniformly
    responses[samples_components[:, -1]] = utils.sample_angle(size=np.sum(samples_components[:, -1]))

    return responses
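# Illustrative usage sketch (not part of the original module): drawing synthetic responses
# from a hand-built mixture fit. The parameter values below are made up for the example.
def _example_sample_from_fit():
    N, K = 500, 2
    targets = np.zeros(N)
    nontargets = np.ones((N, K))*np.array([-np.pi/2., np.pi/2.])

    # mixt_nontargets is the total nontarget proportion; sample_from_fit splits it across K
    em_fit_example = dict(kappa=8.0, mixt_target=0.7, mixt_nontargets=0.2, mixt_random=0.1)

    responses = sample_from_fit(em_fit_example, targets, nontargets)
    return responses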
def assign_prefered_stimuli_random(self, neurons_indices):
    '''
        Randomly assign preferred stimuli to all neurons.
    '''

    new_stim = utils.sample_angle(size=(neurons_indices.size, self.R))

    # Assign the preferred stimuli.
    # Uninitialized neurons will get masked out below.
    self.neurons_preferred_stimulus[neurons_indices[:new_stim.shape[0]]] = new_stim
def collect_network_responses(self, num_samples=5000):
    '''
        Sample network responses (population code outputs) over the entire space,
        to be used for empirical estimates.
    '''

    responses = np.empty((num_samples, self.M))
    random_angles = utils.sample_angle((num_samples, self.R))

    for i in progress.ProgressDisplay(xrange(num_samples), display=progress.SINGLE_LINE):
        responses[i] = self.get_network_response(random_angles[i])

    return responses
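# Illustrative sketch (not part of the original class): one empirical estimate the sampled
# responses could feed, here a covariance matrix over the population code. `network` is a
# hypothetical instance exposing collect_network_responses.
def _example_empirical_covariance(network, num_samples=2000):
    responses = network.collect_network_responses(num_samples=num_samples)
    # Empirical covariance of the population response over uniformly sampled stimuli
    cov_responses = np.cov(responses.T)
    return cov_responses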
def test_optimised_network_response():
    R = 2
    M = int(50*R + 10**R)
    ratio = 0.1
    rn = HighDimensionNetwork.create_mixed(M, R=R, ratio_feature_conjunctive=ratio, autoset_parameters=True, response_maxout=False)

    print "Testing if optimised and non-optimised network responses are the same..."
    rnd_angles = utils.sample_angle((10000, R))
    all_correct = True
    for curr_angles in rnd_angles:
        all_correct = all_correct and np.allclose(rn.get_network_response_bivariatefisher(curr_angles, params=dict(opt=True)), rn.get_network_response_bivariatefisher(curr_angles, params=dict(opt=False)))

    assert all_correct, "Optimised and normal network responses do not correspond..."
def compute_maximum_activation_network(self, nb_samples=100):
    """Try to estimate the maximum activation for the network.

    This can be used to make sure sigmax is adapted, or to renormalize everything.
    """
    test_samples = utils.sample_angle((nb_samples, self.R))

    max_activation = 0
    for test_sample in test_samples:
        max_activation = max(np.nanmax(self.get_network_response(test_sample)), max_activation)

    return max_activation
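# Illustrative sketch (not part of the original class): one way the maximum-activation
# estimate could be used to renormalize responses, as hinted at in the docstring above.
# `network`, `stimulus` and `rescale_factor` are hypothetical names for this example.
def _example_renormalise_response(network, stimulus):
    max_activation = network.compute_maximum_activation_network(nb_samples=200)
    response = network.get_network_response(stimulus)

    # Rescale so the largest expected activation is approximately 1
    rescale_factor = 1.0/max_activation if max_activation > 0 else 1.0
    return response*rescale_factor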
def test_simple():
    '''
        Does a unit test: samples data from a mixture of one Von Mises and random
        perturbations, then fits the model and checks that everything works.
    '''
    import em_circularmixture

    show_checks = False

    N = 200
    Tmax = 5
    T_space = np.arange(1, Tmax+1)
    kappa_theta = np.array([10., -0.7, -0.3])

    angles_nontargets = utils.sample_angle((Tmax, Tmax, Tmax-1))
    targets = np.zeros((Tmax, Tmax, N))
    nontargets = np.ones((Tmax, Tmax, N, Tmax-1))*angles_nontargets[:, :, np.newaxis, :]
    responses = np.zeros((Tmax, Tmax, N))

    # Filter impossible trecalls
    ind_filt = np.triu_indices(Tmax, 1)
    targets[ind_filt] = np.nan
    nontargets[ind_filt] = np.nan
    responses[ind_filt] = np.nan

    # Correct nontargets just to be sure
    for T_i, T in enumerate(T_space):
        for trecall_i, trecall in enumerate(T_space):
            nontargets[T_i, trecall_i, :, (T-1):] = np.nan

    for T_i, T in enumerate(T_space):
        for trecall_i, trecall in enumerate(T_space):
            if trecall <= T:
                kappa_target = compute_kappa(T, trecall, kappa_theta)
                em_fit_target = dict(kappa=kappa_target, mixt_target=0.75, mixt_nontargets=0.15, mixt_random=0.1)

                # Sample from Von Mises
                responses[T_i, trecall_i] = em_circularmixture.sample_from_fit(em_fit_target, targets[T_i, trecall_i], nontargets[T_i, trecall_i, :, :(T-1)])

                print "T: {T:d}, trecall: {trecall:d}".format(T=T, trecall=trecall)

                if show_checks:
                    em_fit = em_circularmixture.fit(responses[T_i, trecall_i], targets[T_i, trecall_i], nontargets[T_i, trecall_i, :, :(T-1)])
                    print "True: kappa={kappa:.5}, pt={mixt_target:.3}, pnt={mixt_nontargets:.3}, pr={mixt_random:.3}".format(**em_fit_target)
                    print "Fitted: kappa={kappa:.5}, pt={mixt_target:.3}, pnt={mixt_nontargets:.3}, pr={mixt_random:.3}".format(**em_fit)

    # Now try the full fit with alpha/beta/gamma
    # result_dict = fit(T_space, responses, targets, nontargets, debug=True)

    return T_space, responses, targets, nontargets
def test_bays09like():
    '''
        Uses kappa and mixture probabilities from Bays09.
    '''
    import em_circularmixture

    N = 2000
    T_space = np.array([1, 2, 4, 6])
    Tnum = T_space.size
    Tmax = T_space.max()

    kappa_space = np.array([19.76349326, 11.2619971, 9.22001848, 8.30524648])
    probtarget_space = np.array([0.98688956, 0.92068596, 0.71474023, 0.5596124])
    probnontarget_space = np.array([0., 0.02853913, 0.10499085, 0.28098455])
    probrandom_space = np.array([0.01311044, 0.05077492, 0.18026892, 0.15940305])

    beta, alpha = utils.fit_powerlaw(T_space, kappa_space)

    angles_nontargets = utils.sample_angle((Tnum, Tmax-1))
    targets = np.zeros((Tnum, N))
    nontargets = np.ones((Tnum, N, Tmax-1))*angles_nontargets[:, np.newaxis, :]
    responses = np.zeros((Tnum, N))

    for K_i, K in enumerate(T_space):
        nontargets[K_i, :, (K-1):] = np.nan

    for T_i, T in enumerate(T_space):
        kappa_target = alpha*T**beta
        em_fit_target = dict(kappa=kappa_target, alpha=alpha, beta=beta, mixt_target=probtarget_space[T_i], mixt_nontargets=probnontarget_space[T_i], mixt_random=probrandom_space[T_i])

        # Sample from Von Mises, using the first T-1 nontargets
        # (the original sliced with :T_i, which does not match the (T-1) nontargets fitted below)
        responses[T_i] = sample_from_fit(em_fit_target, targets[T_i], nontargets[T_i, :, :(T-1)])

        em_fit = em_circularmixture.fit(responses[T_i], targets[T_i], nontargets[T_i, :, :(T-1)])

        print "True: kappa={kappa:.5}, pt={mixt_target:.3}, pnt={mixt_nontargets:.3}, pr={mixt_random:.3}".format(**em_fit_target)
        print "Fitted: kappa={kappa:.5}, pt={mixt_target:.3}, pnt={mixt_nontargets:.3}, pr={mixt_random:.3}".format(**em_fit)

    # Now try the full fit with alpha/beta
    # em_fit = fit(responses, targets, nontargets)

    return T_space, responses, targets, nontargets
def test():
    '''
        Does a unit test: samples data from a mixture of one Von Mises and random
        perturbations, then fits the model and checks that everything works.
    '''

    N = 1000
    N_nontarget = N/3
    N_rnd = N/5
    angles_nontargets = np.array([-np.pi/3 - 1., 1 + np.pi/2.])
    K = angles_nontargets.size

    target = np.zeros(N)
    nontargets = np.ones((N, K))*angles_nontargets

    kappa_space = np.array([[10.0, 8.0, 20.0]])
    kappa_fitted = np.zeros((kappa_space.shape[0], K+1))

    for kappa_i, kappas in enumerate(kappa_space):
        print kappas

        # Sample from Von Mises
        responses = spst.vonmises.rvs(kappas[0], size=(N))

        # Randomly displace some points to their nontarget location (should still be VonMises(kappa))
        for k in xrange(K):
            curr_rand_indices = np.random.randint(N, size=N_nontarget/K)
            # Sample exactly as many values as there are displaced points
            responses[curr_rand_indices] = spst.vonmises.rvs(kappas[k+1], size=curr_rand_indices.size)
            responses[curr_rand_indices] += angles_nontargets[k]

        # Force some points to be random
        responses[np.random.randint(N, size=N_rnd)] = utils.sample_angle(N_rnd)

        em_fit = fit(responses, target, nontarget_angles=nontargets, debug=False)
        kappa_fitted[kappa_i] = em_fit['kappa']

        print em_fit

    # Check if the estimated kappas are within 20% of the target ones
    print kappa_fitted/kappa_space[:, np.newaxis]
    assert np.all(np.abs(kappa_fitted/kappa_space[:, np.newaxis] - 1.0) < 0.20)
def test_simple():
    '''
        Does a unit test: samples data from a mixture of one Von Mises and random
        perturbations, then fits the model and checks that everything works.
    '''
    import em_circularmixture

    N = 1000
    Tmax = 5
    T_space = np.arange(1, Tmax+1)
    alpha = 9.8
    beta = -0.58

    angles_nontargets = utils.sample_angle((Tmax, Tmax-1))
    targets = np.zeros((Tmax, N))
    nontargets = np.ones((Tmax, N, Tmax-1))*angles_nontargets[:, np.newaxis, :]
    responses = np.zeros((Tmax, N))

    # Correct nontargets just to be sure
    for K_i, K in enumerate(T_space):
        nontargets[K_i, :, (K-1):] = np.nan

    for K in xrange(Tmax):
        kappa_target = alpha*(K + 1.0)**beta
        em_fit_target = dict(kappa=kappa_target, alpha=alpha, beta=beta, mixt_target=0.7, mixt_nontargets=0.2, mixt_random=0.1)

        # Sample from Von Mises
        responses[K] = sample_from_fit(em_fit_target, targets[K], nontargets[K, :, :K])

        em_fit = em_circularmixture.fit(responses[K], targets[K], nontargets[K, :, :K])

        print "True: kappa={kappa:.5}, pt={mixt_target:.3}, pnt={mixt_nontargets:.3}, pr={mixt_random:.3}".format(**em_fit_target)
        print "Fitted: kappa={kappa:.5}, pt={mixt_target:.3}, pnt={mixt_nontargets:.3}, pr={mixt_random:.3}".format(**em_fit)

    # Now try the full fit with alpha/beta
    # em_fit = fit(responses, targets, nontargets)

    return T_space, responses, targets, nontargets
def test_bootstrap_nontargets():
    '''
        Check how the bootstrapped test for misbinding errors behaves.
    '''

    N = 300
    nb_nontargets = 1
    kappa = 5.0

    target = np.zeros(N)
    nontargets = utils.wrap_angles(np.linspace(0.0, 2*np.pi, nb_nontargets + 1, endpoint=False)[1:])*np.ones((N, nb_nontargets))

    responses = spst.vonmises.rvs(kappa, size=(N))
    responses[np.random.randint(N, size=N/3)] = utils.sample_angle(N/3)

    # em_fit = fit(responses, target, nontargets)
    bootstrap_results = bootstrap_nontarget_stat(responses, target, nontargets)
    print bootstrap_results

    assert bootstrap_results['p_value'] > 0.05, "No misbinding here, should not reject H0"
def check_precision_sensitivity_determ():
    '''
        Construct a situation with one Von Mises component and one random component,
        and see how the random component affects the basic precision estimator we use elsewhere.
    '''

    N = 1000
    kappa_space = np.array([3., 10., 20.])
    # kappa_space = np.array([3.])
    nb_repeats = 20
    ratio_to_kappa = False
    savefigs = True

    precision_nb_samples = 101
    N_rnd_space = np.linspace(0, N/2, precision_nb_samples).astype(int)

    precision_all = np.zeros((N_rnd_space.size, nb_repeats))
    kappa_estimated_all = np.zeros((N_rnd_space.size, nb_repeats))
    precision_squared_all = np.zeros((N_rnd_space.size, nb_repeats))
    kappa_mixtmodel_all = np.zeros((N_rnd_space.size, nb_repeats))
    mixtmodel_all = np.zeros((N_rnd_space.size, nb_repeats, 2))

    dataio = DataIO.DataIO()

    target_samples = np.zeros(N)

    for kappa in kappa_space:
        true_kappa = kappa*np.ones(N_rnd_space.size)

        # First sample all as von Mises
        samples_all = spst.vonmises.rvs(kappa, size=(N_rnd_space.size, nb_repeats, N))

        for repeat in progress.ProgressDisplay(xrange(nb_repeats)):
            for i, N_rnd in enumerate(N_rnd_space):
                samples = samples_all[i, repeat]

                # Then set N_rnd of them to random [-np.pi, np.pi] values
                samples[np.random.randint(N, size=N_rnd)] = utils.sample_angle(N_rnd)

                # Estimate precision from those samples
                precision_all[i, repeat] = utils.compute_precision_samples(samples, square_precision=False, remove_chance_level=False)
                precision_squared_all[i, repeat] = utils.compute_precision_samples(samples, square_precision=True)

                # Convert circular std dev back to kappa
                kappa_estimated_all[i, repeat] = utils.stddev_to_kappa(1./precision_all[i, repeat])

                # Fit the mixture model
                params_fit = em_circularmixture.fit(samples, target_samples)
                kappa_mixtmodel_all[i, repeat] = params_fit['kappa']
                mixtmodel_all[i, repeat] = params_fit['mixt_target'], params_fit['mixt_random']

                print "%d/%d N_rnd: %d, Kappa: %.3f, precision: %.3f, kappa_tilde: %.3f, precision^2: %.3f, kappa_mixtmod: %.3f" % (repeat, nb_repeats, N_rnd, kappa, precision_all[i, repeat], kappa_estimated_all[i, repeat], precision_squared_all[i, repeat], kappa_mixtmodel_all[i, repeat])

        if ratio_to_kappa:
            precision_all /= kappa
            precision_squared_all /= kappa
            kappa_estimated_all /= kappa
            true_kappa /= kappa

        # First plot: compare all precision/kappa estimators
        f, ax = plt.subplots()
        ax.plot(N_rnd_space/float(N), true_kappa, 'k-', linewidth=3, label='Kappa_true')
        utils.plot_mean_std_area(N_rnd_space/float(N), np.mean(precision_all, axis=-1), np.std(precision_all, axis=-1), ax_handle=ax, label='precision')
        utils.plot_mean_std_area(N_rnd_space/float(N), np.mean(precision_squared_all, axis=-1), np.std(precision_squared_all, axis=-1), ax_handle=ax, label='precision^2')
        utils.plot_mean_std_area(N_rnd_space/float(N), np.mean(kappa_estimated_all, axis=-1), np.std(kappa_estimated_all, axis=-1), ax_handle=ax, label='kappa_tilde')
        utils.plot_mean_std_area(N_rnd_space/float(N), np.mean(kappa_mixtmodel_all, axis=-1), np.std(kappa_mixtmodel_all, axis=-1), ax_handle=ax, label='kappa mixt model')

        ax.legend()
        ax.set_title('Effect of random samples on precision. kappa: %.2f. ratiokappa %s' % (kappa, ratio_to_kappa))
        ax.set_xlabel('Proportion random samples. N tot %d' % N)
        ax.set_ylabel('Kappa/precision (not same units)')

        f.canvas.draw()

        if savefigs:
            dataio.save_current_figure("precision_sensitivity_kappa%dN%d_{unique_id}.pdf" % (kappa, N))

        # Second plot: kappa and mixt_target/mixt_random, using the left/right axes separately
        f, ax = plt.subplots()
        ax2 = ax.twinx()

        # Left axis: kappa
        ax.plot(N_rnd_space/float(N), true_kappa, 'k-', linewidth=3, label='kappa true')
        utils.plot_mean_std_area(N_rnd_space/float(N), np.mean(kappa_mixtmodel_all, axis=-1), np.std(kappa_mixtmodel_all, axis=-1), ax_handle=ax, label='kappa')

        # Right axis: mixture probabilities
        utils.plot_mean_std_area(N_rnd_space/float(N), np.mean(mixtmodel_all[..., 0], axis=-1), np.std(mixtmodel_all[..., 0], axis=-1), ax_handle=ax2, label='mixt target', color='r')
        utils.plot_mean_std_area(N_rnd_space/float(N), np.mean(mixtmodel_all[..., 1], axis=-1), np.std(mixtmodel_all[..., 1], axis=-1), ax_handle=ax2, label='mixt random', color='g')

        ax.set_title('Mixture model parameters evolution. kappa: %.2f, ratiokappa %s' % (kappa, ratio_to_kappa))
        ax.set_xlabel('Proportion random samples. N tot %d' % N)
        ax.set_ylabel('Kappa')
        ax2.set_ylabel('Mixture proportions')

        lines, labels = ax.get_legend_handles_labels()
        lines2, labels2 = ax2.get_legend_handles_labels()
        ax.legend(lines + lines2, labels + labels2)

        if savefigs:
            dataio.save_current_figure("precision_sensitivity_mixtmodel_kappa%dN%d_{unique_id}.pdf" % (kappa, N))

    return locals()
def compute_bootstrap_samples(dataset, nb_bootstrap_samples, angle_space):
    responses_resampled = np.empty(
        (np.unique(dataset['n_items']).size, nb_bootstrap_samples),
        dtype=np.object)
    error_nontargets_resampled = np.empty(
        (np.unique(dataset['n_items']).size, nb_bootstrap_samples),
        dtype=np.object)
    error_targets_resampled = np.empty(
        (np.unique(dataset['n_items']).size, nb_bootstrap_samples),
        dtype=np.object)
    hist_cnts_nontarget_bootstraps_nitems = np.empty(
        (np.unique(dataset['n_items']).size, nb_bootstrap_samples,
         angle_space.size - 1))*np.nan
    hist_cnts_target_bootstraps_nitems = np.empty(
        (np.unique(dataset['n_items']).size, nb_bootstrap_samples,
         angle_space.size - 1))*np.nan

    bootstrap_data = {
        'responses_resampled': responses_resampled,
        'error_nontargets_resampled': error_nontargets_resampled,
        'error_targets_resampled': error_targets_resampled,
        'hist_cnts_nontarget_bootstraps_nitems': hist_cnts_nontarget_bootstraps_nitems,
        'hist_cnts_target_bootstraps_nitems': hist_cnts_target_bootstraps_nitems,
    }

    for n_items_i, n_items in enumerate(np.unique(dataset['n_items'])):
        # Data collapsed across subjects
        ids_filtered = (dataset['n_items'] == n_items).flatten()

        if n_items > 1:
            # Get random bootstrap nontargets
            bootstrap_nontargets = utils.sample_angle(
                dataset['item_angle'][ids_filtered, 1:n_items].shape
                + (nb_bootstrap_samples, ))

            # Compute associated EM fits
            # bootstrap_results = []
            for bootstrap_i in progress.ProgressDisplay(
                    np.arange(nb_bootstrap_samples),
                    display=progress.SINGLE_LINE):

                em_fit = em_circularmixture.fit(
                    dataset['response'][ids_filtered, 0],
                    dataset['item_angle'][ids_filtered, 0],
                    bootstrap_nontargets[..., bootstrap_i])
                # bootstrap_results.append(em_fit)

                # Get EM samples
                responses_resampled[n_items_i, bootstrap_i] = (
                    em_circularmixture.sample_from_fit(
                        em_fit,
                        dataset['item_angle'][ids_filtered, 0],
                        bootstrap_nontargets[..., bootstrap_i]))

                # Compute the errors
                error_nontargets_resampled[n_items_i, bootstrap_i] = (
                    utils.wrap_angles(
                        responses_resampled[n_items_i, bootstrap_i][:, np.newaxis]
                        - bootstrap_nontargets[..., bootstrap_i]))
                error_targets_resampled[n_items_i, bootstrap_i] = (
                    utils.wrap_angles(
                        responses_resampled[n_items_i, bootstrap_i]
                        - dataset['item_angle'][ids_filtered, 0]))

                # Bin everything
                (hist_cnts_nontarget_bootstraps_nitems[n_items_i, bootstrap_i],
                 _, _) = utils.histogram_binspace(
                    utils.dropnan(
                        error_nontargets_resampled[n_items_i, bootstrap_i]),
                    bins=angle_space, norm='density')
                (hist_cnts_target_bootstraps_nitems[n_items_i, bootstrap_i],
                 _, _) = utils.histogram_binspace(
                    utils.dropnan(
                        error_targets_resampled[n_items_i, bootstrap_i]),
                    bins=angle_space, norm='density')

    return bootstrap_data
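# Illustrative usage sketch (not part of the original module): calling compute_bootstrap_samples
# on a dataset dictionary. The keys used ('n_items', 'item_angle', 'response') are the ones the
# function itself accesses; the loading helper shown is the one used elsewhere in this codebase,
# assumed importable here.
def _example_compute_bootstrap_samples():
    angle_space = np.linspace(-np.pi, np.pi, 51)
    dataset = load_experimental_data.load_data_bays09(fit_mixture_model=True)

    bootstrap_data = compute_bootstrap_samples(dataset, nb_bootstrap_samples=50, angle_space=angle_space)
    return bootstrap_data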
def test():
    '''
        Does a unit test: samples data from a mixture of one Von Mises and random
        perturbations, then fits the model and checks that everything works.
    '''

    N = 1000
    angles_targets = np.array([0.0])
    # angles_nontargets = np.array([-np.pi/3-1., 1+np.pi/2.])
    angles_nontargets = np.array([-np.pi/3 - 1.])
    K = angles_nontargets.size

    prob_nontargets = 0.3
    prob_rnd = 0.2
    probs_components = np.array([1 - prob_nontargets - prob_rnd] + [prob_nontargets/K]*K + [prob_rnd])
    cumprobs_components = np.cumsum(probs_components)

    target = np.zeros(N)
    nontargets = np.ones((N, K))*angles_nontargets

    kappa_space = np.array([10.0])
    fitted_params = np.zeros((kappa_space.shape[0], 4))

    for kappa_i, kappa in enumerate(kappa_space):
        print kappa

        ## Create random samples from a mixture of von Mises

        # Sample from Von Mises
        responses = spst.vonmises.rvs(kappa, size=(N))

        # Decide which sample goes to which component
        samples_rand_N = np.random.random((N, 1))
        samples_components = samples_rand_N < cumprobs_components

        # Move the targets
        responses += samples_components[:, 0]*angles_targets
        samples_components *= ~samples_components[:, 0][:, np.newaxis]

        # Move the nontargets
        for k in xrange(K):
            responses += samples_components[:, k+1]*nontargets[:, k]
            samples_components *= ~samples_components[:, k+1][:, np.newaxis]

        # Resample the random ones uniformly
        responses[samples_components[:, -1]] = utils.sample_angle(size=np.sum(samples_components[:, -1]))

        ## Finished generating, fit the model
        em_fit = fit(responses, target, nontarget_angles=nontargets, debug=False, kappa=kappa)
        fitted_params[kappa_i] = np.array([em_fit['kappa'], em_fit['mixt_target'], np.sum(em_fit['mixt_nontargets']), em_fit['mixt_random']])

        print em_fit

    # Check if the estimated parameters are within 20% of the true ones
    true_params = np.ones((kappa_space.shape[0], 4))
    true_params[:, 0] = kappa_space
    true_params[:, 1:] = np.array([1 - prob_nontargets - prob_rnd, prob_nontargets, prob_rnd])

    print fitted_params/true_params
    assert np.all(np.abs(fitted_params/true_params - 1.0) < 0.20)
def plot_bootstrap_randomsamples():
    '''
        Do histograms with random samples from bootstrap nontarget estimates.
    '''
    dataio = DataIO(label='plotpaper_bootstrap_randomized')

    nb_bootstrap_samples = 200
    use_precomputed = True

    angle_space = np.linspace(-np.pi, np.pi, 51)
    bins_center = angle_space[:-1] + np.diff(angle_space)[0]/2

    data_bays2009 = load_experimental_data.load_data_bays09(fit_mixture_model=True)

    ## Super long simulation, use precomputed data maybe?
    if use_precomputed:
        data = pickle.load(open('/Users/loicmatthey/Dropbox/UCL/1-phd/Work/Visual_working_memory/code/git-bayesian-visual-working-memory/Data/cache_randomized_bootstrap_samples_plots_paper_theo_plotbootstrapsamples/bootstrap_histo_katz.npy', 'r'))

        responses_resampled = data['responses_resampled']
        error_nontargets_resampled = data['error_nontargets_resampled']
        error_targets_resampled = data['error_targets_resampled']
        hist_cnts_nontarget_bootstraps_nitems = data['hist_cnts_nontarget_bootstraps_nitems']
        hist_cnts_target_bootstraps_nitems = data['hist_cnts_target_bootstraps_nitems']
    else:
        responses_resampled = np.empty((np.unique(data_bays2009['n_items']).size, nb_bootstrap_samples), dtype=np.object)
        error_nontargets_resampled = np.empty((np.unique(data_bays2009['n_items']).size, nb_bootstrap_samples), dtype=np.object)
        error_targets_resampled = np.empty((np.unique(data_bays2009['n_items']).size, nb_bootstrap_samples), dtype=np.object)
        hist_cnts_nontarget_bootstraps_nitems = np.empty((np.unique(data_bays2009['n_items']).size, nb_bootstrap_samples, angle_space.size - 1))*np.nan
        hist_cnts_target_bootstraps_nitems = np.empty((np.unique(data_bays2009['n_items']).size, nb_bootstrap_samples, angle_space.size - 1))*np.nan

        for n_items_i, n_items in enumerate(np.unique(data_bays2009['n_items'])):
            # Data collapsed across subjects
            ids_filtered = (data_bays2009['n_items'] == n_items).flatten()

            if n_items > 1:
                # Get random bootstrap nontargets
                bootstrap_nontargets = utils.sample_angle(data_bays2009['item_angle'][ids_filtered, 1:n_items].shape + (nb_bootstrap_samples, ))

                # Compute associated EM fits
                bootstrap_results = []
                for bootstrap_i in progress.ProgressDisplay(np.arange(nb_bootstrap_samples), display=progress.SINGLE_LINE):
                    em_fit = em_circularmixture_allitems_uniquekappa.fit(data_bays2009['response'][ids_filtered, 0], data_bays2009['item_angle'][ids_filtered, 0], bootstrap_nontargets[..., bootstrap_i])
                    bootstrap_results.append(em_fit)

                    # Get EM samples
                    responses_resampled[n_items_i, bootstrap_i] = em_circularmixture_allitems_uniquekappa.sample_from_fit(em_fit, data_bays2009['item_angle'][ids_filtered, 0], bootstrap_nontargets[..., bootstrap_i])

                    # Compute the errors
                    error_nontargets_resampled[n_items_i, bootstrap_i] = utils.wrap_angles(responses_resampled[n_items_i, bootstrap_i][:, np.newaxis] - bootstrap_nontargets[..., bootstrap_i])
                    error_targets_resampled[n_items_i, bootstrap_i] = utils.wrap_angles(responses_resampled[n_items_i, bootstrap_i] - data_bays2009['item_angle'][ids_filtered, 0])

                    # Bin everything
                    hist_cnts_nontarget_bootstraps_nitems[n_items_i, bootstrap_i], x, bins = utils.histogram_binspace(utils.dropnan(error_nontargets_resampled[n_items_i, bootstrap_i]), bins=angle_space, norm='density')
                    hist_cnts_target_bootstraps_nitems[n_items_i, bootstrap_i], x, bins = utils.histogram_binspace(utils.dropnan(error_targets_resampled[n_items_i, bootstrap_i]), bins=angle_space, norm='density')

    # Now show the average histograms
    hist_cnts_target_bootstraps_nitems_mean = np.mean(hist_cnts_target_bootstraps_nitems, axis=-2)
    hist_cnts_target_bootstraps_nitems_std = np.std(hist_cnts_target_bootstraps_nitems, axis=-2)
    hist_cnts_target_bootstraps_nitems_sem = hist_cnts_target_bootstraps_nitems_std/np.sqrt(hist_cnts_target_bootstraps_nitems.shape[1])

    hist_cnts_nontarget_bootstraps_nitems_mean = np.mean(hist_cnts_nontarget_bootstraps_nitems, axis=-2)
    hist_cnts_nontarget_bootstraps_nitems_std = np.std(hist_cnts_nontarget_bootstraps_nitems, axis=-2)
    hist_cnts_nontarget_bootstraps_nitems_sem = hist_cnts_nontarget_bootstraps_nitems_std/np.sqrt(hist_cnts_target_bootstraps_nitems.shape[1])

    f1, axes1 = plt.subplots(ncols=np.unique(data_bays2009['n_items']).size - 1, figsize=((np.unique(data_bays2009['n_items']).size - 1)*6, 6), sharey=True)
    for n_items_i, n_items in enumerate(np.unique(data_bays2009['n_items'])):
        if n_items > 1:
            utils.plot_mean_std_area(bins_center, hist_cnts_nontarget_bootstraps_nitems_mean[n_items_i], hist_cnts_nontarget_bootstraps_nitems_sem[n_items_i], ax_handle=axes1[n_items_i - 1], color='k')

            # Now add the data histograms
            axes1[n_items_i - 1].bar(bins_center, data_bays2009['hist_cnts_nontarget_nitems_stats']['mean'][n_items_i], width=2.*np.pi/(angle_space.size - 1), align='center', yerr=data_bays2009['hist_cnts_nontarget_nitems_stats']['sem'][n_items_i])

            # axes4[n_items_i-1].set_title('N=%d' % n_items)
            axes1[n_items_i - 1].set_xlim([bins_center[0] - np.pi/(angle_space.size - 1), bins_center[-1] + np.pi/(angle_space.size - 1)])
            # axes3[n_items_i-1].set_ylim([0., 2.0])
            axes1[n_items_i - 1].set_xticks((-np.pi, -np.pi/2, 0, np.pi/2., np.pi))
            axes1[n_items_i - 1].set_xticklabels((r'$-\pi$', r'$-\frac{\pi}{2}$', r'$0$', r'$\frac{\pi}{2}$', r'$\pi$'), fontsize=16)
            # axes1[n_items_i-1].bar(bins_center, hist_cnts_nontarget_bootstraps_nitems_mean[n_items_i], width=2.*np.pi/(angle_space.size-1), align='center', yerr=hist_cnts_nontarget_bootstraps_nitems_std[n_items_i])

            axes1[n_items_i - 1].get_figure().canvas.draw()

    if dataio is not None:
        plt.tight_layout()
        dataio.save_current_figure("hist_error_nontarget_persubj_{label}_{unique_id}.pdf")

    if False:
        f2, axes2 = plt.subplots(ncols=np.unique(data_bays2009['n_items']).size - 1, figsize=((np.unique(data_bays2009['n_items']).size - 1)*6, 6), sharey=True)
        for n_items_i, n_items in enumerate(np.unique(data_bays2009['n_items'])):
            utils.plot_mean_std_area(bins_center, hist_cnts_target_bootstraps_nitems_mean[n_items_i], hist_cnts_target_bootstraps_nitems_std[n_items_i], ax_handle=axes2[n_items_i - 1])
            # axes2[n_items_i-1].bar(bins_center, hist_cnts_target_bootstraps_nitems_mean[n_items_i], width=2.*np.pi/(angle_space.size-1), align='center', yerr=hist_cnts_target_bootstraps_nitems_std[n_items_i])

    return locals()