def generic_demo(params):
    """Run one forward-generation + inference demo.

    Generates synthetic data via ``fwd.generate_synthetic_multiple``, runs
    ``do_inference`` on the resulting beliefs, and returns everything needed
    to inspect the run.

    NOTE: ``params`` is mutated in place — a "noiseless" own-noise setting is
    replaced with a small truncnorm noise before inference (presumably the
    sampler cannot handle exactly-zero noise; TODO confirm).

    Returns a tuple of
    (individual, actual, beliefs, meta, states, indiv_states, params, accept[, internals])
    — the 9-element form when inference also reports internals, else 8 elements.
    """
    print("Num mh steps: ", params['num_mh_samples'])

    # Forward model: synthesize data for the demo.
    fwd_results = fwd.generate_synthetic_multiple(params)
    individual, actual, beliefs, meta = fwd_results

    # Inference can't use a truly noiseless own-noise model; substitute a
    # narrow truncated normal instead. (Mutates caller's params dict.)
    if params['own_noise_type'] == "noiseless":
        params['own_noise_type'] = "truncnorm"
        params['own_noise_default'] = 0.1

    print(datetime.datetime.now())  # start-of-inference timestamp
    inf_results = do_inference(params, beliefs, meta)
    # BUGFIX: the end-of-inference timestamp was originally placed after the
    # return statements and therefore never executed; print it here instead.
    print(datetime.datetime.now())

    if len(inf_results) == 4:
        # Inference variant that also exposes sampler internals.
        states, indiv_states, accept, internals = inf_results
        return (individual, actual, beliefs, meta, states, indiv_states,
                params, accept, internals)
    else:
        states, indiv_states, accept = inf_results
        return (individual, actual, beliefs, meta, states, indiv_states,
                params, accept)
def accuracy_vs_expertise():
    """Measure how nominal-response accuracy correlates with expertise.

    Sweeps expertise over [-1, 1), runs ``num_trials`` forward simulations per
    sweep iteration, scores each responder by whether thresholding their
    belief at 0.5 matches the true world state, and prints the Pearson
    correlation between per-responder accuracy and expertise.

    NOTE(review): relies on module-level globals ``params`` and
    ``num_trials`` (not defined in this function) — verify they exist at the
    call site. ``params`` is mutated in place.
    """
    expertise = np.arange(-1, 1, .01)
    params['expertise_default'] = expertise
    params['num_responses'] = len(expertise)
    params['own_noise_hierarchy'] = "individual"

    # The own-noise sweep was disabled; the loop currently just repeats the
    # experiment 10 times with identical params (own_noise is only a label).
    #for own_noise in np.arange(0.01, .5, .05):
    for own_noise in range(10):
        #params['own_noise_default'] = own_noise
        correct = np.zeros((num_trials, params['num_responses']))
        for n in range(num_trials):
            if n % 100 == 0:
                print(n)  # progress indicator
            fwd_results = fwd.generate_synthetic_multiple(params)
            individual, actual, beliefs, meta = fwd_results
            # Nominal answer per responder k: threshold belief at 0.5.
            nominal = np.array([(np.array(beliefs[0][1, k]) > .5).astype(int)
                                for k in range(beliefs[0].shape[1])])
            # 1 where the nominal answer matches the true world state.
            correct[n, :] = np.where(nominal == actual[0]['world'], 1, 0)
        accuracy = np.sum(correct, axis=0)
        #plt.plot(expertise, accuracy)
        print(own_noise, pearsonr(accuracy, individual['expertise']))
    # BUGFIX: removed a trailing `1/0` that deliberately raised
    # ZeroDivisionError as a debugging stop; the function now returns cleanly.