def geom_wrap(expt, qnum, sn, subject_start=0, subject_end=33):
    """Run the geometric grid search on a single question of an experiment.

    Parameters
    ----------
    expt : dataset identifier forwarded to ``vars_from_data``.
    qnum : index of the question whose beliefs/metas are analysed.
    sn : noise level used for both the meta- and own-noise defaults.
    subject_start, subject_end : intended subject-range slice; currently
        unused (the original slicing was commented out) — kept for
        interface compatibility. TODO: confirm whether slicing should be
        re-enabled.

    Returns
    -------
    (results, probs, reorder) : raw grid-search output, per-result
        log-likelihood probability grids, and their reordered form.
    """
    beliefs, metas, actual, _, qnames = vars_from_data(dataset=expt)
    beliefs = beliefs[qnum]
    metas = metas[qnum]
    # NOTE(review): subject_start/subject_end slicing of beliefs/metas was
    # disabled in the original; re-enable here if per-subject runs are needed.

    num_qs = 1
    num_ppl = beliefs.shape[1]

    params = setup_params.init_params_demo1()
    params['num_qs'] = num_qs
    params['num_responses'] = num_ppl
    params['binary_beliefs'] = False
    params['meta_noise_hierarchy'] = "default"
    params['meta_noise_default'] = sn
    params['own_noise_hierarchy'] = "default"
    params['own_noise_default'] = sn
    params['use_expertise'] = False
    params['expertise_hierarchy'] = "individual"
    params['prior_wp_sm'] = "signal_assumption"
    params['do_parallel'] = False

    # Coarse grids for speed; original notes suggest 33 / 200 for full runs.
    wp_grid = 10
    sm_grid = 50

    results = geom_grid(beliefs, metas, params, wp_grid, sm_grid)
    probs = [geom_probs_ll(res, wp_grid, sm_grid) for res in results]
    for pp in probs:
        # Diagnostic: total probability mass over the first slice.
        print(np.nansum(pp[0, :, :, :]))
    reorder = [geom_reorder(pp, wp_grid, sm_grid) for pp in probs]
    # Bug fix: the original ended with a deliberate `1/0` debug crash and
    # never returned anything; return the computed values instead.
    return results, probs, reorder
def demo8(num_mh=None):
    """Run the generic demo with non-binary beliefs and truncated-normal
    noise at the "default" hierarchy level.

    Parameters
    ----------
    num_mh : int or None
        Number of MH samples; ``None`` selects the default of 5000.

    Returns
    -------
    Whatever ``generic_demo`` produces for this configuration.
    """
    params = setup_params.init_params_demo1()
    params.update({
        'prior_wp_sm': "gen_bij_infer_all",
        'binary_beliefs': False,
        # "individual" hierarchies were tried previously; "default" is active.
        'meta_noise_hierarchy': "default",
        'own_noise_hierarchy': "default",
        'meta_noise_type': "truncnorm",
        'own_noise_type': "truncnorm",
        'meta_noise_default': 1,
        'own_noise_default': 1,
        'num_responses': 15,
        'use_expertise': False,
        'expertise_hierarchy': "individual",
        'num_qs': 1,
        'num_mh_samples': 5000 if num_mh is None else num_mh,
        'thin': 1,
        'num_mh_expertise': 0,
        'num_mh_outer': 1,
        'use_meta': True,
        'do_parallel': False,
        'fxd_actual': False,
        'all_same': False,
        'transition': "separate",
    })
    return generic_demo(params)
def lesions_expt(qnum):
    """Run MH inference on one question of the "lesions" dataset.

    Parameters
    ----------
    qnum : index of the question to run inference on.

    Returns
    -------
    (mh, beliefs, meta, act) : MH summary plus the full dataset arrays
        (all questions, not just ``qnum``) for downstream comparison.
    """
    # NOTE(review): "lesions" apparently returns 4 values (no qnames),
    # unlike other datasets — confirm against vars_from_data.
    beliefs, meta, act, nom = vars_from_data(dataset="lesions")
    params = setup_params.init_params_demo1()
    params['num_qs'] = 1
    params['num_responses'] = beliefs[qnum].shape[1]
    params['num_mh_samples'] = 200
    # Slicing with qnum:qnum+1 keeps the leading per-question axis so
    # do_inference still receives a list of (one) question.
    states, indiv_states, accept, internals = do_inference(
        params, beliefs[qnum:qnum + 1], meta[qnum:qnum + 1])
    # Consistency fix: pass params, matching simple_mh_all's call to
    # diagnostics.summarise_qs(states, indiv_states, params).
    mh = diagnostics.summarise_qs(states, indiv_states, params)
    return mh, beliefs, meta, act
def simple_mh_all(expt, num_mh, qnums=None):
    """Run MH inference over all (or selected) questions of a dataset.

    Parameters
    ----------
    expt : dataset identifier forwarded to ``vars_from_data``.
    num_mh : number of MH samples.
    qnums : optional list of question indices; ``None`` keeps them all.

    Returns
    -------
    (mh, beliefs, meta, act, qnames) : MH summary plus the (possibly
        filtered) data arrays and question names.
    """
    beliefs, meta, act, nom, qnames = vars_from_data(dataset=expt)
    if qnums is not None:
        # Restrict every per-question structure to the requested subset.
        beliefs = [beliefs[qn] for qn in qnums]
        meta = [meta[qn] for qn in qnums]
        act = [act[qn] for qn in qnums]
        nom = [nom[qn] for qn in qnums]
    params = setup_params.init_params_demo1()
    params['num_qs'] = len(beliefs)
    params['num_mh_samples'] = num_mh
    params['do_parallel'] = False
    states, indiv_states, accept, internals = do_inference(params, beliefs, meta)
    mh = diagnostics.summarise_qs(states, indiv_states, params)
    save_mh_results(qnames, mh, act, params)
    # Bug fix: a leftover `1/0` debug crash made the return unreachable.
    return mh, beliefs, meta, act, qnames
def wp_expt(sm=np.array([[.7, .4], [.3, .6]]), wp=np.array([.1, .9]),
            change_wp=True):
    """Sweep generator vs. answer parameter grids and record world
    log-likelihoods plus belief/meta matrix entries at each grid point.

    For each generating value (rows) data is simulated, and for each
    answering value (columns) the marginal report log-likelihood for both
    worlds is evaluated.

    Parameters
    ----------
    sm : 2x2 signal matrix used when sweeping world priors (change_wp=True).
        NOTE: mutable np.array default — shared across calls; it is only
        read here, never mutated.
    wp : world-prior vector used when sweeping signal matrices instead.
    change_wp : if True, sweep the world prior; otherwise sweep the
        signal-matrix first column.

    Returns
    -------
    dict with keys 'll0'/'ll1' (log-likelihood grids for worlds 0/1),
    'bm0'/'bm1' (belief-matrix entries), 'mm0'/'mm1' (meta-matrix
    entries) and 'mm0avg' (meta entry gated by belief thresholds).
    """
    params = setup_params.init_params_demo1()
    params['binary_beliefs'] = False
    indiv = fwd.generate_individual(params)

    grid = 100
    diff = 0
    # Hacky since grid needn't be a nice number (original author's note).
    size = grid - grid * diff
    res0 = np.zeros((size, size))
    res1 = np.zeros((size, size))
    bm0 = np.zeros((size, size))
    bm1 = np.zeros((size, size))
    mm0 = np.zeros((size, size))
    mm1 = np.zeros((size, size))
    mm0avg = np.zeros((size, size))

    for k, gen in enumerate(np.arange(diff * grid, grid)):
        actual = {}
        gen_s0 = gen / grid + .01  # +.01 keeps values off the 0/1 boundary
        if change_wp:
            actual['wp'] = np.array([gen_s0, 1 - gen_s0])
            actual['sm'] = sm
        else:
            actual['wp'] = wp
            actual['sm'] = np.array([[gen_s0, gen_s0 - diff],
                                     [1 - gen_s0, 1 - gen_s0 + diff]])
        actual['world'] = 0
        actual['signals'] = fwd.sample_signals(actual['world'], actual['sm'],
                                               params['num_responses'])
        beliefs, meta = fwd.generate_data(actual, indiv, params)

        for j, ans in enumerate(np.arange(grid * diff, grid)):
            ans_s0 = ans / grid + .01
            if change_wp:
                ans_wp = np.array([ans_s0, 1 - ans_s0])
                ans_sm = actual['sm']
            else:
                ans_wp = actual['wp']
                ans_sm = np.array([[ans_s0, ans_s0 - diff],
                                   [1 - ans_s0, 1 - ans_s0 + diff]])
            # Marginal report log-likelihood for each candidate world,
            # plus the log prior probability of the true world.
            ll = np.array([aggregation.reports_ll_marg(
                beliefs, meta, w, ans_sm, ans_wp,
                indiv['own_noise'], indiv['meta_noise'], params)
                for w in [0, 1]])
            ll += aggregation.log_prob_world(actual['world'], ans_wp)

            bel = fwd.calc_belief_matrix(ans_sm, ans_wp)
            bm0[k, j] = bel[0, 0]
            bm1[k, j] = bel[0, 1]
            met = fwd.calc_meta_matrix(ans_sm, ans_wp)
            mm0[k, j] = met[0, 0]
            mm1[k, j] = met[0, 1]
            # Gate the meta entry by belief thresholds.
            # NOTE(review): the elif tests bel[0, 1] rather than bel[0, 0];
            # preserved from the original — confirm this is intended.
            if bel[0, 0] < .5:
                mm0avg[k, j] = 0
            elif bel[0, 1] > .5:
                mm0avg[k, j] = 1
            else:
                mm0avg[k, j] = met[0, 0]
            res0[k, j] = ll[0]
            res1[k, j] = ll[1]

    # As in the original, both grids are drawn onto the current axes
    # (the second call overlays the first).
    plt.imshow(res0, cmap=cm.Greys_r)
    plt.imshow(res1, cmap=cm.Greys_r)
    # Bug fixes: removed the trailing `1/0` debug crash and the
    # `return res` NameError (`res` was never defined); removed the unused
    # local `non_bij`. All computed grids are returned for inspection.
    return {'ll0': res0, 'll1': res1, 'bm0': bm0, 'bm1': bm1,
            'mm0': mm0, 'mm1': mm1, 'mm0avg': mm0avg}