                length.append(len(transformed_trial))

        # Keep only the first TRIAL_LENGTH trials for this participant.
        trials_data = trials_data[:TRIAL_LENGTH]
        all_data[participant_id] = trials_data
        count = np.zeros(shape=[6, 3, 6])

        # Per-trial step counts and (normalized) reaction times.
        for trial_id, trial in enumerate(trials_data):
            saver.save_data("step", participant_id, trial_id, len(trial))
            time = 0
            for step in trial:
                time += step[3]
            saver.save_data("time", participant_id, trial_id, time)
            saver.save_data("normalized_time", participant_id, trial_id, time / len(trial))

        # Optimal-choice probabilities from the participant's real (non-simulated) data.
        result = optimal_probability(participant_id, trials_data, is_simulate=False)
        saver.save_trials_data("optimal", participant_id, result[0])
        saver.save_trials_data("optimal_inner", participant_id, result[1]["inner"])
        saver.save_trials_data("optimal_outer", participant_id, result[1]["outer"])
        saver.save_trials_data("optimal_last", participant_id, result[1]["last"])

        if NEED_UPDATE_DATA_FRAME:
            for trial in range(TRIAL_LENGTH):
                block = trial // 36
                timestep = trial % 36
                optimal_data_frame = optimal_data_frame.append(
                    {"step": saver.get_trial_data("step", participant_id),
                     "reaction_time": saver.get_trial_data("time", participant_id),
            continue

        # Parse this participant's raw log: a "#"-delimited CSV whose
        # "trial_data" column holds a JSON-encoded trial ("--" marks missing trials).
        with open(path, "r") as rawFile:
            reader = csv.DictReader(rawFile, delimiter="#")
            trials_data = []
            length = []
            for row in reader:
                trial = row["trial_data"]
                if trial != "--":
                    transformed_trial = json.loads(trial)
                    trials_data.append(transformed_trial)
                    length.append(len(transformed_trial))
        trials_data = trials_data[:TRIAL_LENGTH]

        steps = []
        for trial in trials_data:
            steps.append(len(trial))
        # all_reduction[alpha][tau][participant_id] = steps

        # Simulated optimal-choice probabilities; keep only the "last" component here.
        result = optimal_probability(participant_id, trials_data,
                                     is_simulate=True, is_randomized=randomized)
        # all_reduction[alpha][tau][participant_id] = result[0]
        # all_reduction[alpha][tau][participant_id] = result[1]["inner"]
        # all_reduction[alpha][tau][participant_id] = result[1]["outer"]
        all_reduction[alpha][tau][participant_id] = result[1]["last"]

# Persist the accumulated results, keyed as alpha -> tau -> participant_id.
with open("optimal_last_%s_randomized.pkl" % SIMULATE_METHOD, "wb") as f:
    pickle.dump(all_reduction, f)
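
# Sketch only, not part of the original pipeline: a small helper for reloading the
# pickle written above in a later analysis step. The name load_optimal_last and the
# pkl_path parameter are assumptions; it relies on the module-level pickle import
# already used above and returns the nested dict
# all_reduction[alpha][tau][participant_id] -> per-trial "last" values.
def load_optimal_last(pkl_path):
    """Load a pickled results dict keyed as alpha -> tau -> participant_id."""
    with open(pkl_path, "rb") as f:
        return pickle.load(f)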