def test_concatenate_and_computations(self):
    """Concatenating sessions preserves trial count and reproduces known metrics.

    Loads three fixture sessions, concatenates them, and checks easy-contrast
    performance, per-session trial counts, psychometric fit parameters and the
    zero-contrast median reaction time against reference values.
    """
    trials, _ = self._get_trials(sess_dates=['2020-08-25', '2020-08-24', '2020-08-21'])
    expected_total = sum(len(trials[day]['contrastRight']) for day in trials)

    merged = train.concatenate_trials(trials)
    assert len(merged['contrastRight']) == expected_total

    easy_perf = np.array([train.compute_performance_easy(trials[day]) for day in trials])
    counts = np.array([train.compute_n_trials(trials[day]) for day in trials])
    psych_params = train.compute_psychometric(merged)
    median_rt = train.compute_median_reaction_time(merged, contrast=0)

    np.testing.assert_allclose(easy_perf, [0.91489362, 0.9, 0.90853659])
    np.testing.assert_array_equal(counts, [617, 532, 719])
    np.testing.assert_allclose(
        psych_params, [4.04487042, 21.6293942, 1.91451396e-02, 1.72669957e-01], rtol=1e-5)
    assert np.isclose(median_rt, 0.83655)
def _behaviour_criterion(self):
    """
    Computes and updates the behaviour criterion on Alyx.

    Loads the session's trials, evaluates the delay training criterion and
    writes the boolean result into the session's ``extended_qc`` JSON field.
    A no-op when no Alyx instance is attached.
    """
    import alf.io
    from brainbox.behavior import training

    # if no instance of Alyx is provided, do not touch any database
    if self.one is None:
        return

    alf_folder = self.session_path.joinpath('alf')
    trials = alf.io.load_object(alf_folder, 'trials')
    good_enough = training.criterion_delay(
        n_trials=trials['intervals'].shape[0],
        perf_easy=training.compute_performance_easy(trials))
    eid = self.one.eid_from_path(self.session_path)
    self.one.alyx.json_field_update(
        'sessions', eid, 'extended_qc', {'behavior': int(good_enough)})
def _behaviour_criterion(self): """ Computes and update the behaviour criterion on Alyx """ import alf.io from brainbox.behavior import training trials = alf.io.load_object(self.session_path.joinpath("alf"), "trials") good_enough = training.criterion_delay( n_trials=trials["intervals"].shape[0], perf_easy=training.compute_performance_easy(trials), ) eid = self.one.eid_from_path(self.session_path) self.one.alyx.json_field_update("sessions", eid, "extended_qc", {"behavior": int(good_enough)})
def test_concatenate_and_computations(self):
    """Combined sessions keep the full trial count and reproduce known metrics.

    Builds a trials Bunch from deep-copied fixture data for three dates,
    strips the task_protocol metadata, concatenates the sessions and checks
    the computed metrics against reference values.
    """
    dates = ['2020-08-25', '2020-08-24', '2020-08-21']
    fixture = copy.deepcopy(self.trial_data)
    trials = Bunch(zip(dates, [fixture[d] for d in dates]))
    # task_protocol is session metadata, not a trials field: drop it first
    for day in trials.keys():
        trials[day].pop('task_protocol')

    expected_total = np.sum([len(trials[day]['contrastRight']) for day in trials.keys()])
    merged = train.concatenate_trials(trials)
    assert len(merged.contrastRight) == expected_total

    easy_perf = np.array([train.compute_performance_easy(trials[day]) for day in trials.keys()])
    counts = np.array([train.compute_n_trials(trials[day]) for day in trials.keys()])
    psych_params = train.compute_psychometric(merged)
    median_rt = train.compute_median_reaction_time(merged)

    np.testing.assert_allclose(easy_perf, [0.91489362, 0.9, 0.90853659])
    np.testing.assert_array_equal(counts, [617, 532, 719])
    np.testing.assert_allclose(
        psych_params, [4.04487042, 21.6293942, 1.91451396e-02, 1.72669957e-01], rtol=1e-5)
    assert np.isclose(median_rt, 0.83655)
def get_training_info_for_session(session_paths, one):
    """
    Extract the training information needed for plots for each session.

    :param session_paths: list of session paths on same date
    :param one: ONE instance
    :return: list of dicts, one per session, carrying per-session metrics and,
        when several same-protocol sessions share the date, combined metrics
    """
    records = []
    for session_path in session_paths:
        session_path = Path(session_path)
        record = {}
        record['date'] = str(one.path2ref(session_path)['date'])
        record['session_path'] = str(session_path)
        record['task_protocol'] = get_session_extractor_type(session_path)

        if record['task_protocol'] == 'habituation':
            # Habituation sessions carry no task metrics: fill with NaNs
            nan_array = np.array([np.nan])
            record['performance'], record['contrasts'], _ = (nan_array, nan_array, np.nan)
            record['performance_easy'] = np.nan
            record['reaction_time'] = np.nan
            record['n_trials'] = np.nan
            record['sess_duration'] = np.nan
            record['n_delay'] = np.nan
            record['location'] = np.nan
            record['training_status'] = 'habituation'
        else:
            # if we can't compute trials then we need to pass
            trials = load_trials(session_path, one)
            if trials is None:
                continue
            record['performance'], record['contrasts'], _ = \
                training.compute_performance(trials, prob_right=True)
            record['performance_easy'] = training.compute_performance_easy(trials)
            record['reaction_time'] = training.compute_median_reaction_time(trials)
            record['n_trials'] = training.compute_n_trials(trials)
            record['sess_duration'], record['n_delay'], record['location'] = \
                compute_session_duration_delay_location(session_path)
            record['task_protocol'] = get_session_extractor_type(session_path)
            record['training_status'] = 'not_computed'

        records.append(record)

    protocols = [rec['task_protocol'] for rec in records]
    if len(protocols) > 0 and len(set(protocols)) != 1:
        print(f'Different protocols on same date {records[0]["date"]} : {protocols}')

    if len(records) > 1 and len(set(protocols)) == 1:
        # Only combine when every session on the date ran the same protocol
        print(f'{len(records)} sessions being combined for date {records[0]["date"]}')
        combined_trials = load_combined_trials(session_paths, one)
        performance, contrasts, _ = training.compute_performance(combined_trials, prob_right=True)
        performance_easy = training.compute_performance_easy(combined_trials)
        reaction_time = training.compute_median_reaction_time(combined_trials)
        n_trials = training.compute_n_trials(combined_trials)
        sess_duration = np.nansum([rec['sess_duration'] for rec in records])
        n_delay = np.nanmax([rec['n_delay'] for rec in records])

        for rec in records:
            rec['combined_performance'] = performance
            rec['combined_contrasts'] = contrasts
            rec['combined_performance_easy'] = performance_easy
            rec['combined_reaction_time'] = reaction_time
            rec['combined_n_trials'] = n_trials
            rec['combined_sess_duration'] = sess_duration
            rec['combined_n_delay'] = n_delay

            # Case where two sessions on same day with different number of contrasts! Oh boy
            if rec['combined_performance'].size != rec['performance'].size:
                perf_pad = rec['combined_performance'].size - rec['performance'].size
                rec['performance'] = np.r_[rec['performance'], np.full(perf_pad, np.nan)]
                contrast_pad = rec['combined_contrasts'].size - rec['contrasts'].size
                rec['contrasts'] = np.r_[rec['contrasts'], np.full(contrast_pad, np.nan)]
    else:
        # Single session (or mismatched protocols): combined values mirror
        # the per-session values
        for rec in records:
            rec['combined_performance'] = rec['performance']
            rec['combined_contrasts'] = rec['contrasts']
            rec['combined_performance_easy'] = rec['performance_easy']
            rec['combined_reaction_time'] = rec['reaction_time']
            rec['combined_n_trials'] = rec['n_trials']
            rec['combined_sess_duration'] = rec['sess_duration']
            rec['combined_n_delay'] = rec['n_delay']

    return records