Example #1
def set_scorer_a_and_h(scorer: Autoscorer, a_and_h_overlap: float = 0,
                       REM_length: int = 60, f_s: int = 100) -> None:
    """Set the scorer's apnea/hypopnea index: an empty list when there is no
    overlap, otherwise a single interval (in samples) covering the first
    a_and_h_overlap fraction of the REM period."""
    assert 0 <= a_and_h_overlap <= 1, f"a_and_h_overlap must be between 0 and 1, not {a_and_h_overlap}!"
    if a_and_h_overlap == 0:
        scorer.a_and_h_idx = []
    else:
        scorer.a_and_h_idx = [(0, int(f_s * REM_length * a_and_h_overlap))]
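A minimal usage sketch, assuming an Autoscorer constructed with the same parameters as in Example #4 (the overlap value of 0.5 is illustrative):

scorer = Autoscorer(t_amplitude_threshold=3, t_continuity_threshold=10,
                    p_mode='mean', p_amplitude_threshold=2, p_quantile=0.99,
                    p_continuity_threshold=1, p_baseline_length=120,
                    ignore_hypoxics_duration=15, return_seq=True,
                    return_tuple=False)
# Mark the first half of a 60 s REM period sampled at 100 Hz as overlapping
# an apnea/hypopnea event.
set_scorer_a_and_h(scorer=scorer, a_and_h_overlap=0.5, REM_length=60, f_s=100)
# scorer.a_and_h_idx is now [(0, 3000)]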
Example #2
def set_scorer_times(scorer: Autoscorer, nrem_start_time: int = 4940,
                     nrem_end_time: int = 5000, rem_start_time: int = 5000,
                     rem_end_time: int = 5060) -> None:
    """Set the NREM and REM window boundaries on the scorer and initialize a
    minimal baseline_dict skeleton."""
    scorer.nrem_start_time = nrem_start_time
    scorer.nrem_end_time = nrem_end_time
    scorer.rem_start_time = rem_start_time
    scorer.rem_end_time = rem_end_time
    scorer.baseline_dict = {'RSWA_P': {'REM_0': {}, 'REM_1': {}}, 'RSWA_T': {}}
Example #3
def set_scorer_attributes(scorer: Autoscorer, rem_subsequences: int = 2,
                          channels: list = ['Chin', 'L Leg', 'R Leg'],
                          nrem_start_time: int = 4940, nrem_end_time: int = 5000,
                          rem_start_time: int = 5000, rem_end_time: int = 5060,
                          a_and_h_overlap: float = 0, f_s: int = 100) -> None:
    """Populate the event index, time windows, baseline dictionary, and
    apnea/hypopnea index on the scorer via the set_scorer_* helpers."""
    scorer.p_event_idx = make_event_idx(channels = channels)
    # Set the times first so the per-channel baseline_dict built below is not
    # overwritten by the skeleton assigned inside set_scorer_times.
    set_scorer_times(scorer = scorer, nrem_start_time = nrem_start_time,
                     nrem_end_time = nrem_end_time, rem_start_time = rem_start_time,
                     rem_end_time = rem_end_time)
    set_scorer_baseline_dict(scorer = scorer, rem_subsequences = rem_subsequences,
                             channels = channels)
    # Route the overlap parameters through set_scorer_a_and_h rather than
    # leaving them unused; with the default overlap of 0 this still yields [].
    set_scorer_a_and_h(scorer = scorer, a_and_h_overlap = a_and_h_overlap, f_s = f_s)
Example #4
def test_peaky():
    """Score synthetic 'peaky' data with a low baseline and return the
    results of findP_over_threshold."""
    scorer = Autoscorer(t_amplitude_threshold = 3,
                 t_continuity_threshold = 10, p_mode = 'mean',
                 p_amplitude_threshold = 2, p_quantile = 0.99,
                 p_continuity_threshold = 1, p_baseline_length = 120,
                 ignore_hypoxics_duration = 15, return_seq = True,
                 return_tuple = False)
    set_scorer_attributes(scorer)
    scorer.rem_subseq = 0
    data = make_fake_data(mode = 'peaky', baseline_type = 'low', REM_length = 60)
    results = scorer.findP_over_threshold(data = data)
    print(f"scorer a_and_h_idx: {scorer.a_and_h_idx}")
    return results
Example #5
def make_evaluation_file():
    """Score each unique ID found in the prediction file at path_to_IDs with
    the Autoscorer, evaluate against the annotations, and pickle the per-file
    results."""
    path_to_IDs = '/Users/danielyaeger/Documents/Modules/sleep-research-ml/data/supplemental/test_predictions_cnn_multi_channel_window_20.p'
    results = {}
    # Open file to find unique IDs
    with open(path_to_IDs, 'rb') as fh:
        data = pickle.load(fh)
    unique_IDs = {x.split('_')[0] for x in data.keys()}
    for ID in unique_IDs:
        scorer = Autoscorer(ID=ID,
                            t_amplitude_threshold=1,
                            t_continuity_threshold=20,
                            p_mode='quantile',
                            p_amplitude_threshold=4,
                            p_quantile=0.5,
                            p_continuity_threshold=5,
                            return_seq=True,
                            return_concat=True,
                            return_multilabel_track=True,
                            return_matrix_event_track=True,
                            return_tuple=False)
        predictions = scorer.score_REM()
        annotations = scorer.get_annotations()
        for subseq in predictions.keys():
            num = subseq.split('_')[-1]
            file_name = ID + '_' + num
            if file_name in data.keys():
                results[file_name] = {}
                results[file_name]['targets'] = annotations[subseq]
                results[file_name]['predictions'] = predictions[subseq]
                evaluator = Evaluator(predictions=predictions[subseq],
                                      annotations=annotations[subseq],
                                      sequence=True,
                                      single_ID=True,
                                      single_subseq=True)
                results[file_name]['evaluation'] = {
                    'balanced_accuracy': evaluator.balanced_accuracy_signals(),
                    'confusion_matrix': evaluator.confusion_matrix_signals()}
                print(results[file_name])
    with open(
            '/Users/danielyaeger/Documents/Modules/sleep-research-ml/data/supplemental/test_predictions_rule_based_scorer.p',
            'wb') as fout:
        pickle.dump(obj=results, file=fout)
    return results
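A short sketch of how the pickled output could be inspected afterwards (the path is the one hard-coded above; the layout mirrors the results dictionary built in make_evaluation_file):

import pickle

out_path = ('/Users/danielyaeger/Documents/Modules/sleep-research-ml/data/'
            'supplemental/test_predictions_rule_based_scorer.p')
with open(out_path, 'rb') as fh:
    results = pickle.load(fh)
# Keys are '<ID>_<subsequence number>'; each entry holds 'targets',
# 'predictions', and an 'evaluation' dict with 'balanced_accuracy' and
# 'confusion_matrix'.
for file_name, entry in results.items():
    print(file_name, entry['evaluation']['balanced_accuracy'])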
Example #6
def set_scorer_baseline_dict(scorer: Autoscorer, rem_subsequences: int = 2,
                             channels: list = ['Chin', 'L Leg', 'R Leg']) -> None:
    """Build a baseline_dict with one entry per REM subsequence for both
    RSWA_P and RSWA_T; only the RSWA_T entries get per-channel sub-dicts."""
    baseline_dict = {'RSWA_P': {}, 'RSWA_T': {}}
    for i in range(rem_subsequences):
        baseline_dict['RSWA_P'][f'REM_{i}'] = {}
        baseline_dict['RSWA_T'][f'REM_{i}'] = {}
        for channel in channels:
            baseline_dict['RSWA_T'][f'REM_{i}'][channel] = {}
    scorer.baseline_dict = baseline_dict
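For reference, the structure this produces with the default arguments (two REM subsequences, three channels):

# scorer.baseline_dict after set_scorer_baseline_dict(scorer):
# {'RSWA_P': {'REM_0': {}, 'REM_1': {}},
#  'RSWA_T': {'REM_0': {'Chin': {}, 'L Leg': {}, 'R Leg': {}},
#             'REM_1': {'Chin': {}, 'L Leg': {}, 'R Leg': {}}}}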
Example #7
def generalization(data_path = Path('/Users/danielyaeger/Documents/processed_data/processed'),
                 partition_file_name = 'data_partition.p', partition_mode = "test"):
    """Score every ID in the chosen data partition with fixed Autoscorer
    parameters and report balanced accuracy, inter-epoch agreement, and
    diagnostic accuracy across all REM subsequences."""
    balanced_accuracies = []
    interepoch_agreements = []
    diagnosis_accuracy = []
    with data_path.joinpath(partition_file_name).open('rb') as fh:
        ID_list = list(pickle.load(fh)[partition_mode])
        ID_list = [x for x in ID_list if len(re.findall('[0-9A-Z]', x)) > 0]
    ID_list = [s.split('_')[0] for s in ID_list]
    ID_list = list(set(ID_list))
    for ID in ID_list:
        scorer = Autoscorer(ID = ID, data_path = data_path,
                 f_s = 10,
                 t_amplitude_threshold = 4,
                 t_continuity_threshold = 30, 
                 p_mode = 'quantile', 
                 p_amplitude_threshold = 2, 
                 p_quantile = 0.9,
                 p_continuity_threshold = 8, 
                 p_baseline_length = 120, 
                 ignore_hypoxics_duration = 15,
                 return_seq = True, return_concat = True, 
                 return_tuple = False, 
                 phasic_start_time_only = True,
                 return_multilabel_track = True,
                 verbose = False)
        predictions = scorer.score_REM()
        annotations = scorer.get_annotations()
        for seq in predictions:
            evaluator = Evaluator(predictions = predictions[seq], annotations = annotations[seq], 
                 sequence = True, single_ID = True, single_subseq = True, verbose = False)
            balanced_accuracies.append(evaluator.balanced_accuracy_signals())
            interepoch_agreements.append(evaluator.cohen_kappa_epoch())
            diagnosis_accuracy.append(evaluator.accuracy_score_diagnosis())
            print(f"For ID: {ID}\tsubseq: {seq}\tbalanced_accuracy: {balanced_accuracies[-1]}\tinter_epoch_agreement: {interepoch_agreements[-1]}\tdiagnosis_accuracy: {diagnosis_accuracy[-1]}")
    print(f'Mean balanced accuracy: {np.mean(np.array(balanced_accuracies))}')
    print(f"Standard deviation balanced accuracy: {np.std(np.array(balanced_accuracies))}")
    print(f"Mean Inter-epoch agreement: {np.mean(np.array(interepoch_agreements))}")
    print(f"Standard deviation inter-epoch agreement: {np.std(np.array(interepoch_agreements))}")
    print(f"Mean diagnostic accuracy: {np.mean(np.array(diagnosis_accuracy))}")
    print(f"Standard deviation diagnostic accuracy: {np.std(np.array(diagnosis_accuracy))}")
    return np.array(balanced_accuracies), np.array(interepoch_agreements), np.array(diagnosis_accuracy)       
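A usage sketch, assuming the default data path and partition file are available; the returned arrays hold one entry per (ID, REM subsequence) pair scored in the loop above:

import numpy as np

balanced, kappas, diag = generalization(partition_mode="test")
# Summarize beyond the means and standard deviations already printed.
print(f"Scored {len(balanced)} REM subsequences; "
      f"median balanced accuracy: {np.median(balanced)}")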
Example #8
def _score(self, ID) -> tuple:
    """Calls the Autoscorer score_REM method with the input ID. Returns
    (results_dict, annotations_dict, collisions) when return_multilabel_track
    is set, otherwise (results_dict, annotations_dict)."""
    scorer = Autoscorer(ID = ID, data_path = self.data_path,
             f_s = self.f_s,
             t_amplitude_threshold = self.t_amplitude_threshold,
             t_continuity_threshold = self.t_continuity_threshold,
             p_mode = self.p_mode,
             p_amplitude_threshold = self.p_amplitude_threshold,
             p_quantile = self.p_quantile,
             p_continuity_threshold = self.p_continuity_threshold,
             p_baseline_length = self.p_baseline_length,
             ignore_hypoxics_duration = self.ignore_hypoxics_duration,
             return_seq = self.return_seq, return_concat = self.return_concat,
             return_tuple = self.return_tuple,
             phasic_start_time_only = self.phasic_start_time_only,
             return_multilabel_track = self.return_multilabel_track,
             verbose = self.verbose)
    if self.return_multilabel_track:
        return (scorer.score_REM(), scorer.get_annotations(), scorer.get_collisions())
    else:
        return (scorer.score_REM(), scorer.get_annotations())