def test_neutral_also():
    for term in list_of_terms:
        patient = Semiology(
            term.strip(),
            symptoms_side=Laterality.LEFT,
            dominant_hemisphere=Laterality.LEFT,
        )
        ###
        ##
        #
        # if we want to use the dummy_data instead of the real Semio2Brain DataFrame:
        # repo_dir, resources_dir, dummy_data_path, dummy_semiology_dict_path = \
        #     file_paths(dummy_data=True)
        # patient.data_frame, _, _ = MEGA_ANALYSIS(
        #     excel_data=dummy_data_path,
        #     n_rows=100,
        #     usecols="A:DH",
        #     header=1,
        #     exclude_data=False,
        #     plot=True,
        # )
        #
        ##
        ###
        heatmap, _ = patient.get_num_datapoints_dict()
        assert isinstance(heatmap, dict)
def run_query():
    parser = ArgumentParser(description="Epilepsy SVT query")
    # for dev purposes when updating the local data, to check queries still work;
    # make this into a proper test later
    parser.add_argument('semio')
    parser.add_argument('symptoms_side')
    parser.add_argument('dominant_hemisphere')  # add a default later e.g. Laterality.LEFT
    parser.add_argument(
        '--true', '-t',
        action='store_true',
    )  # -t: future use, to be able to run or omit a section of the code
    arguments = parser.parse_args()

    # convert the CLI strings (e.g. 'LEFT') to Laterality members;
    # assumes the arguments are spelled like the Laterality member names
    symptoms_side = Laterality[arguments.symptoms_side.upper()]
    dominant_hemisphere = Laterality[arguments.dominant_hemisphere.upper()]

    heatmap = Semiology(
        arguments.semio,
        symptoms_side,
        dominant_hemisphere,
    )
    num_patients_dict, _ = heatmap.get_num_datapoints_dict()
    print('Result:', num_patients_dict)

    # output2 = output1.SOMEFUNCTION(arguments.true)
    # -t: future use, to be able to run or omit a section of the code
    try:
        if arguments.true:
            pass
        else:
            pass
    except TypeError:
        pass
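# A minimal usage sketch (an assumption, not part of the repository): invoke
# run_query as a script. The module filename and the example semiology term
# below are illustrative only; ArgumentParser, Semiology and Laterality are
# assumed to be imported at the top of the file.
#
#   python run_query.py Epigastric LEFT LEFT
#
if __name__ == '__main__':
    run_query()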
def _test_neutral_only(self):
    # the leading underscore keeps this test from being collected by pytest
    self.gen_term = self.list_of_terms_wrapper()
    term = str(list(self.gen_term))
    patient = Semiology(
        term.strip(),
        symptoms_side=Laterality.LEFT,
        dominant_hemisphere=Laterality.LEFT,
    )
    heatmap, _ = patient.get_num_datapoints_dict()
    assert isinstance(heatmap, dict)
for term in list_of_terms:
    patient = Semiology(
        term.strip(),
        symptoms_side=Laterality.LEFT,
        dominant_hemisphere=Laterality.LEFT,
        include_postictals=True,
    )
    ###
    ##
    #
    # if we want to use the dummy_data instead of the real Semio2Brain DataFrame:
    # repo_dir, resources_dir, dummy_data_path, dummy_semiology_dict_path = \
    #     file_paths(dummy_data=True)
    # patient.data_frame, _, _ = MEGA_ANALYSIS(
    #     excel_data=dummy_data_path,
    #     n_rows=100,
    #     usecols="A:DH",
    #     header=1,
    #     exclude_data=False,
    #     plot=True,
    # )
    #
    ##
    ###
    # unpack the (dict, DataFrame) pair, as done elsewhere in this file
    heatmap, _ = patient.get_num_datapoints_dict()
    print("\nSemiology: ", term)
    print('\nResult:', heatmap, '\n')
Patient_VisualRight = Semiology(
    'Visual',
    symptoms_side=Laterality.RIGHT,
    dominant_hemisphere=Laterality.NEUTRAL,
    normalise_to_localising_values=True,
    # again not relevant as using Bayesian only - not using the df from this,
    # but just the lateralising values:
    global_lateralisation=True,
    # include_et_topology_ez=False,  # not relevant as using Bayesian only
    # include_cortical_stimulation=False,
    # include_spontaneous_semiology=True,
)
# df_proportions, all_combind_gif_dfs = get_df_from_semiologies(
#     [Patient_VisualRight], method=method)
# # we want <:
# assert round(all_combind_gif_dfs.loc['Visual', 32], 3) < \
#     round(all_combind_gif_dfs.loc['Visual', 33], 3)
num_datapoints_dict, all_combined_gif_df = Patient_VisualRight.get_num_datapoints_dict(
    method=method)
assert round(all_combined_gif_df.loc[32, 'pt #s'], 3) < round(
    all_combined_gif_df.loc[33, 'pt #s'], 3)

# new query
patient = Semiology(
    # 'Figure of 4',
    # symptoms_side=Laterality.LEFT,
    # dominant_hemisphere=Laterality.LEFT,
    # 'Blink',
    # Laterality.NEUTRAL,
    # Laterality.LEFT,
    # 'All Automatisms (oral, automotor)',
    # Laterality.LEFT,
        desc='Semiologies',
        bar_format="{l_bar}%s{bar}%s{r_bar}" % (Fore.RED, Fore.RESET)):
    if term.strip() in [
            "No Semiology - Only Stimulation Studies",
            "Hypomotor",
    ]:
        continue
    patient = Semiology(
        term.strip(),
        symptoms_side=Laterality.NEUTRAL,
        dominant_hemisphere=Laterality.NEUTRAL,
        granular=True,  # hierarchy reversal
        include_cortical_stimulation=False,
        include_et_topology_ez=False,
        include_spontaneous_semiology=True,  # SS
        normalise_to_localising_values=True,  # normalise to pt numbers
        include_paeds_and_adults=True,  # paeds and adults
    )
    num_datapoints_dict[term.strip()], all_combined_gif_df = \
        patient.get_num_datapoints_dict()

# set the zero ones:
num_datapoints_dict['Hypomotor'] = num_datapoints_dict['Epigastric'].copy()
for k, v in num_datapoints_dict['Hypomotor'].items():
    num_datapoints_dict['Hypomotor'][k] = 0

ali = pd.DataFrame.from_dict(num_datapoints_dict, orient='index')
# folder_filename is assumed to be a CSV path defined elsewhere;
# the original `folder_filename.csv` was not a valid expression
ali.to_csv(folder_filename)
print('done')
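# A more concise, equivalent way to build the all-zero 'Hypomotor' entry created
# by the zeroing loop above (a sketch, assuming the values are plain numbers):
#
#     num_datapoints_dict['Hypomotor'] = dict.fromkeys(num_datapoints_dict['Epigastric'], 0)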
    level=logging.INFO,
    filemode='w',
)

rows = []
for semiology_term in get_all_semiology_terms():
    for symptoms_side in sides:
        for dominant_hemisphere in sides:
            tic = time.time()
            semiology = Semiology(
                semiology_term,
                symptoms_side,
                dominant_hemisphere,
            )
            try:
                scores_dict, _ = semiology.get_num_datapoints_dict()
            except Exception as e:
                logging.error(e)
                scores_dict = None
            toc = time.time()
            seconds = toc - tic
            if scores_dict is None:
                function = logging.error
            elif seconds > 1:
                function = logging.warning
            else:
                function = logging.info
            function(f'Semiology term: {semiology_term}')
            function(f'Symptoms side: {symptoms_side}')
            function(f'Dominant hemisphere: {dominant_hemisphere}')
            function(f'Time: {seconds} seconds')