Example #1
0
def test_neutral_also():
    """Query every term in ``list_of_terms`` as a LEFT-sided, LEFT-dominant
    patient and check that a GIF-number heatmap dict is returned."""
    for raw_term in list_of_terms:
        subject = Semiology(
            raw_term.strip(),
            symptoms_side=Laterality.LEFT,
            dominant_hemisphere=Laterality.LEFT,
        )

        ###
        ##
        # # To run against the dummy_data instead of the real Semio2Brain
        # # DataFrame, load it and assign to subject.data_frame:
        # repo_dir, resources_dir, dummy_data_path, dummy_semiology_dict_path = \
        #     file_paths(dummy_data=True)

        # subject.data_frame, _, _ = MEGA_ANALYSIS(
        #     excel_data=dummy_data_path,
        #     n_rows=100,
        #     usecols="A:DH",
        #     header=1,
        #     exclude_data=False,
        #     plot=True,
        # )
        #
        ##
        ###

        gif_heatmap, _ = subject.get_num_datapoints_dict()
        assert isinstance(gif_heatmap, dict)
def run_query():
    """Parse CLI arguments, run one Semiology query and print the result.

    Positional arguments: the semiology term, then the symptoms side and
    dominant hemisphere as ``Laterality`` member names (e.g. ``LEFT``).
    """
    parser = ArgumentParser(description="Epilepsy SVT query")
    parser.add_argument(
        'semio'
    )  # where the data is locally for dev purposes when updating data to ensure works- make into test later
    parser.add_argument('symptoms_side')
    parser.add_argument(
        'dominant_hemisphere')  # add default later e.g. Laterality.LEFT
    parser.add_argument(
        '--true', '-t', action='store_true'
    )  # -t: future use to be able to run or omit a section of the code
    arguments = parser.parse_args()

    # BUG FIX: previously this read arguments.semio, so the symptoms side was
    # silently set to the semiology term.
    symptoms_side = arguments.symptoms_side
    dominant_hemisphere = arguments.dominant_hemisphere

    # BUG FIX: the old code accessed Laterality.symptoms_side — a lookup of a
    # (non-existent) member literally named "symptoms_side", which always
    # raised AttributeError. Resolve the member from the CLI string instead
    # (e.g. 'left' -> Laterality.LEFT).
    heatmap = Semiology(
        arguments.semio,
        Laterality[symptoms_side.upper()],
        Laterality[dominant_hemisphere.upper()],
    )
    num_patients_dict, _ = heatmap.get_num_datapoints_dict()
    print('Result:', num_patients_dict)

    # output2 = output1.SOMEFUNCTION(arguments.true)

    # -t: future use to be able to run or omit a section of the code
    try:
        if arguments.true:
            pass
        else:
            pass
    except TypeError:
        pass
 def _test_neutral_only(self):
     """Build one LEFT/LEFT Semiology query from the wrapped term list and
     assert a GIF heatmap dict comes back."""
     self.gen_term = self.list_of_terms_wrapper()
     # NOTE(review): str(list(...)) stringifies the whole list, brackets and
     # quotes included — presumably intentional here; confirm with the caller.
     term = str(list(self.gen_term))
     query = Semiology(
         term.strip(),
         symptoms_side=Laterality.LEFT,
         dominant_hemisphere=Laterality.LEFT,
     )
     gif_heatmap, _ = query.get_num_datapoints_dict()
     assert isinstance(gif_heatmap, dict)
def wrapper_TS_GIFs(
    p_S_norm,
    global_lateralisation: bool = False,
    include_paeds_and_adults: bool = True,
    include_only_postictals: bool = False,
    symptom_laterality='NEUTRAL',
    dominance='NEUTRAL',
    normalise_to_localising_values=True,
    hierarchy_reversal: bool = True,
):
    """
    Get all GIFs for all semiologies from Topological (include_spontaneous_semiology=False) studies, add their dict values and use this as the denominator to return p_S_given_GIF.
    As it queries all semiologies, too labour intensive to do on the fly, hence results are cached for the double NEUTRAL case.
    See below to make more efficient.
    """
    # initialise
    pt = {}  # semiology -> Semiology query object
    all_combined_gifs_superdict = {}  # semiology -> {GIF #: datapoints}
    # FIX: previously initialised as a plain dict and then re-wrapped in
    # Counter; start as an (empty) Counter directly.
    added_all_gifs = Counter()
    p_S_given_GIF = pd.DataFrame()

    for semiology in p_S_norm.index:
        pt[semiology] = Semiology(
            semiology,
            include_spontaneous_semiology=False,  # crucial
            symptoms_side=Laterality.NEUTRAL,
            dominant_hemisphere=Laterality.NEUTRAL,
            include_postictals=False,
            include_paeds_and_adults=include_paeds_and_adults,
            normalise_to_localising_values=normalise_to_localising_values,
            global_lateralisation=global_lateralisation,
        )
        pt[semiology].include_only_postictals = include_only_postictals
        pt[semiology].granular = hierarchy_reversal
        all_combined_gifs_superdict[semiology], all_combined_gif_df = pt[
            semiology].get_num_datapoints_dict(
                method='not proportions'
            )  # method=anything but default proportions

    # Sum the per-semiology GIF dicts; this seems to repeat what
    # marginal_GIF_probabilities does - can make more efficient.
    # FIX: iterate values directly (the old loop unpacked .items() and never
    # used the value). Counter '+' keeps only positive totals, as before.
    for gif_dict in all_combined_gifs_superdict.values():
        added_all_gifs = added_all_gifs + Counter(gif_dict)
    # turn counter back to dict
    added_all_gifs = dict(added_all_gifs)

    # so totals for each semiology, given a GIF, is added_all_gifs[GIF #]
    # now we need to look at each individual GIF and semio
    for semiology in p_S_norm.index:
        for GIF_no, v in all_combined_gifs_superdict[semiology].items():
            p_S_given_GIF.loc[semiology, GIF_no] = v / added_all_gifs[GIF_no]

    return p_S_given_GIF
    def test_combine_semiologies_proportions_sum_to_1(self):
        """Combine two normalised semiology queries and check that every row
        of the combined proportions DataFrame sums to 1."""
        patient = Semiology(
            'Head Version',
            symptoms_side=Laterality.LEFT,
            dominant_hemisphere=Laterality.NEUTRAL,
            normalise_to_localising_values=True,  # default is False
        )
        patient2 = Semiology(
            'Epigastric',
            symptoms_side=Laterality.LEFT,
            dominant_hemisphere=Laterality.NEUTRAL,
            normalise_to_localising_values=True,  # default is False
        )
        ###
        ##
        # # if we want to use the dummy_data instead of real Semio2Brain DataFrame:
        # repo_dir, resources_dir, dummy_data_path, dummy_semiology_dict_path = \
        #     file_paths(dummy_data=True)

        # patient.data_frame, _, _ = MEGA_ANALYSIS(
        #     excel_data=dummy_data_path,
        #     n_rows=100,
        #     usecols="A:DH",
        #     header=1,
        #     exclude_data=False,
        #     plot=True,
        # )
        ##
        ###
        ###
        ##
        # # if we want to set top_level_lobes to True:
        # patient.granular = False
        # patient.top_level_lobes = True

        df = combine_semiologies([patient, patient2],
                                 normalise_method='proportions')

        # BUG FIX: the old assertion `(df.sum(axis=1)).all() == 1` only
        # checked that every row sum was truthy (non-zero), not that it
        # equals 1. Compare each row sum to 1 with a float tolerance.
        row_sums = df.sum(axis=1)
        assert ((row_sums - 1).abs() < 1e-9).all()
Example #6
0
import mega_analysis
from mega_analysis import Semiology, Laterality
from pathlib import Path

from mega_analysis.crosstab.file_paths import file_paths
from mega_analysis.crosstab.mega_analysis.MEGA_ANALYSIS import MEGA_ANALYSIS

file = Path(__file__).parent/'resources' / \
    'semiologies_postictalsonly_neutral_also.txt'
# FIX: list(open(...)) left the file handle open; read through a context
# manager instead (readlines() yields the same newline-terminated list).
with open(file, 'r') as terms_file:
    list_of_terms = terms_file.readlines()

# Query each term as a LEFT-sided, LEFT-dominant patient, including
# postictal data.
for term in list_of_terms:
    patient = Semiology(
        term.strip(),
        symptoms_side=Laterality.LEFT,
        dominant_hemisphere=Laterality.LEFT,
        include_postictals=True,
    )

    ###
    ##
    # # if we want to use the dummy_data instead of real Semio2Brain DataFrame:
    # repo_dir, resources_dir, dummy_data_path, dummy_semiology_dict_path = \
    #     file_paths(dummy_data=True)

    # patient.data_frame, _, _ = MEGA_ANALYSIS(
    #     excel_data=dummy_data_path,
    #     n_rows=100,
    #     usecols="A:DH",
    #     header=1,
    #     exclude_data=False,
import mega_analysis
from mega_analysis import Semiology, Laterality

from mega_analysis.crosstab.file_paths import file_paths
from mega_analysis.crosstab.mega_analysis.MEGA_ANALYSIS import MEGA_ANALYSIS
from mega_analysis.semiology import get_df_from_semiologies, combine_semiologies

# Bayesian-only query for a right-sided 'Visual' semiology; assert the
# rounded patient count for GIF 32 is strictly below that for GIF 33.
method = 'Bayesian only'
Patient_VisualRight = Semiology(
    'Visual',
    symptoms_side=Laterality.RIGHT,
    dominant_hemisphere=Laterality.NEUTRAL,
    normalise_to_localising_values=True,
    # again not relevant as using Bayesian only - not using the df from
    # this, but just the lateralising values
    global_lateralisation=True,
    # include_et_topology_ez=False,  # not relevant as using Bayesian only
    # include_cortical_stimulation=False,
    # include_spontaneous_semiology=True,
)
# df_proportions, all_combind_gif_dfs = get_df_from_semiologies([Patient_VisualRight], method=method)
# # we want <:
# assert round(all_combind_gif_dfs.loc['Visual', 32], 3) < round(all_combind_gif_dfs.loc['Visual', 33], 3)

num_datapoints_dict, all_combined_gif_df = Patient_VisualRight.get_num_datapoints_dict(
    method=method)
count_gif_32 = round(all_combined_gif_df.loc[32, 'pt #s'], 3)
count_gif_33 = round(all_combined_gif_df.loc[33, 'pt #s'], 3)
assert count_gif_32 < count_gif_33

# new query
patient = Semiology(
    # 'Figure of 4',
num_datapoints_dict = {}

# Query each listed semiology with a fully NEUTRAL, hierarchy-reversed,
# spontaneous-semiology-only configuration; collect GIF datapoint dicts.
for term in tqdm(['Epigastric'],
                 desc='Semiologies',
                 bar_format="{l_bar}%s{bar}%s{r_bar}" %
                 (Fore.RED, Fore.RESET)):
    # Skipped here; 'Hypomotor' is backfilled with zeros below.
    if term.strip() in [
            "No Semiology - Only Stimulation Studies", "Hypomotor"
    ]:
        continue
    patient = Semiology(
        term.strip(),
        symptoms_side=Laterality.NEUTRAL,
        dominant_hemisphere=Laterality.NEUTRAL,
        granular=True,  # hierarchy reversal
        include_cortical_stimulation=False,
        include_et_topology_ez=False,
        include_spontaneous_semiology=True,  # SS
        normalise_to_localising_values=True,  # normalise to pt numbers
        include_paeds_and_adults=True,  # paeds and adults
    )

    num_datapoints_dict[
        term.strip()], all_combined_gif_df = patient.get_num_datapoints_dict()

# set the zero ones: give 'Hypomotor' the same GIF keys as 'Epigastric'
# but with every count zeroed.
num_datapoints_dict['Hypomotor'] = num_datapoints_dict['Epigastric'].copy()
# FIX: iterate keys only (the old loop unpacked .items() and never used the
# value).
for gif_key in num_datapoints_dict['Hypomotor']:
    num_datapoints_dict['Hypomotor'][gif_key] = 0

ali = pd.DataFrame.from_dict(num_datapoints_dict, orient='index')
Example #9
0
sides = Laterality.LEFT, Laterality.RIGHT

# Log to log_path (defined elsewhere in the file), overwriting on each run.
logging.basicConfig(
    filename=log_path,
    level=logging.INFO,
    filemode='w',
)

# Sweep every semiology term over all side/dominance combinations, timing
# each query.
rows = []  # NOTE(review): not populated in this visible chunk — presumably filled further down; confirm.
for semiology_term in get_all_semiology_terms():
    for symptoms_side in sides:
        for dominant_hemisphere in sides:
            tic = time.time()  # start timing this query
            semiology = Semiology(
                semiology_term,
                symptoms_side,
                dominant_hemisphere,
            )
            try:
                scores_dict, _ = semiology.get_num_datapoints_dict()
            except Exception as e:  # broad on purpose: record the failure and keep sweeping
                logging.error(e)
                scores_dict = None
            toc = time.time()
            seconds = toc - tic
            # Choose a log severity: error if the query failed, warning if
            # it was slow (> 1 s), info otherwise. The chosen logging
            # function is used after this visible chunk.
            if scores_dict is None:
                function = logging.error
            elif seconds > 1:
                function = logging.warning
            else:
                function = logging.info
from mega_analysis.crosstab.file_paths import file_paths
from mega_analysis.crosstab.mega_analysis.MEGA_ANALYSIS import MEGA_ANALYSIS


# Build a single Semiology query ('Tonic', LEFT side, LEFT dominance).
# The commented-out alternatives below are other example queries kept for
# quick manual switching during development.
patient = Semiology(
    # 'Figure of 4',
    # symptoms_side=Laterality.LEFT,
    # dominant_hemisphere=Laterality.LEFT,

    # 'Blink',
    # Laterality.NEUTRAL,
    # Laterality.LEFT,

    # 'All Automatisms (oral, automotor)',
    # Laterality.LEFT,
    # Laterality.LEFT,

    # 'Grimace', Laterality.NEUTRAL, Laterality.NEUTRAL,

    # 'latexceedsloc',  # switch to dummy data
    # symptoms_side=Laterality.LEFT,
    # dominant_hemisphere=Laterality.LEFT,

    'Tonic',
    symptoms_side=Laterality.LEFT,
    dominant_hemisphere=Laterality.LEFT,
    normalise_to_localising_values=False,  # default is False
)

###
##
# # if we want to use the dummy_data instead of real Semio2Brain DataFrame:
Example #11
0
from mega_analysis.crosstab.mega_analysis.MEGA_ANALYSIS import MEGA_ANALYSIS

file = Path(__file__).parent / 'resources' / 'semiologies_neutral_only.txt'
# FIX: list(open(...)) left the file handle open; read through a context
# manager instead (readlines() yields the same newline-terminated list).
with open(file, 'r') as terms_file:
    list_of_terms = terms_file.readlines()

# list_of_terms = ['Mimetic Automatisms',
#                  'No Semiology - Only Stimulation Studies',
#                  'Non-Specific Aura', 'Olfactory-Gustatory', 'Palilalia', 'Psychic', 'Spitting', 'Vestibular', 'Vocalisation', 'Whistling']

# Query each neutral-only term with fully NEUTRAL laterality.
for term in tqdm(list_of_terms,
                 desc='Neutral Only Semiologies',
                 bar_format="{l_bar}%s{bar}%s{r_bar}" %
                 (Fore.RED, Fore.RESET)):
    patient = Semiology(
        term.strip(),
        symptoms_side=Laterality.NEUTRAL,
        dominant_hemisphere=Laterality.NEUTRAL,
    )

    ###
    ##
    # # if we want to use the dummy_data instead of real Semio2Brain DataFrame:
    # repo_dir, resources_dir, dummy_data_path, dummy_semiology_dict_path = \
    #     file_paths(dummy_data=True)

    # patient.data_frame, _, _ = MEGA_ANALYSIS(
    #     excel_data=dummy_data_path,
    #     n_rows=100,
    #     usecols="A:DH",
    #     header=1,
    #     exclude_data=False,