def wrapper_TS_GIFs(
    p_S_norm,
    global_lateralisation: bool = False,
    include_paeds_and_adults: bool = True,
    include_only_postictals: bool = False,
    symptom_laterality='NEUTRAL',
    dominance='NEUTRAL',
    normalise_to_localising_values=True,
    hierarchy_reversal: bool = True,
):
    """
    Compute p(S | GIF) across all semiologies from Topological studies.

    Queries every semiology in ``p_S_norm.index`` with
    ``include_spontaneous_semiology=False`` (i.e. Topological studies only),
    sums the per-semiology GIF datapoint counts to obtain a per-GIF
    denominator, and returns the DataFrame of conditional probabilities
    p(S | GIF): one row per semiology, one column per GIF parcellation number.

    As it queries all semiologies, this is too labour intensive to run on the
    fly, hence results are cached (elsewhere) for the double-NEUTRAL
    laterality case.

    Parameters
    ----------
    p_S_norm :
        Pandas object whose ``.index`` provides the semiology terms to query.
    global_lateralisation, include_paeds_and_adults,
    normalise_to_localising_values :
        Forwarded to each ``Semiology`` query.
    include_only_postictals, hierarchy_reversal :
        Set as attributes (``include_only_postictals`` / ``granular``) on each
        ``Semiology`` instance after construction.
    symptom_laterality, dominance :
        Currently unused — queries are hard-coded to ``Laterality.NEUTRAL``;
        kept for backward compatibility with callers.

    Returns
    -------
    pd.DataFrame
        ``p_S_given_GIF`` where cell (semiology, GIF) = that semiology's
        datapoint count for the GIF divided by the GIF's total across all
        semiologies.
    """
    # Run one Topological-only query per semiology and keep the raw
    # GIF -> datapoint-count dict for each.
    pt = {}
    all_combined_gifs_superdict = {}
    for semiology in p_S_norm.index:
        pt[semiology] = Semiology(
            semiology,
            include_spontaneous_semiology=False,  # crucial: Topological studies only
            symptoms_side=Laterality.NEUTRAL,
            dominant_hemisphere=Laterality.NEUTRAL,
            include_postictals=False,
            include_paeds_and_adults=include_paeds_and_adults,
            normalise_to_localising_values=normalise_to_localising_values,
            global_lateralisation=global_lateralisation,
        )
        pt[semiology].include_only_postictals = include_only_postictals
        pt[semiology].granular = hierarchy_reversal
        # method=anything but the default 'proportions', to get raw counts;
        # the accompanying DataFrame return value is not needed here.
        all_combined_gifs_superdict[semiology], _ = pt[
            semiology].get_num_datapoints_dict(
                method='not proportions'
            )

    # Sum the per-semiology dicts into a single per-GIF total (denominator).
    # NOTE: this seems to repeat what marginal_GIF_probabilities does — could
    # be made more efficient. sum() of Counters keeps the `+` semantics
    # (keys whose total is non-positive are dropped).
    added_all_gifs = dict(sum(
        (Counter(gif_counts)
         for gif_counts in all_combined_gifs_superdict.values()),
        Counter(),
    ))

    # p(S | GIF) per cell: this semiology's count for the GIF divided by the
    # total count for that GIF across all semiologies.
    p_S_given_GIF = pd.DataFrame()
    for semiology in p_S_norm.index:
        for GIF_no, count in all_combined_gifs_superdict[semiology].items():
            p_S_given_GIF.loc[semiology, GIF_no] = count / added_all_gifs[GIF_no]

    return p_S_given_GIF
    normalise_to_localising_values=False,  # default is False
)

###
##
# # if we want to use the dummy_data instead of real Semio2Brain DataFrame:
# repo_dir, resources_dir, dummy_data_path, dummy_semiology_dict_path = \
#     file_paths(dummy_data=True)

# patient.data_frame, _, _ = MEGA_ANALYSIS(
#     excel_data=dummy_data_path,
#     n_rows=100,
#     usecols="A:DH",
#     header=1,
#     exclude_data=False,
#     plot=True,
# )
#
##
###

###
##
# Set top_level_lobes to True (lobe-level aggregation instead of granular
# hierarchy-reversal localisation).
# NOTE(review): the original header read "if we want to set top_level_lobes
# to True:" as if this section were optional like the dummy-data block above,
# but these lines are active — confirm this is intended.
patient.granular = False  # disable granular (hierarchy-reversal) results
patient.top_level_lobes = True  # aggregate at top-level lobes instead

# `patient` is presumably a Semiology instance defined earlier in the file —
# TODO confirm. No `method` argument, so the default (proportions, per the
# wrapper above) applies.
heatmap = patient.get_num_datapoints_dict()
print("\nSemiology: ", patient.term)
print('\nResult:', heatmap, '\n')