y_nanremoved = y[~np.isnan(y)]
x_nanremoved = x[~np.isnan(y)]
corr = spearmanr(x_nanremoved,y_nanremoved)
#axes.set(yscale='log') 



corr_r = np.round(corr[0], 2)
corr_p = np.round(corr[1], 8)
axes.set_title(f"{corr_r}, p = {corr_p}")

#plt.savefig(join(paths.SEIZURE_SPREAD_FIGURES,"connectivity", "sc_vs_spread_time_all_seizures.pdf"), bbox_inches='tight')     
"""

#%
fig, axes = utils.plot_make(size_length=5)
g = sns.regplot(data=sc_vs_quickness_group_fill,
                x="sc_LR_mean",
                y="inverse_quickness",
                scatter_kws=dict(linewidth=0, s=100),
                ci=None,
                line_kws=dict(lw=7))

#g = sns.regplot(data = sc_vs_quickness_group_fill[~np.isnan(sc_vs_quickness_filt_group["inverse_quickness"])], x = "sc_LR_mean", y= "inverse_quickness", scatter_kws = dict( linewidth=0, s=100), ci = None, line_kws=dict(lw = 7))
#g = sns.scatterplot(data = sc_vs_quickness_group_fill, x = "sc_LR_mean", y= "inverse_quickness", linewidth=0, s=100)


# Keep only "spreaders": the NaN mask comes from the unfilled dataframe
# (sc_vs_quickness_filt_group), since inverse_quickness in group_fill has
# already had its NaNs filled
sc_vs_quickness_group_fill_spreaders_only = sc_vs_quickness_group_fill[
    ~np.isnan(sc_vs_quickness_filt_group["inverse_quickness"])]

x = sc_vs_quickness_group_fill["sc_LR_mean"]
y = sc_vs_quickness_group_fill["inverse_quickness"]
y_nanremoved = y[~np.isnan(y)]
x_nanremoved = x[~np.isnan(y)]

corr_spearman = spearmanr(x_nanremoved, y_nanremoved)  # Spearman kept for reference
corr = pearsonr(x_nanremoved, y_nanremoved)  # Pearson is the value reported
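
# The drop-NaNs-then-correlate pattern above recurs throughout this script. A
# small helper could factor it out; this is a hypothetical sketch (not part of
# the original codebase) that mirrors the original's behavior of masking on y
# only. Assumes np, pearsonr, and spearmanr are imported at the top of the file.
def correlate_nan_removed(x, y, method="pearson"):
    """Return (r, p) after dropping pairs where y is NaN."""
    x = np.asarray(x, dtype=float)
    y = np.asarray(y, dtype=float)
    keep = ~np.isnan(y)
    if method == "pearson":
        return pearsonr(x[keep], y[keep])
    return spearmanr(x[keep], y[keep])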
#%% Example 2
        print(f"{s}; {np.round(percent_wm_50, 2)}")

#%%
means = []
# Medians of percent-active per tissue metric (displayed interactively in the
# original); printed here so they are visible when run as a script
for col in ["gm_50", "gm_dist", "gm_arg", "wm_50", "wm_dist", "wm_arg"]:
    print(f"median {col}: {np.median(df_percent[col])}")

fig, axes = utils.plot_make(r=3,
                            c=2,
                            size_length=15,
                            sharex=False,
                            sharey=True)
axes = axes.flatten()
sns.histplot(data=df_percent,
             x="gm_50",
             bins=20,
             kde=True,
             ax=axes[0],
             color=plot.COLORS_TISSUE_LIGHT_MED_DARK[1][0],
             lw=0,
             line_kws=dict(lw=5))
# The call below was truncated in the source; the trailing arguments are
# assumed to mirror the gm_50 panel above
sns.histplot(data=df_percent,
             x="gm_dist",
             bins=20,
             kde=True,
             ax=axes[1],
             color=plot.COLORS_TISSUE_LIGHT_MED_DARK[1][0],
             lw=0,
             line_kws=dict(lw=5))
#%% Example 3
def calculate_mean_rank_deep_learning(i,
                                      patientsWithseizures,
                                      version,
                                      threshold=0.6,
                                      smoothing=20,
                                      model_ID="WN",
                                      secondsAfter=180,
                                      secondsBefore=180,
                                      type_of_overlap="soz",
                                      override_soz=False,
                                      seconds_active=None,
                                      tanh=False):
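    """Summarize seizure-spread timing for one seizure (docstring inferred from the body).

    i : row index into patientsWithseizures selecting one seizure.
    patientsWithseizures : DataFrame with "subject", "idKey", and "length" columns.
    version : version number of the saved spread-model outputs.
    threshold, smoothing : passed to prob_threshold_moving_avg (probability
        cutoff and smoothing window in seconds).
    model_ID : "WN", "CNN", "LSTM", "absolute_slope", "line_length", or
        "power_broadband".
    secondsAfter, secondsBefore : peri-ictal window around the seizure (seconds).
    type_of_overlap, override_soz : SOZ-comparison options (unused in this excerpt).
    seconds_active : time grid for percent-active; defaults to 0..120 s.
    tanh : if True, use tanh-scaled features instead of max-normalized ones.
    """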

    #%%
    RID = np.array(patientsWithseizures["subject"])[i]
    idKey = np.array(patientsWithseizures["idKey"])[i]
    seizure_length = patientsWithseizures.length[i]

    print(RID)
    #CHECKING IF SPREAD FILES EXIST

    fname = DataJson.get_fname_ictal(RID,
                                     "Ictal",
                                     idKey,
                                     dataset=datasetiEEG,
                                     session=session,
                                     startUsec=None,
                                     stopUsec=None,
                                     startKey="EEC",
                                     secondsBefore=secondsBefore,
                                     secondsAfter=secondsAfter)

    spread_location = join(BIDS, datasetiEEG_spread, f"v{version:03d}",
                           f"sub-{RID}")
    spread_location_file_basename = f"{splitext(fname)[0]}_spread.pickle"
    spread_location_file = join(spread_location, spread_location_file_basename)

    feature_name = "absolute_slope"
    location_feature = join(BIDS, datasetiEEG_spread, "single_features",
                            f"sub-{RID}")
    location_abs_slope_basename = f"{splitext(fname)[0]}_{feature_name}.pickle"
    location_abs_slope_file = join(location_feature,
                                   location_abs_slope_basename)

    feature_name = "line_length"
    location_line_length_basename = f"{splitext(fname)[0]}_{feature_name}.pickle"
    location_line_length_file = join(location_feature,
                                     location_line_length_basename)

    feature_name = "power_broadband"
    location_power_broadband_basename = f"{splitext(fname)[0]}_{feature_name}.pickle"
    location_power_broadband = join(location_feature,
                                    location_power_broadband_basename)

    if seconds_active is None:
        seconds = np.arange(0, 60 * 2 + 1, 1)
    else:
        seconds = seconds_active
    percent_active_vec = np.full(len(seconds), np.nan)

    if utils.checkIfFileExists(spread_location_file,
                               printBOOL=False) and utils.checkIfFileExists(
                                   location_abs_slope_file, printBOOL=False):
        #print("\n\n\n\nSPREAD FILE EXISTS\n\n\n\n")

        #Getting SOZ labels
        RID_keys = list(jsonFile["SUBJECTS"].keys())
        hup_num_all = [jsonFile["SUBJECTS"][x]["HUP"] for x in RID_keys]

        hup_int = hup_num_all[RID_keys.index(RID)]
        hup_int_pad = f"{hup_int:03d}"

        #i_patient = patients.index(f"HUP{hup_int_pad}")
        #HUP = patients[i_patient]
        #hup = int(HUP[3:])

        #channel_names = labels[i_patient]
        #soz_ind = np.where(soz[i_patient] == 1)[0]
        #soz_channel_names = np.array(channel_names)[soz_ind]

        #resected_ind = np.where(resect[i_patient] == 1)[0]
        #resected_channel_names = np.array(channel_names)[resected_ind]

        #ignore_ind = np.where(ignore[i_patient] == 1)[0]
        #ignore__channel_names = np.array(channel_names)[ignore_ind]

        #soz_channel_names = echobase.channel2std(soz_channel_names)
        #resected_channel_names = echobase.channel2std(resected_channel_names)
        #ignore__channel_names = echobase.channel2std(ignore__channel_names)

        #soz_channel_names = channel2std_ECoG(soz_channel_names)
        #resected_channel_names = channel2std_ECoG(resected_channel_names)
        #ignore__channel_names = channel2std_ECoG(ignore__channel_names)
        #%
        THRESHOLD = threshold
        SMOOTHING = smoothing  #in seconds

        if model_ID == "WN" or model_ID == "CNN" or model_ID == "LSTM":
            with open(spread_location_file, 'rb') as f:
                [
                    probWN, probCNN, probLSTM, data_scalerDS, channels, window,
                    skipWindow, secondsBefore, secondsAfter
                ] = pickle.load(f)

        if model_ID == "WN":
            #print(model_ID)
            prob_array = probWN
        elif model_ID == "CNN":
            #print(model_ID)
            prob_array = probCNN
        elif model_ID == "LSTM":
            #print(model_ID)
            prob_array = probLSTM
        elif model_ID == "absolute_slope":
            if utils.checkIfFileExists(location_abs_slope_file,
                                       printBOOL=False):
                with open(location_abs_slope_file, 'rb') as f:
                    [
                        abs_slope_normalized, abs_slope_normalized_tanh,
                        channels, window, skipWindow, secondsBefore,
                        secondsAfter
                    ] = pickle.load(f)
                if not tanh:
                    #abs_slope_normalized = utils.apply_arctanh(abs_slope_normalized_tanh)/1e-1
                    abs_slope_normalized = abs_slope_normalized / np.max(
                        abs_slope_normalized)
                    prob_array = abs_slope_normalized
                else:
                    prob_array = abs_slope_normalized_tanh
            else:
                print(
                    f"{i} {RID} file does not exist {location_abs_slope_file}\n"
                )
        elif model_ID == "line_length":
            if utils.checkIfFileExists(location_line_length_file,
                                       printBOOL=False):
                with open(location_line_length_file, 'rb') as f:
                    [
                        probLL, probLL_tanh, channels, window, skipWindow,
                        secondsBefore, secondsAfter
                    ] = pickle.load(f)
                if not tanh:
                    probLL = probLL / np.max(probLL)
                    prob_array = probLL
                else:
                    prob_array = probLL_tanh
            else:
                print(
                    f"{i} {RID} file does not exist {location_line_length_file}\n"
                )
        elif model_ID == "power_broadband":
            if utils.checkIfFileExists(location_power_broadband,
                                       printBOOL=False):
                with open(location_power_broadband, 'rb') as f:
                    [
                        power_total, power_total_tanh, channels, window,
                        skipWindow, secondsBefore, secondsAfter
                    ] = pickle.load(f)
                if not tanh:
                    #power_total = utils.apply_arctanh(power_total_tanh)/7e-2
                    power_total = power_total / np.max(power_total)
                    prob_array = power_total

                else:
                    prob_array = power_total_tanh

            else:
                print(
                    f"{i} {RID} file does not exist {location_power_broadband}\n"
                )
        else:
            print("model ID not recognized. Using Wavenet")
            prob_array = probWN

        #####
        seizure_start = int((secondsBefore - 0) / skipWindow)
        seizure_stop = int((secondsBefore + seizure_length) / skipWindow)

        probability_arr_movingAvg, probability_arr_threshold = prob_threshold_moving_avg(
            prob_array, fsds, skip, threshold=THRESHOLD, smoothing=SMOOTHING)
        #sns.heatmap( probability_arr_movingAvg.T )
        #sns.heatmap( probability_arr_threshold.T)
        spread_start, seizure_start, spread_start_loc, channel_order, channel_order_labels = get_start_times(
            secondsBefore, skipWindow, fsds, channels, 0, seizure_length,
            probability_arr_threshold)
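        # prob_threshold_moving_avg and get_start_times are defined elsewhere in
        # the repository. Plausibly (a sketch, not the confirmed implementation):
        # each channel's probability trace is smoothed with a ~SMOOTHING-second
        # moving average, e.g.
        #   w = int(SMOOTHING * fsds / skip)
        #   smoothed[:, ch] = np.convolve(prob_array[:, ch], np.ones(w) / w, mode="same")
        # then binarized at THRESHOLD; the first supra-threshold window per
        # channel is taken as that channel's spread onset.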

        channel_order_labels = remove_EGG_and_ref(channel_order_labels)
        channels2 = remove_EGG_and_ref(channels)

        channel_order_labels = channel2std_ECoG(channel_order_labels)
        channels2 = channel2std_ECoG(channels2)

        #print(soz_channel_names)
        #print(resected_channel_names)
        #print(channel_order_labels)

        #remove ignore electrodes from channel_order_labels
        #ignore_index = np.intersect1d(  channel_order_labels, ignore__channel_names, return_indices=True)
        #channel_order_labels[-ignore_index[1]]
        #channel_order_labels = np.delete(channel_order_labels, ignore_index[1])

        print(i)
        RID = np.array(patientsWithseizures["subject"])[i]
        seizure = np.array(patientsWithseizures["idKey"])[i]
        seizure_length = patientsWithseizures.length[i]

        #atlas
        atlas = "BN_Atlas_246_1mm"
        atlas = "AAL3v1_1mm"
        #atlas = "AAL2"
        #atlas = "HarvardOxford-sub-ONLY_maxprob-thr25-1mm"

        atlas_names_short = list(atlas_files["STANDARD"].keys())
        atlas_names = [
            atlas_files["STANDARD"][x]["name"] for x in atlas_names_short
        ]
        ind = np.where(f"{atlas}.nii.gz" == np.array(atlas_names))[0][0]
        atlas_label_name = atlas_files["STANDARD"][
            atlas_names_short[ind]]["label"]

        atlas_label = pd.read_csv(join(paths.ATLAS_LABELS, atlas_label_name))
        atlas_label_names = np.array(atlas_label.iloc[1:, 1])
        atlas_label_region_numbers = np.array(atlas_label.iloc[1:, 0])

        connectivity_loc = join(
            paths.BIDS_DERIVATIVES_STRUCTURAL_MATRICES, f"sub-{RID}",
            "ses-research3Tv[0-9][0-9]", "matrices",
            f"sub-{RID}.{atlas}.count.pass.connectogram.txt")

        connectivity_loc_glob = glob.glob(connectivity_loc)

        if len(connectivity_loc_glob) > 0:
            connectivity_loc_path = connectivity_loc_glob[0]

            sc = utils.read_DSI_studio_Txt_files_SC(connectivity_loc_path)
            sc = sc / sc.max()
            #sc = utils.log_normalize_adj(sc)
            #sc=utils.log_normalize_adj(sc)
            sc_region_labels = utils.read_DSI_studio_Txt_files_SC_return_regions(
                connectivity_loc_path, atlas).astype(int)

            atlas_localization_path = join(
                paths.BIDS_DERIVATIVES_ATLAS_LOCALIZATION, f"sub-{RID}",
                f"ses-{session}",
                f"sub-{RID}_ses-{session}_desc-atlasLocalization.csv")
            if utils.checkIfFileExists(atlas_localization_path,
                                       printBOOL=False):
                atlas_localization = pd.read_csv(atlas_localization_path)

                atlas_localization.channel = channel2std_ECoG(
                    atlas_localization.channel)
                #get channels in hipp

                ##############################################################
                ##############################################################
                ##############################################################
                ##############################################################
                ##############################################################
                ##############################################################
                #find the activation time between channels

                # spread_start holds per-channel onset windows; channels2 the
                # standardized channel names (both computed above)

                sc_vs_time = pd.DataFrame(columns=["ch1", "ch2", "time", "sc"])

                # ch1/ch2: example indices for stepping through the
                # commented-out channel-pair loop below
                ch1 = 1
                ch2 = 10
                """
                for ch1 in range(len(channels2)):
                    for ch2 in range(len(channels2)):
                        time_between = abs(spread_start[ch1] - spread_start[ch2])*skipWindow
                        if spread_start[ch1]*skipWindow > seizure_length:
                            time_between = np.nan
                            
                        if spread_start[ch2]*skipWindow > seizure_length:
                            time_between = np.nan
                        
                        ch1_name = channels2[ch1]
                        ch2_name = channels2[ch2]
                        ch1_ind_atlas_loc_ind = np.where(ch1_name == atlas_localization.channel )[0]
                        ch2_ind_atlas_loc_ind = np.where(ch2_name == atlas_localization.channel )[0]
                        if len(ch1_ind_atlas_loc_ind) >0:
                            if len(ch2_ind_atlas_loc_ind)>0:
                                region_num1 = atlas_localization[f"{atlas}_region_number"][ch1_ind_atlas_loc_ind[0]]
                                region_num2 = atlas_localization[f"{atlas}_region_number"][ch2_ind_atlas_loc_ind[0]]
                                
                                
                                
                                if region_num1 in sc_region_labels:
                                    if region_num2 in sc_region_labels:
                                        if not region_num2 == region_num1:
                                            ch1_sc_ind = np.where(region_num1 == sc_region_labels)[0][0]
                                            ch2_sc_ind = np.where(region_num2 == sc_region_labels)[0][0]
                                            connectivity = sc[ch1_sc_ind, ch2_sc_ind]
                                            
                                            sc_vs_time = sc_vs_time.append( dict(ch1 = ch1_name, ch2 = ch2_name , time = time_between, sc = connectivity) , ignore_index=True)
                        
                sc_vs_time["inverse"] = 1/sc_vs_time["time"]
                fig, axes = utils.plot_make()
                sns.regplot(data = sc_vs_time, x = "sc", y = "time", ax = axes, scatter_kws=dict(s = 1))
                
                sc_vs_time.loc[sc_vs_time["time"] == 0, "inverse"] = 0
                
                sc_vs_time_nanfill = sc_vs_time.fillna(0)
                
                axes.set_ylim([0,1])            
                axes.set_xlim([0,0.5])            
                
                """
                #get the average time each region was active
                region_times = pd.DataFrame(columns=["region", "time"])
                for r in range(len(sc_region_labels)):
                    reg_num = sc_region_labels[r]
                    channels_in_reg = np.where(
                        reg_num ==
                        atlas_localization[f"{atlas}_region_number"])[0]

                    reg_starts = []
                    if len(channels_in_reg) > 0:
                        for ch in range(len(channels_in_reg)):
                            ch_name = atlas_localization.channel[
                                channels_in_reg[ch]]
                            ch_in_spread = np.where(ch_name == channels2)[0]
                            if len(ch_in_spread) > 0:
                                if spread_start[ch_in_spread[
                                        0]] * skipWindow > seizure_length:
                                    reg_starts.append(np.nan)
                                else:
                                    reg_starts.append(
                                        spread_start[ch_in_spread[0]] *
                                        skipWindow)
                        reg_mean = np.nanmean(reg_starts)
                    else:
                        reg_mean = np.nan
                    region_times = pd.concat(
                        [region_times,
                         pd.DataFrame([dict(region=reg_num, time=reg_mean)])],
                        ignore_index=True)  # DataFrame.append was removed in pandas 2
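
                # Hypothetical vectorized equivalent of the loop above (merge +
                # groupby); assumes spread_start aligns index-for-index with
                # channels2, as the loop does. region_means_vec is shown for
                # comparison only and is not used below.
                onset_df = pd.DataFrame({
                    "channel": np.asarray(channels2),
                    "onset_s": np.asarray(spread_start)[:len(channels2)] * skipWindow})
                onset_df.loc[onset_df["onset_s"] > seizure_length, "onset_s"] = np.nan
                region_means_vec = (atlas_localization
                                    .merge(onset_df, on="channel", how="inner")
                                    .groupby(f"{atlas}_region_number")["onset_s"]
                                    .mean())  # mean() skips NaN like np.nanmean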

                sc_vs_time_reg = pd.DataFrame(
                    columns=["reg1", "reg2", "time", "sc"])

                for r1 in range(len(sc)):
                    for r2 in range(r1 + 1, len(sc)):
                        connectivity = sc[r1, r2]
                        time_diff = abs(region_times.iloc[r1, 1] -
                                        region_times.iloc[r2, 1])
                        sc_vs_time_reg = pd.concat(
                            [sc_vs_time_reg,
                             pd.DataFrame([dict(reg1=sc_region_labels[r1],
                                                reg2=sc_region_labels[r2],
                                                time=time_diff,
                                                sc=connectivity)])],
                            ignore_index=True)  # .append removed in pandas 2
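
                # Hypothetical vectorized construction of the same pairwise
                # table with np.triu_indices (avoids growing the DataFrame one
                # row at a time, which is quadratic); sc_vs_time_reg_vec is not
                # used below, it is shown for comparison.
                iu1, iu2 = np.triu_indices(len(sc), k=1)
                times_arr = np.asarray(region_times["time"], dtype=float)
                sc_vs_time_reg_vec = pd.DataFrame({
                    "reg1": np.asarray(sc_region_labels)[iu1],
                    "reg2": np.asarray(sc_region_labels)[iu2],
                    "time": np.abs(times_arr[iu1] - times_arr[iu2]),
                    "sc": sc[iu1, iu2]})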

                #fig, axes = utils.plot_make()
                #sns.scatterplot(data = sc_vs_time_reg, x = "sc", y = "time", linewidth = 0, s=5)

                fig, axes = utils.plot_make(size_length=5)
                g = sns.regplot(data=sc_vs_time_reg,
                                x="sc",
                                y="time",
                                scatter_kws=dict(linewidth=0, s=50),
                                ci=None,
                                line_kws=dict(lw=7, color="black"))

                x = sc_vs_time_reg["sc"]
                y = sc_vs_time_reg["time"]
                y_nanremoved = y[~np.isnan(y)]
                x_nanremoved = x[~np.isnan(y)]
                corr_spearman = spearmanr(x_nanremoved, y_nanremoved)  # kept for reference
                corr = pearsonr(x_nanremoved, y_nanremoved)  # Pearson is reported below
                corr_r = np.round(corr[0], 3)
                corr_p = np.round(corr[1], 8)

                axes.set_title(f"r = {corr_r}, p = {corr_p}")
                #axes.set_ylim([-0.033,0.2])
                for tick in axes.xaxis.get_major_ticks():
                    tick.label1.set_fontsize(6)
                axes.tick_params(width=4)
                # change all spines
                for axis in ['top', 'bottom', 'left', 'right']:
                    axes.spines[axis].set_linewidth(6)

#%%
            plt.savefig(join(paths.SEIZURE_SPREAD_FIGURES, "connectivity",
                             f"sc_vs_spread_time_SINGLE_PATIENT_{RID}_01.pdf"),
                        bbox_inches='tight')

            #sns.regplot(data = sc_vs_time_reg, x = "time", y = "sc", scatter_kws=dict(s = 1))

            sc_vs_time_reg["inverse"] = 1 / sc_vs_time_reg["time"]
            #sc_vs_time_reg.loc[sc_vs_time["time"] == 0, "inverse"] = 0

            sc_vs_time_reg_fill = copy.deepcopy(sc_vs_time_reg)
            sc_vs_time_reg_fill = sc_vs_time_reg_fill.fillna(0)

            sns.scatterplot(data=sc_vs_time_reg_fill,
                            x="sc",
                            y="inverse",
                            linewidth=0,
                            s=5)

            fig, axes = utils.plot_make(size_length=5)
            g = sns.regplot(data=sc_vs_time_reg_fill,
                            x="sc",
                            y="time",
                            scatter_kws=dict(linewidth=0, s=20),
                            ci=None,
                            line_kws=dict(lw=5, color="black"))

            x = sc_vs_time_reg_fill["sc"]
            y = sc_vs_time_reg_fill["time"]
            y_nanremoved = y[~np.isnan(y)]
            x_nanremoved = x[~np.isnan(y)]
            corr_spearman = spearmanr(x_nanremoved, y_nanremoved)  # kept for reference
            corr = pearsonr(x_nanremoved, y_nanremoved)  # Pearson is reported below
            corr_r = np.round(corr[0], 2)
            corr_p = np.round(corr[1], 10)

            axes.set_title(f"{corr_r}, p = {corr_p}")
            #axes.set_ylim([-0.033,0.2])
            for tick in axes.xaxis.get_major_ticks():  # avoid shadowing the index i
                tick.label1.set_fontsize(6)
            axes.tick_params(width=4)
            # change all spines
            for axis in ['top', 'bottom', 'left', 'right']:
                axes.spines[axis].set_linewidth(6)

            plt.savefig(join(paths.SEIZURE_SPREAD_FIGURES, "connectivity",
                             f"sc_vs_spread_time_SINGLE_PATIENT_{RID}_02.pdf"),
                        bbox_inches='tight')
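
# Per model: fraction of channels whose spread onset falls within `timeee`
# seconds of seizure onset (spread_start_loc_*, fsds, secondsBefore, timeee,
# and nchan are defined in earlier cells not shown here).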
print(len(np.where(spread_start_loc_WM/fsds - secondsBefore < timeee)[0])/nchan)
print(len(np.where(spread_start_loc_CNN/fsds - secondsBefore < timeee)[0])/nchan)
print(len(np.where(spread_start_loc_LSTM/fsds - secondsBefore < timeee)[0])/nchan)
print(len(np.where(spread_start_loc_LL/fsds - secondsBefore < timeee)[0])/nchan)

#%% Plotting


WN_per = np.array([1, 0.8,0.75,0.625,0.2857142857,1,0.1,0.6,0.875,0.7777777778])
CNN_per = np.array([0.75,0.6,0.25,0.25,0.2857142857,0.875,0.1,0.4,0.875,0.8888888889])
LL = np.array([0.75,0.8,0.125,0.125,0.4285714286,0.5,0,0.6,0.125,0.5555555556])

df_per = pd.concat([pd.DataFrame(WN_per),pd.DataFrame(CNN_per), pd.DataFrame(LL)], axis= 1)
df_per.columns = ["WN", "CNN", "LL"]
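
# Equivalent, simpler construction (hypothetical alternative):
# df_per = pd.DataFrame({"WN": WN_per, "CNN": CNN_per, "LL": LL})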

fig, axes = utils.plot_make()
sns.boxplot(data = df_per, palette = ["#90b2e5","#9990e5", "#e5c390"], fliersize=0)
sns.swarmplot(data = df_per, palette = ["#163460","#1e1660", "#604216"])
sns.despine()


palette = ["#90b2e5","#9990e5", "#e5c390"]
plt.setp(axes.lines, zorder=100)
plt.setp(axes.collections, zorder=100, label="")

for a, mybox in enumerate(axes.artists):
    # Recolor each box to match its series in the palette
    mybox.set_facecolor(palette[a])
    mybox.set_edgecolor(palette[a])
    #mybox.set_linewidth(3)
count = 0
#%% Example 5
def calculate_mean_rank_deep_learning(i, patientsWithseizures, version,
                                      threshold=0.6, smoothing=20,
                                      model_ID="WN", secondsAfter=180,
                                      secondsBefore=180, tanh=False):
    #override_soz if True, then if there are no soz marking, then use the resection markings and assume those are SOZ contacts
    RID = np.array(patientsWithseizures["subject"])[i]
    idKey = np.array(patientsWithseizures["idKey"])[i]
    seizure_length = patientsWithseizures.length[i]
    
    
    #CHECKING IF SPREAD FILES EXIST

    fname = DataJson.get_fname_ictal(RID, "Ictal", idKey, dataset= datasetiEEG, session = session, startUsec = None, stopUsec= None, startKey = "EEC", secondsBefore = secondsBefore, secondsAfter = secondsAfter )
    
    spread_location = join(BIDS, datasetiEEG_spread, f"v{version:03d}", f"sub-{RID}" )
    spread_location_file_basename = f"{splitext(fname)[0]}_spread.pickle"
    spread_location_file = join(spread_location, spread_location_file_basename)
    
    
    feature_name = "absolute_slope"
    location_feature = join(BIDS, datasetiEEG_spread, "single_features", f"sub-{RID}" )
    location_abs_slope_basename = f"{splitext(fname)[0]}_{feature_name}.pickle"
    location_abs_slope_file = join(location_feature, location_abs_slope_basename)
    
    feature_name = "line_length"
    location_line_length_basename = f"{splitext(fname)[0]}_{feature_name}.pickle"
    location_line_length_file = join(location_feature, location_line_length_basename)
    
    feature_name = "power_broadband"
    location_power_broadband_basename = f"{splitext(fname)[0]}_{feature_name}.pickle"
    location_power_broadband = join(location_feature, location_power_broadband_basename)
   
    
    if utils.checkIfFileExists( spread_location_file , printBOOL=False) and utils.checkIfFileExists( location_abs_slope_file , printBOOL=False):
        #print("\n\n\n\nSPREAD FILE EXISTS\n\n\n\n")
    
    
    
        #Getting SOZ labels
        RID_keys =  list(jsonFile["SUBJECTS"].keys() )
        hup_num_all = [jsonFile["SUBJECTS"][x]["HUP"]  for  x   in  RID_keys]
        
        hup_int = hup_num_all[RID_keys.index(RID)]
        hup_int_pad = f"{hup_int:03d}" 
        
        i_patient = patients.index(f"HUP{hup_int_pad}")
        HUP = patients[i_patient]
        hup = int(HUP[3:])
        
    
        
        channel_names = labels[i_patient]
        soz_ind = np.where(soz[i_patient] == 1)[0]
        soz_channel_names = np.array(channel_names)[soz_ind]
        
        resected_ind = np.where(resect[i_patient] == 1)[0]
        resected_channel_names = np.array(channel_names)[resected_ind]
        
        ignore_ind = np.where(ignore[i_patient] == 1)[0]
        ignore__channel_names = np.array(channel_names)[ignore_ind]
        
        soz_channel_names = echobase.channel2std(soz_channel_names)
        resected_channel_names = echobase.channel2std(resected_channel_names)
        #ignore__channel_names = echobase.channel2std(ignore__channel_names)
        

        
        soz_channel_names = channel2std_ECoG(soz_channel_names)
        resected_channel_names = channel2std_ECoG(resected_channel_names)
        ignore__channel_names = channel2std_ECoG(ignore__channel_names)
        #%
        THRESHOLD = threshold
        SMOOTHING = smoothing #in seconds
        
    
        
        if model_ID == "WN" or model_ID == "CNN" or model_ID == "LSTM":
            with open(spread_location_file, 'rb') as f:
                [probWN, probCNN, probLSTM, data_scalerDS, channels, window,
                 skipWindow, secondsBefore, secondsAfter] = pickle.load(f)
            
        
        if model_ID == "WN":
            #print(model_ID)
            prob_array= probWN
        elif model_ID == "CNN":
            #print(model_ID)
            prob_array= probCNN
        elif model_ID == "LSTM":
            #print(model_ID)
            prob_array= probLSTM
        elif model_ID == "absolute_slope":
            if utils.checkIfFileExists(location_abs_slope_file, printBOOL=False):
                with open(location_abs_slope_file, 'rb') as f:
                    [abs_slope_normalized, abs_slope_normalized_tanh, channels,
                     window, skipWindow, secondsBefore, secondsAfter] = pickle.load(f)
                if not tanh:
                    #abs_slope_normalized = utils.apply_arctanh(abs_slope_normalized_tanh)/1e-1 
                    abs_slope_normalized = abs_slope_normalized/np.max(abs_slope_normalized)
                    prob_array=  abs_slope_normalized
                else:
                    prob_array= abs_slope_normalized_tanh
            else: 
                print(f"{i} {RID} file does not exist {location_abs_slope_file}\n")
        elif model_ID == "line_length":
            if utils.checkIfFileExists(location_line_length_file, printBOOL=False):
                with open(location_line_length_file, 'rb') as f:
                    [probLL, probLL_tanh, channels, window, skipWindow,
                     secondsBefore, secondsAfter] = pickle.load(f)
                if not tanh:
                    probLL = probLL/np.max(probLL)
                    prob_array= probLL
                else:
                    prob_array= probLL_tanh
            else: 
                print(f"{i} {RID} file does not exist {location_line_length_file}\n")
        elif model_ID == "power_broadband":
            if utils.checkIfFileExists(location_power_broadband, printBOOL=False):
                with open(location_power_broadband, 'rb') as f:
                    [power_total, power_total_tanh, channels, window,
                     skipWindow, secondsBefore, secondsAfter] = pickle.load(f)
                if not tanh:
                    #power_total = utils.apply_arctanh(power_total_tanh)/7e-2  
                    power_total = power_total/np.max(power_total)
                    prob_array=  power_total
                    
                else:
                    prob_array= power_total_tanh
            
            else: 
                print(f"{i} {RID} file does not exist {location_power_broadband}\n")
        else:
            print("model ID not recognized. Using Wavenet")
            prob_array= probWN
        
        #####
        seizure_start = int((secondsBefore-0)/skipWindow)
        seizure_stop = int((secondsBefore + seizure_length)/skipWindow)
        
        probability_arr_movingAvg, probability_arr_threshold = prob_threshold_moving_avg(prob_array, fsds, skip, threshold = THRESHOLD, smoothing = SMOOTHING)
        #sns.heatmap( probability_arr_movingAvg.T )      
        #sns.heatmap( probability_arr_threshold.T)    
        spread_start, seizure_start, spread_start_loc, channel_order, channel_order_labels = get_start_times(secondsBefore, skipWindow, fsds, channels, 0, seizure_length, probability_arr_threshold)
   
        
        channel_order_labels = remove_EGG_and_ref(channel_order_labels)
        channels2 = remove_EGG_and_ref(channels)
        
        channel_order_labels = channel2std_ECoG(channel_order_labels)
        channels2 = channel2std_ECoG(channels2)
        
        #print(soz_channel_names)
        #print(resected_channel_names)
        #print(channel_order_labels)
    
    
        #remove ignore electrodes from channel_order_labels
        #ignore_index = np.intersect1d(  channel_order_labels, ignore__channel_names, return_indices=True)
        #channel_order_labels[-ignore_index[1]]
        #channel_order_labels = np.delete(channel_order_labels, ignore_index[1])
        
        
        atlas_localization_path = join(paths.BIDS_DERIVATIVES_ATLAS_LOCALIZATION, f"sub-{RID}", f"ses-{session}", f"sub-{RID}_ses-{session}_desc-atlasLocalization.csv")
        if utils.checkIfFileExists(atlas_localization_path, printBOOL=False):
            atlas_localization = pd.read_csv(atlas_localization_path)
            
            
            atlas_localization.channel = channel2std_ECoG(atlas_localization.channel)
            
            
            # Region label of each channel under three atlases (read here but
            # not used further in this excerpt)
            for r in range(len(atlas_localization)):
                reg_AAL = atlas_localization.AAL_label[r]
                reg_BNA = atlas_localization.BN_Atlas_246_1mm_label[r]
                reg_HO = atlas_localization["HarvardOxford-combined_label"][r]
                
        coord_start_times = pd.DataFrame(columns = ["channel", "x", "y", "z", "start_time"])
        
        coord_start_times["channel"] = channels2
        
        for ch in range(len(coord_start_times)):
            ch_name = coord_start_times["channel"][ch]
            # T1-space coordinates of this electrode from the localization table
            x = np.array(atlas_localization[ch_name == atlas_localization.channel]["x"])[0]
            y = np.array(atlas_localization[ch_name == atlas_localization.channel]["y"])[0]
            z = np.array(atlas_localization[ch_name == atlas_localization.channel]["z"])[0]

            coord_start_times.loc[coord_start_times["channel"] == ch_name, "x"] = x
            coord_start_times.loc[coord_start_times["channel"] == ch_name, "y"] = y
            coord_start_times.loc[coord_start_times["channel"] == ch_name, "z"] = z

            # Onset time (s) for this channel; NaN if it never crossed threshold
            # or activated after the seizure ended
            channel_start = spread_start[ch_name == channels2] * skipWindow
            if len(channel_start) > 0:
                channel_start = channel_start[0]
                if channel_start > seizure_length:
                    channel_start = np.nan
            else:
                channel_start = np.nan
            coord_start_times.loc[coord_start_times["channel"] == ch_name, "start_time"] = channel_start
          
            
        t1_image = glob.glob(join(paths.BIDS_DERIVATIVES_ATLAS_LOCALIZATION, f"sub-{RID}", "ses-implant01", "tmp", "orig_nu_std.nii.gz"))[0]
        t1_image_brain = glob.glob(join(paths.BIDS_DERIVATIVES_ATLAS_LOCALIZATION, f"sub-{RID}", "ses-implant01", "tmp", "brain_std.nii.gz"))[0]
        img = nib.load(t1_image)
        img_brain = nib.load(t1_image_brain)
        #utils.show_slices(img, data_type = "img")
        img_data = img.get_fdata()
        brain_data = img_brain.get_fdata()
        affine = img.affine
        shape = img_data.shape
        # Accumulators in image space: summed onset times and overlap counts
        img_data_total = np.zeros_like(img_data)
        img_data_N = np.zeros_like(img_data)
        
        
       
        for ch in range(len(coord_start_times)):
            print(f"\r{ch}/{len(coord_start_times)}    ", end = "\r")
            coord = coord_start_times.iloc[ch]
            radius = 40  # sphere radius in voxels around each electrode

            img_data_sphere = np.zeros_like(img_data)
            
            coordinates = np.array(coord[["x", "y", "z"]]).astype(float)
            coordinates_voxels = utils.transform_coordinates_to_voxel(coordinates, affine)
            x,y,z = coordinates_voxels[0],coordinates_voxels[1],coordinates_voxels[2]
            sphere = utils.make_sphere_from_point(img_data_sphere, x,   y,  z, radius = radius)
            if not np.isnan(coord_start_times.start_time[ch]):
                img_data_N = img_data_N + sphere
                sphere[np.where(sphere >0)] = coord_start_times.start_time[ch]
                img_data_total = img_data_total + sphere
            
            #utils.show_slices(sphere, data_type = "data")
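
        # Hypothetical sketch of utils.make_sphere_from_point (the real helper
        # may differ): mark every voxel within `radius` voxels of (x, y, z).
        def make_sphere_sketch(volume_shape, x, y, z, radius):
            xx, yy, zz = np.indices(volume_shape)
            dist2 = (xx - x) ** 2 + (yy - y) ** 2 + (zz - z) ** 2
            return (dist2 <= radius ** 2).astype(float)
        # Below, img_data_total / img_data_N yields the per-voxel mean onset
        # time wherever at least one electrode sphere contributed.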
            
        utils.show_slices(img_data_N, data_type = "data")
        utils.show_slices(img_data_total, data_type = "data", cmap = "mako")
        
        
        utils.show_slices(brain_data, data_type = "data")
        
    
    
        # Mean onset per voxel; 0/0 outside electrode coverage gives NaN, and
        # voxels outside the brain mask are blanked
        img_data_avg = img_data_total / img_data_N
        img_data_avg[np.where(brain_data <= 0)] = np.nan
        
        utils.show_slices(img, data_type = "img")
        utils.show_slices(img_data_avg, data_type = "data", cmap = "mako")
        
        #img_data_avg[np.isnan(img_data_avg)] = seizure_length
        
        low, middle, high = 0.33, 0.48, 0.7
        # Three coronal slices (33%, 48%, 70% along the y-axis) of the T1, the
        # onset-time map, and the brain mask
        slices_t1 = [img_data[:, int(img_data.shape[1] * f), :]
                     for f in (low, middle, high)]
        slices_heat = [img_data_avg[:, int(img_data_avg.shape[1] * f), :]
                       for f in (low, middle, high)]
        slices_brain = [brain_data[:, int(brain_data.shape[1] * f), :]
                        for f in (low, middle, high)]
        
        cmap1 = "gray"
        cmap2 = "Wistia_r"
        """
        fig, axes = utils.plot_make()
        #sns.heatmap(slices_t1[1], cmap=cmap1, ax = axes, square = True)
        axes.imshow(slices_t1[1].T, cmap=cmap1, origin="lower")
        pos = axes.imshow(slices_heat[1].T, cmap=cmap2, origin="lower")
        fig.colorbar(pos, ax=axes)
        """
        slice_image = slices_heat[1]
        
        mask = np.where(~np.isnan(slice_image))
        interp = interpolate.NearestNDInterpolator(np.transpose(mask), slice_image[mask])
        filled_data = interp(*np.indices(slice_image.shape))
        
        filled_data_copy_gaussian = scipy.ndimage.gaussian_filter(filled_data, sigma = 2)
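        # NearestNDInterpolator fills every NaN pixel with the value of its
        # nearest non-NaN neighbor (defined pixels keep their own value); the
        # Gaussian blur (sigma = 2 px) then smooths the piecewise-constant map
        # before it is masked to the brain outline below.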
        
        filled_data_copy = copy.deepcopy(filled_data_copy_gaussian)
        filled_data_copy[np.where(slices_brain[1] <= 0)] = np.nan

        plt.style.use('default')
        cmap1 = "gray"
        cmap2 = "Spectral"
        fig, axes = utils.plot_make()
        axes.imshow(slices_t1[1].T, cmap=cmap1, origin="lower")
        pos = axes.imshow(filled_data_copy.T, cmap=cmap2, origin="lower")
        fig.colorbar(pos, ax=axes)
    

        
        plt.savefig(join(paths.SEIZURE_SPREAD_FIGURES, "spread_by_coordinates", "spread_by_coordinates2.pdf"))
#%% Example 6
    #for slice_num in range(50,140):
    print(slice_num)
    slice_num = int(slice_num)

    slice1 = utils.get_slice(img_data,
                             slice_ind=1,
                             slice_num=slice_num,
                             data_type="data")

    cmap = copy.copy(plt.get_cmap("magma"))
    # Mask values >= 1 so they render light gray; values below vmin in white
    masked_array = np.ma.masked_where(slice1 >= 1, slice1)
    cmap.set_bad(color='#cdcdcd')
    cmap.set_under('white')
    fig, axes = utils.plot_make()
    # Draw once and keep the mappable for the colorbar
    pos = axes.imshow(masked_array,
                      cmap=cmap,
                      origin="lower",
                      vmin=vmin,
                      vmax=vmax)
    axes.set_xticklabels([])
    axes.set_yticklabels([])
    axes.set_xticks([])
    axes.set_yticks([])
    axes.axis("off")
    fig.colorbar(pos, ax=axes)
    axes.set_title(f"{slice_num}")
    plt.savefig(join(paths.SEIZURE_SPREAD_FIGURES, "clustering",
                     f"COLOR_cluster_atlas_{cluster}_slice_{slice_num}.pdf"),
                bbox_inches='tight')  # closing arguments assumed; source truncated here
#%% Example 7
with open(full_analysis_location_file, 'rb') as f:
    [soz_overlap, percent_active, tanh, seconds_active, by, thresholds, window,
     skipWindow, secondsBefore, secondsAfter] = pickle.load(f)

#%%
palette = {"WN": "#1d5e9e", "CNN": "#73ace5", "LSTM": "#7373e5", "absolute_slope": "#961c1d", "line_length": "#d16a6a" , "power_broadband": "#d19e6a" }
palette_dark = {"WN": "#0a2036", "CNN": "#1e60a1", "LSTM": "#3737b3", "absolute_slope": "#250b0b", "line_length": "#5b1c1c" , "power_broadband": "#5b3c1c" }



#%%
soz_overlap_median = soz_overlap.groupby(['model', 'subject', "threshold"], as_index=False).median()


soz_overlap_outcomes = pd.merge(soz_overlap, outcomes, on='subject')
soz_overlap_median_outcomes = pd.merge(soz_overlap_median, outcomes, on='subject')

fig, axes = utils.plot_make(size_length=24, size_height=10)
sns.lineplot(data=soz_overlap_median,
             x="threshold",
             y="median_rank_percent",
             hue="model",
             ci=95,  # seaborn >= 0.12 renames this to errorbar=("ci", 95)
             estimator=np.mean,
             ax=axes,
             hue_order=model_IDs,
             palette=palette,
             lw=6,
             err_kws=dict(alpha=0.075))

# change all spines
for axis in ['top','bottom','left','right']:
    axes.spines[axis].set_linewidth(6)

# increase tick width
axes.tick_params(width=4)
axes.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0)
axes.set_xlim([0,1])

#plt.savefig(join(paths.SEIZURE_SPREAD_FIGURES,"validation", "over_thresholds.pdf"), bbox_inches='tight')

find_lowest = soz_overlap_median.groupby(["model", "threshold"], as_index=False).median()
for m in range(len(model_IDs)):