Exemplo n.º 1
0
# Marder factor scores: sum the contributing PANSS items for each factor.
# skipna=False so a missing item leaves the factor NaN rather than a partial sum.
for factor, items in [('marder_exc', ['p4', 'p7', 'g8', 'g14']),
                      ('marder_dep', ['g2', 'g3', 'g4', 'g6'])]:
    baseline_data.insert(1, factor, baseline_data[items].sum(axis=1, skipna=False))

# Permutation index range supplied on the command line as "start,stop".
perm_string = sys.argv[1]
perm_list = perm_string.split(",")

# Load each subject's permuted network-Ki array into the 3-D stack, one
# slice per subject. enumerate replaces the original hand-rolled counter.
# Filenames use zero-padded three-digit subject IDs.
for df_index, i in enumerate(baseline_data['subject_id']):
    subject_id = '%03d' % i
    permuted_data[:, :, df_index] = np.load(
        f'results/null_dist/cortical_perm/{node_assignment}/network_kis_{subject_id}.npy')

# One permuted slice is used only to discover how many network columns exist.
example_ki_perm = permuted_data[1, :, :].T
column_names = [f"Net {net + 1}" for net in range(example_ki_perm.shape[1])]

# Symptom columns: the raw PANSS items plus the five Marder factor scores.
symptoms = clinical_data.loc[:, 'p1':'g16'].columns
marder_factors = pd.Index(['marder_pos', 'marder_neg', 'marder_disorg', 'marder_exc', 'marder_dep'])
symptoms = symptoms.append(marder_factors)

# Restrict the permutation analysis to these symptom and network columns.
select_columns = np.union1d(symptoms, ["Net 1", "Net 2", "Net 8", "Net 9", "Net 10"])

# For each requested permutation, attach the permuted Ki matrix to the
# clinical baseline data and accumulate the correlation and p-value matrices.
for perm_idx in range(int(perm_list[0]), int(perm_list[1])):
    ki_perm = permuted_data[perm_idx, :, :].T
    ki_frame = pd.DataFrame(data=ki_perm[0:29, :],
                            columns=column_names,
                            index=baseline_data['subject_id'].values).dropna(axis=1)
    ki_frame['subject_id'] = baseline_data['subject_id'].values
    perm_data = baseline_data.join(ki_frame.set_index('subject_id'), on='subject_id')
    required_data = perm_data[select_columns].dropna(axis=0)
    perm_direct_corr_r, perm_direct_corr_p = cb.cor_matrix(required_data, 'pearson')
    pvalue_list.append(perm_direct_corr_p)
    rvalue_list.append(perm_direct_corr_r)

# Persist the permutation results; the filename encodes the node-assignment
# scheme and the first permutation index of this batch.
results = [rvalue_list, pvalue_list]
out_path = f'results/response_permutations/baseline_corticalpermuted_{node_assignment}{str(perm_list[0])}.pkl'
# FIX: the original passed a bare open(...) to pickle.dump, leaking the
# file handle; the context manager guarantees it is closed.
with open(out_path, "wb") as fh:
    pickle.dump(results, fh)

# Baseline correlation matrix
def make_indices(df, panss1, panss2, panss3, panss4):
    """Return a combined column Index for correlation matrices.

    Concatenates, in order: the columns between panss1 and panss2, the
    columns between panss3 and panss4, the cortical-network columns
    ('DMN' through 'AUD'), and the striatal columns ('wstr' through 'ast'),
    all taken positionally from df's column layout.
    """
    panss_a = df.loc[:, panss1:panss2].columns
    panss_b = df.loc[:, panss3:panss4].columns
    cortical = df.loc[:, 'DMN':'AUD'].columns
    striatal = df.loc[:, 'wstr':'ast'].columns
    return panss_a.append((panss_b, cortical, striatal))


# Baseline correlation matrix: patients only, over the symptom + Ki columns.
indices_bl = make_indices(dopa_data, 'marder_dep', 'marder_pos', 'p1', 'g16')
patients_bl = dopa_data.loc[dopa_data['patient'] == 1]
bl_corr_data = patients_bl.loc[:, indices_bl].dropna(axis=1)
[bl_corr, bl_p] = cb.cor_matrix(bl_corr_data, 'pearson')

# Persist the raw r and p matrices for downstream notebooks/scripts.
bl_corr.to_pickle(
    '/Users/robmcc/Documents/academic/DOPA_Symptoms/results/pickled_databases/baseline_raw_corr.pkl'
)
bl_p.to_pickle(
    '/Users/robmcc/Documents/academic/DOPA_Symptoms/results/pickled_databases/baseline_raw_p.pkl'
)

# Correlations masked to zero wherever p >= 0.05 (boolean mask * r values).
mask = np.asarray(bl_p < 0.05)
masked_bl = pd.DataFrame(mask * np.asarray(bl_corr),
                         index=bl_corr.index,
                         columns=bl_corr.columns)

# NOTE(review): bare expression with no effect in a script — looks like a
# leftover notebook inspection. Kept verbatim to preserve behavior (it
# would raise KeyError if 'cg7' were absent).
response_data['cg7']

# Follow Up correlation matrix (n=20)
indices_ch = make_indices(response_data, 'cmarder_dep', 'cmarder_pos', 'cp1',
Exemplo n.º 3
0
import matplotlib.pyplot as plt
from statsmodels.stats.multitest import fdrcorrection
import os
from collections import defaultdict

# True values: correlations in the observed (unpermuted) baseline data.
# FIX: the original passed a bare open(...) to pickle.load, leaking the
# file handle; the context manager guarantees closure.
with open("/results/response_permutations/scripted_bl.pkl", "rb") as fh:
    baseline_data = pickle.load(fh)  # trusted, locally produced pickle
baseline_symptom_data = baseline_data.loc[:, [
    'marder_dep', 'marder_disorg', 'marder_exc', 'marder_neg', 'marder_pos'
]]
baseline_subdiv_data = baseline_data.loc[:, [
    'AUD', 'CON', 'DAT', 'DMN', 'SMh', 'VIS', 'wstr', 'ast', 'lst', 'smst'
]].dropna(axis=1)
[true_corr_bl,
 true_p_bl] = cb.cor_matrix(baseline_symptom_data.join(baseline_subdiv_data),
                            'pearson')
# Flatten the (Marder domain x network) correlations into a lookup dict.
# NOTE(review): keys end in '_z' but the stored values are raw r from
# true_corr_bl — confirm whether a Fisher z-transform was intended.
true_bl = {
    f'{marder_domain}_{network}_z':
    true_corr_bl.loc[f'marder_{marder_domain}', network]
    for network in ['AUD', 'DMN', 'SMh', 'CON', 'DAT']
    for marder_domain in ['pos', 'neg', 'disorg', 'exc', 'dep']
}

# Cortical permutations: load the pickled null-distribution correlation
# results produced by the batch permutation script.
number_permutations = 10000
# FIX: the original passed a bare open(...) to pickle.load, leaking the
# file handle; the context manager guarantees closure.
with open(
        "/results/response_permutations/baseline_corticalpermuted_scripted.pkl",
        "rb") as fh:
    cortical_bl_r = pickle.load(fh)  # trusted, locally produced pickle
permute_cortical_bl = defaultdict(list)
for i in range(number_permutations):
    shuffled_corr_bl = cortical_bl_r[i]
Exemplo n.º 4
0
# Assemble the PET + clinical dataset and compute the true (unpermuted)
# baseline correlation matrix.
# FIX: the original line was missing the opening quote of the path literal
# (a syntax error): pet_csv_path = (results/pet_network_kis/node_ids.csv')
pet_csv_path = 'results/pet_network_kis/node_ids.csv'
# NOTE(review): the final column name ' None' carries a leading space in the
# original — kept verbatim; confirm against the CSV's actual layout.
pet_data = pd.read_csv(pet_csv_path,
                       names=['subject_id', 'DMN', 'SMh', 'SMm', 'VIS', 'FPN',
                              'CPN', 'RST', 'CON', 'DAT', 'AUD', 'VAT', 'SAL',
                              ' None'],
                       index_col=None)
clinical_data = pd.read_csv('/results/copies_of_local/clinical.csv')
clinical_data = clinical_data.loc[clinical_data['include'] == 1]
dopa_data = clinical_data.join(pet_data.set_index('subject_id'), on='subject_id')

# Marder five-factor PANSS scores; skipna=False leaves a factor NaN when any
# contributing item is missing (no partial sums).
dopa_data.insert(1, 'marder_pos', dopa_data.loc[:, ['p1', 'p3', 'p5', 'p6', 'n7', 'g1', 'g9', 'g12']].sum(axis=1, skipna=False))
dopa_data.insert(1, 'marder_neg', dopa_data.loc[:, ['n1', 'n2', 'n3', 'n4', 'n6', 'g7', 'g16']].sum(axis=1, skipna=False))
dopa_data.insert(1, 'marder_disorg', dopa_data.loc[:, ['p2', 'n5', 'g5', 'g11', 'g13', 'g15', 'g10']].sum(axis=1, skipna=False))
dopa_data.insert(1, 'marder_exc', dopa_data.loc[:, ['p4', 'p7', 'g8', 'g14']].sum(axis=1, skipna=False))
dopa_data.insert(1, 'marder_dep', dopa_data.loc[:, ['g2', 'g3', 'g4', 'g6']].sum(axis=1, skipna=False))
# dopa_data.insert(1, 'classic_pos', dopa_data.loc[:, ['p1','p2','p3', 'p4','p5','p6','p7']].sum(axis=1, skipna=False))

# Patients only; persist the assembled baseline table for downstream scripts.
baseline_data = dopa_data.loc[dopa_data['patient'] == 1]
baseline_data.to_pickle('/results/response_permutations/scripted_bl.pkl')
baseline_symptom_data = baseline_data.loc[:, ['marder_dep', 'marder_disorg', 'marder_exc', 'marder_neg', 'marder_pos']]
baseline_subdiv_data = baseline_data.loc[:, ['AUD', 'CON', 'DAT', 'DMN', 'SMh', 'VIS', 'wstr', 'ast', 'lst', 'smst']].dropna(axis=1)
[true_corr_bl, true_p_bl] = cb.cor_matrix(baseline_symptom_data.join(baseline_subdiv_data), 'pearson')


# Permute individuals: pre-allocate the null-distribution stacks, one
# (symptom+network) x (symptom+network) slice per permutation.
num_perm = 10000
array_size = baseline_symptom_data.shape[1] + baseline_subdiv_data.shape[1]
permute_indiv = np.empty((array_size, array_size, num_perm))
corr_diff = np.empty((array_size, array_size, num_perm))
# Deep copy so shuffling never reorders the original symptom table.
shuffled_baseline_data = baseline_symptom_data.copy(deep=True)
for i in range(num_perm):
    np.random.seed(i) 
    shuffled_baseline_data = shuffled_baseline_data.set_index(np.random.permutation(baseline_data.index))
    shuffled_baseline_combined = shuffled_baseline_data.join(baseline_subdiv_data)
    [shuffled_corr_bl, shuffled_p_bl] = cb.cor_matrix(shuffled_baseline_combined, 'pearson')
    permute_indiv[:,:,i] = shuffled_corr_bl >=true_corr_bl