-
Notifications
You must be signed in to change notification settings - Fork 0
/
runTE.py
477 lines (409 loc) · 28.7 KB
/
runTE.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
import glob
import os
import sys
import numpy as np
import pandas as pd
import utils
import argparse
import time
def startCalc(calc_type = 'ksg'):
    """
    Start the JIDT calculator of the selected type

    Arguments:
        calc_type -- The estimator to use: 'gaussian' or 'ksg'
    Returns:
        The JIDT transfer entropy calculator, as returned by utils.startCalc
    Raises:
        ValueError -- If calc_type is not 'gaussian' or 'ksg'
    """
    # Validate with an explicit exception rather than assert, which is stripped under python -O
    if calc_type not in ('gaussian', 'ksg'):
        raise ValueError("calc_type must be 'gaussian' or 'ksg', got {!r}".format(calc_type))
    # Find the location of the JIDT .jar file, preferring the local project copy
    if os.path.exists("../noradrenaline/bin/infodynamics.jar"):
        jar_location = "../noradrenaline/bin/infodynamics.jar"
    else:
        jar_location = "/home/mike/Downloads/JIDT/infodynamics.jar"
    return utils.startCalc(measure = 'te', estimator = calc_type, jar_location = jar_location)
def computeTE(k, tau, dce, source_data, target_data, calc, source_history_length = 1,
              source_delay = 1, source_target_delay = 1, compute_local = False,
              compute_p = False, number_of_surrogates = 100, print_surrogate = False):
    """
    Run a single Transfer Entropy calculation through JIDT

    Arguments:
        k -- History length of target - JIDT parameter
        tau -- Delay between target time points - JIDT parameter
        dce -- Auto correlation length, used to set the dynamic correlation exclusion property
        source_data -- 1D Numpy array containing the time series of the source region
        target_data -- 1D Numpy array containing the time series of the target region
        calc -- The JIDT calculator
        source_history_length -- History length of source - JIDT parameter
        source_delay -- Delay of source time points - JIDT parameter
        source_target_delay -- Delay between source to target - JIDT parameter
        compute_local -- If True, a timeseries of the local TE is returned instead of the average
        compute_p -- If True, the p value of the average TE is calculated and returned
        number_of_surrogates -- Number of surrogate distributions to test to estimate the p value
        print_surrogate -- If True, print the mean and std of the surrogate distribution
    Returns:
        te -- Either locals or average, as a float or numpy array
        p -- p value. Returned if compute_p is true, else None is returned
    """
    # Properties must be set before initialise() is called
    calc.setProperty( "DYN_CORR_EXCL", str(dce) )
    # Bias correction only affects the gaussian estimator; the KSG estimator is already bias adjusted
    calc.setProperty( "BIAS_CORRECTION", "true" )
    calc.initialise(k, tau, source_history_length, source_delay, source_target_delay)
    calc.setObservations(source_data, target_data)

    p = None
    if compute_p:
        # Estimate significance against a surrogate distribution
        surrogate_dist = calc.computeSignificance(number_of_surrogates)
        if print_surrogate:
            print("Surrogate Mean:", surrogate_dist.getMeanOfDistribution())
            print("Surrogate Std:", surrogate_dist.getStdOfDistribution())
        p = surrogate_dist.pValue

    if compute_local:
        te = np.array( calc.computeLocalOfPreviousObservations() )
    else:
        te = calc.computeAverageLocalOfObservations()
    return te, p
def getLocalsForRegionPair(data, source_idx, target_idx, param_df, calc, compute_p = False, use_source_embedding = False):
    """
    Calculates the local TE for a particular source region - target region pair
    The parameters for target history length and target delay are passed in by param_df

    Arguments:
        data -- Numpy array. 2d array of shape (region, time). Preprocessing should have already been performed
        source_idx -- The index of the source region, as an Int
        target_idx -- The index of the target region, as an Int
        param_df -- Pandas DataFrame containing the parameters used for each region, with the columns 'k', 'tau' and 'dce'
                    (dce is not present if the experiment has set_k_to_0 = True)
        calc -- The JIDT calculator
        compute_p -- If True, computes the p value of the returned TE
        use_source_embedding -- If True, load up the values for history length and delay of the source from the AIS
                                calculations. Otherwise the source_history_length and source_delay parameters are set to 1
    Returns:
        result -- Numpy array of local TE values
        p -- p value of the computed local TE. Returned if compute_p is true, else None is returned
        dce -- The auto-correlation length used to set the dynamic correlation exclusion property for
               the TE calculation
    """
    # Pull out the two time series of interest from the full (region, time) array
    source_data, target_data = data[source_idx], data[target_idx]
    # Target embedding parameters come from the AIS parameter file
    history_length, delay = param_df.loc[target_idx, ['k', 'tau']]
    # Source embedding: either reuse the source region's AIS parameters, or default both to 1
    if use_source_embedding:
        source_history_length, source_delay = param_df.loc[source_idx, ['k', 'tau']]
    else:
        source_history_length = source_delay = 1
    dce = utils.getDCE(source_data, data_2 = target_data)
    result, p = computeTE(history_length, delay, dce, source_data, target_data, calc,
                          source_history_length, source_delay, compute_local = True, compute_p = compute_p)
    return result, p, dce
def getLocalsForAllRegionPairs(data, param_df, calc, compute_p = False, use_source_embedding = False, print_every = 50,
                               save_every = 20, saver = None, results = None, p_values = None, dce = None, idx_values = None):
    """
    Calculates the local TE for all pairs of regions, by calling getLocalsForRegionPair
    Supports resuming an interrupted run: pass in the previously saved arrays and idx_values

    Arguments:
        data -- Numpy array of shape (region, time). Preprocessing should have already been performed
        param_df -- Pandas DataFrame containing the parameters used for each region, in the columns 'k', 'tau' and 'dce'
                    (dce is not present if the experiment has set_k_to_0 = True)
        calc -- The JIDT calculator
        compute_p -- If True, computes the p value of the returned TE
        use_source_embedding -- If True, load up the values for history length and delay of the source from the AIS
                                calculations. Otherwise the source_history_length and source_delay parameters are set to 1
        print_every -- None, or Int giving the number of regions to calculate before printing an update of the progress
        save_every -- None, or Int giving the number of regions to calculate before saving the current state of the results
        saver -- TEResultSaver object, used to save the intermediate results
        results -- Loaded results from previous run, or None
        p_values -- Loaded p_values from previous run, or None
        dce -- Loaded DCE parameter values from previous run, or None
        idx_values -- Tuple of the last (source_idx, target_idx) of the saved results from previous run, or None
    Returns:
        results -- A numpy array of shape (regions, regions, timepoints), containing the local TE values for each region to region pair
                   The first dimension corresponds to the source region, the second dimension corresponds to the target region
        p_values -- A numpy array of shape (region, region) containing all returned p values (or Nones if compute_p is False).
                    Each row corresponds to a source region, and each column corresponds to a target region
        dce -- Numpy array containing the DCE parameter values, of shape (source_region, target_region)
    """
    regions, timepoints = data.shape
    if idx_values is None:
        # Start from the beginning. Initialise
        starting_source_idx, starting_target_idx = 0, 0
        results = np.zeros((regions, regions, timepoints))
        p_values = np.zeros((regions, regions))
        np.fill_diagonal(p_values, np.nan)  # Self-TE (the diagonal) is never computed
        dce = np.zeros((regions, regions), dtype = int)
    else:
        # Continue from where the loaded results left off
        starting_source_idx, starting_target_idx = idx_values
        assert results is not None and p_values is not None and dce is not None
    # Calculate the local TE for all source / target pairs
    for source_idx in range(starting_source_idx, regions):
        for target_idx in range(regions):
            # When resuming, skip pairs in the first (partially complete) source row that were already done
            if source_idx == starting_source_idx and target_idx < starting_target_idx:
                continue  # Start calculations at (starting_source_idx, starting_target_idx)
            if source_idx == target_idx:
                results[source_idx, target_idx] = np.nan  # Don't include the diagonal in any mean calculations
            else:
                if print_every is not None and (target_idx % print_every == 0):
                    # Print progress bar; fraction completed out of all regions ** 2 pairs
                    utils.update_progress((source_idx * regions + target_idx) / regions ** 2,
                                          end_text = " {:4} -> {:4}".format(source_idx, target_idx))
                results[source_idx, target_idx], p_values[source_idx, target_idx], dce[source_idx, target_idx] = getLocalsForRegionPair(data, source_idx, target_idx,
                                                                                                                                       param_df, calc, compute_p,
                                                                                                                                       use_source_embedding)
                # Save intermediate results. The saved index is the NEXT pair to process on resume
                if save_every is not None and saver is not None and (target_idx % save_every == 0):
                    saver.save_intermediate_result(results, p_values, dce, (source_idx, target_idx + 1))  # If loaded, start from (source_idx, target_idx + 1)
    return results, p_values, dce
class TEResultSaver:
    """
    Saves TE results: raw local TE arrays (with intermediate checkpointing for resumable runs)
    under raw_save_root, and the final averaged / per-region results under the local 'Results' folder
    """
    def __init__(self, filename, save_folder, raw_save_root = "/scratch/InfoDynFuncStruct/Mike/N-back/"):
        """
        Arguments:
            filename -- Base name of the files to be saved
            save_folder -- Folder to save the final results (TE in and out of each region)
            raw_save_root -- Root folder to save the raw TE results
        """
        self.filename = filename
        self.save_folder = save_folder
        self.raw_save_root = raw_save_root
        # Create the full output directory structure up front so later saves cannot fail on a missing folder
        os.makedirs(self._raw_path("p_values"), exist_ok = True)
        os.makedirs(self._raw_path("dce"), exist_ok = True)
        os.makedirs("Results/{}/TE/In-Target".format(save_folder), exist_ok = True)
        os.makedirs("Results/{}/TE/Out-Source".format(save_folder), exist_ok = True)
        os.makedirs("Results/{}/TE/p_values".format(save_folder), exist_ok = True)
        os.makedirs("Results/{}/TE/params".format(save_folder), exist_ok = True)

    def _raw_path(self, relative_path):
        """Return the path of a file or folder under the raw results directory (rooted at raw_save_root)"""
        return os.path.join(self.raw_save_root, "Results/{}/TE/raw/{}".format(self.save_folder, relative_path))

    def save_intermediate_result(self, results, p_values, dce, idx_values):
        """
        Checkpoint the current state of the calculation so it can be resumed after an interruption

        Arguments:
            idx_values -- Tuple of (source_idx, target_idx) for the next source and target idx to be processed
        """
        self.save_raw(results, p_values, dce, compress = False)  # Don't compress the intermediate files, for faster saving and loading
        # Write the values of the next (source_idx, target_idx) in a file
        # The presence of this file indicates that the final results have not been reached. It is deleted after the final results are saved
        with open(self._raw_path("{}_current_idx.txt".format(self.filename)), 'w') as f:
            f.write(str(idx_values[0]) + "," + str(idx_values[1]))

    def save_final_result(self, results, p_values, dce, padding = ((0,0), (0,0)), compress = False):
        """
        Saves the TE results - the raw array of shape (source_region, target_region, time) as well as the averaged local TE
        out of each source region and into each target region

        Arguments:
            results -- Numpy array containing the raw TE results, of shape (source_region, target_region, time)
            p_values -- Numpy array containing the p values, of shape (source_region, target_region)
            dce -- Numpy array containing the DCE parameter values, of shape (source_region, target_region)
            padding -- Tuple of tuples containing the number of spaces to pad the TE results at the start and end of each dimension
                       Follows the requirement specified by np.pad
            compress -- If True, save the raw results as a npz format instead of npy
        """
        self.save_raw(results, p_values, dce, compress)
        # Take the average across source / target regions, ignoring the diagonals where source = target
        target_te = np.nanmean(results, axis = 0)  # Average across all sources
        source_te = np.nanmean(results, axis = 1)  # Average across all targets
        # Add back the trimmed sections at the start and end of the timeseries by padding with zeros
        target_te = np.pad(target_te, padding, mode = 'constant', constant_values = 0)
        source_te = np.pad(source_te, padding, mode = 'constant', constant_values = 0)
        pd.DataFrame(target_te).to_csv('Results/{}/TE/In-Target/{}.csv'.format(self.save_folder, self.filename), index = None, header = None)
        pd.DataFrame(source_te).to_csv('Results/{}/TE/Out-Source/{}.csv'.format(self.save_folder, self.filename), index = None, header = None)
        pd.DataFrame(p_values).to_csv('Results/{}/TE/p_values/{}_p.csv'.format(self.save_folder, self.filename), index = None, header = None)
        pd.DataFrame(dce).to_csv('Results/{}/TE/params/{}_dce.csv'.format(self.save_folder, self.filename), index = None, header = None)
        # Clean up intermediate save files
        if compress:  # The .npz files are kept, and npy files are removed
            os.remove(self._raw_path("{}.npy".format(self.filename)))
            os.remove(self._raw_path("p_values/{}_p.npy".format(self.filename)))
        try:
            os.remove(self._raw_path("{}_current_idx.txt".format(self.filename)))
        except FileNotFoundError:  # Don't need to remove if it was never saved
            pass
        # The file containing the next (source_idx, target_idx) is deleted after the final results are saved

    def save_raw(self, results, p_values, dce, compress):
        """
        Saves the raw files

        Arguments:
            results -- Numpy array containing the raw TE results, of shape (source_region, target_region, time)
            p_values -- Numpy array containing the p values, of shape (source_region, target_region)
            dce -- Numpy array containing the DCE parameter values, of shape (source_region, target_region)
            compress -- If True, save the raw results as a npz format instead of npy. Only used when saving the final result
        """
        if compress:
            np.savez_compressed(self._raw_path("{}.npz".format(self.filename)), results = results)
            np.savez_compressed(self._raw_path("p_values/{}_p.npz".format(self.filename)), p_values = p_values)
            np.savez_compressed(self._raw_path("dce/{}_dce.npz".format(self.filename)), dce = dce)
        else:
            # Save temp file first then rename, in case the process gets killed during the save
            np.save(self._raw_path("{}_temp.npy".format(self.filename)), results)
            np.save(self._raw_path("p_values/{}_temp.npy".format(self.filename)), p_values)
            np.save(self._raw_path("dce/{}_temp.npy".format(self.filename)), dce)
            os.replace(self._raw_path("{}_temp.npy".format(self.filename)),
                       self._raw_path("{}.npy".format(self.filename)))
            os.replace(self._raw_path("p_values/{}_temp.npy".format(self.filename)),
                       self._raw_path("p_values/{}_p.npy".format(self.filename)))
            os.replace(self._raw_path("dce/{}_temp.npy".format(self.filename)),
                       self._raw_path("dce/{}_dce.npy".format(self.filename)))
def test_for_one_pair(filename = '100307.tsv', path = '../Data', source_region = 1, target_region = 0,
                      param_file = 'Results/HCP/AIS/params/100307_params.csv', calc_type = 'ksg', compute_p = False):
    """
    Smoke test: compute and plot the local TE for a single source -> target region pair of one subject
    """
    te_calc = startCalc(calc_type)
    raw_df, embedding_params = utils.loadData(filename, path, get_params = True, param_file = param_file)
    # Preprocessing parameters match the HCP experiment defaults
    processed = utils.preprocess(raw_df, sampling_rate = 1.3, mean_processing_type = 'removal',
                                 trim_start = 50, trim_end = 25)
    local_te, p, dce_value = getLocalsForRegionPair(processed, source_region, target_region,
                                                    embedding_params, te_calc, compute_p = compute_p)
    if p is not None:
        print("p value:", p)
    print('Dynamic correlation exclusion value:', dce_value)
    utils.plotTimeseries(local_te)
def run(i, data_path, extension, save_folder, raw_save_root = "/scratch/InfoDynFuncStruct/Mike/N-back/", save_every = 20,
        GRP = False, compute_p = True, compress = False, set_k_to_0 = False, calc_type = 'ksg', use_source_embedding = False,
        **preprocessing_params):
    """
    Run TE calculation for a particular subject. Parameters are loaded from file, based on the AIS calculation, or set
    to 0 if set_k_to_0 is True

    Arguments:
        i -- An Int which states which file or subject to load and process
        data_path -- Location of the data files
        extension -- File extension of the data (eg. .csv, .tsv, .mat)
        save_folder -- Subfolder of the 'Results' directory in which to save the local TE values, parameters and p_values
        raw_save_root -- Location to save the raw local TE values (as a npz or npy file)
        save_every -- None, or Int giving the number of regions to calculate before saving the current state of the results
        GRP -- Set to True if processing the GRP data, which is one array of dimension (region, timepoints, subject)
        compute_p -- If True, computes the p value of the returned TE
        compress -- If True, the raw TE values are saved as a compressed npz file instead of an npy file
        set_k_to_0 -- If True, skip loading of k and l parameters, instead initialising the DataFrame to zeros
        calc_type -- The type of estimator to use for the JIDT calculator - 'gaussian' or 'ksg'
        use_source_embedding -- If True, load up the values for history length and delay of the source from the AIS
                                calculations. Otherwise the source_history_length and source_delay parameters are set to 1
        preprocessing_params -- Parameters passed to utils.preprocess for preprocessing the time series data.
                                Includes sampling_rate / sampling_interval, mean_processing_type, trim_start, trim_end,
                                fcutlow, fcuthigh, use_filtfilt
    """
    start_time = time.time()
    files = utils.getAllFiles(data_path, extension)
    if GRP:
        # GRP data is a single file; subjects are selected by index within it
        file = files[0]
        filename = '{:02}'.format(i)  # Save the results by the subject's number
        subject_id = i
    else:
        file = files[i]
        filename = utils.basename(file)
        subject_id = None
    print("Processing file {}: {}".format(i, filename))

    # Check for the presence of the current_idx file
    # If it's not present, then either no calculations have been done, or the final results have already been saved
    if os.path.isfile(os.path.join(raw_save_root, "Results/{}/TE/raw/{}_current_idx.txt".format(save_folder, filename))):
        # Load previous results, which are always saved in the uncompressed format
        results = np.load(os.path.join(raw_save_root, "Results/{}/TE/raw/{}.npy".format(save_folder, filename)))
        p_values = np.load(os.path.join(raw_save_root, "Results/{}/TE/raw/p_values/{}_p.npy".format(save_folder, filename)))
        dce = np.load(os.path.join(raw_save_root, "Results/{}/TE/raw/dce/{}_dce.npy".format(save_folder, filename)))
        with open(os.path.join(raw_save_root, "Results/{}/TE/raw/{}_current_idx.txt".format(save_folder, filename)), 'r') as f:
            idx_values = f.readline()
            idx_values = list(map(int, idx_values.split(',')))
        print("Loading previous results")
        print("Starting from index", idx_values)
    else:
        results, p_values, dce, idx_values = None, None, None, None
        # Check both compressed and uncompressed options. If this file exists but the current_idx file doesn't then the
        # final results have already been saved. Exit to avoid running again
        if glob.glob(os.path.join(raw_save_root, "Results/{}/TE/raw/p_values/{}_p.np*".format(save_folder, filename))):
            print("Result already present")
            sys.exit()  # sys.exit rather than the site builtin exit(), which is not available in all run modes

    # Load parameter file
    param_file = 'Results/{}/AIS/params/{}_params.csv'.format(save_folder, filename)
    if set_k_to_0:
        # Zero history length / delay turns the calculation into time-lagged MI
        df = utils.loadData(file, get_params = False, subject_id = subject_id)
        param_df = pd.DataFrame( np.zeros((len(df), 2), dtype = int), columns = ['k', 'tau'])
    else:
        df, param_df = utils.loadData(file, get_params = True, param_file = param_file, subject_id = subject_id)
    data = utils.preprocess(df, **preprocessing_params)
    saver = TEResultSaver(filename, save_folder, raw_save_root)
    calc = startCalc(calc_type)

    # Do the calculations
    results, p_values, dce = getLocalsForAllRegionPairs(data, param_df, calc, compute_p, saver = saver,
                                                        save_every = save_every, results = results,
                                                        p_values = p_values, dce = dce, idx_values = idx_values,
                                                        use_source_embedding = use_source_embedding)

    # Save the final results
    # Add back the trimmed sections at the start and end of the timeseries by padding with zeros
    padding = ((0,0), (preprocessing_params.get('trim_start', 0), preprocessing_params.get('trim_end', 0)))
    saver.save_final_result(results, p_values, dce, padding = padding, compress = compress)
    print("\nTime taken:", round((time.time() - start_time) / 60, 2), 'min')
def run_experiment(experiment_number, i, local_test = False, compute_p = False, repetition = None):
    """
    Run a particular experiment with a specified set of parameters and data
    A folder is created for the results. The folder name is modified by the repetition number,
    and whether p values are calculated

    Arguments:
        experiment_number -- An Int which states which experiment to run
        i -- An Int which states which file or subject to load and process
        local_test -- If True, set file paths for local testing
        compute_p -- If True, computes the p value of the returned TE
        repetition -- None, or an Int which specifies the repetition number of the run
                      Repetitions are saved in their own folder with the number as a suffix
    Raises:
        ValueError -- If experiment_number does not correspond to a defined experiment
    """
    # Get parameters which are common across a particular experiment type
    if experiment_number in [0,2,3,4,5,6,7,8]:  # HCP experiments
        common_params = {
            'data_path': '../Data' if local_test else 'Data/HCP',
            'extension': '.tsv',
            'sampling_rate': 1.3
        }
    elif experiment_number in [1]:  # ATX experiments
        common_params = {
            'data_path': '/media/mike/Files/Data and Results/N-back/Data/ATX_data' if local_test else 'Data/ATX_data',
            'extension': '.csv',
            'sampling_rate': 1
        }
    else:
        # ValueError rather than a bare Exception: still caught by any `except Exception` handler
        raise ValueError("No common parameters specified for experiment. Check experiment type")
    common_params['raw_save_root'] = '/media/mike/Files/Data and Results/N-back' if local_test else "/scratch/InfoDynFuncStruct/Mike/N-back/"
    common_params['compress'] = False  # Decides whether npz or npy file type is used to save the raw local TE values
    common_params['compute_p'] = compute_p
    common_params['save_every'] = None

    def get_save_folder(folder_name):
        """
        Modifies the folder name to include an indication if p values are calculated, and
        adds a suffix indicating the repetition number if repetition != None
        """
        save_folder = folder_name + ('_with-p' if compute_p else "")
        save_folder += ('_r{:02}'.format(repetition) if repetition is not None else "")
        return save_folder

    # Run experiment
    print("Running experiment:", experiment_number)
    if experiment_number == 0:  # HCP
        run(i, save_folder = get_save_folder('HCP'), mean_processing_type = 'removal',
            trim_start = 50, trim_end = 25, **common_params)
    elif experiment_number == 1:  # ATX
        run(i, save_folder = get_save_folder('ATX'), mean_processing_type = 'removal',
            trim_start = 25, trim_end = 25, **common_params)
    elif experiment_number == 2:  # HCP -- no global mean removal
        run(i, save_folder = get_save_folder('HCP_no-mean-removal'), mean_processing_type = None,
            trim_start = 50, trim_end = 25, **common_params)
    elif experiment_number == 3:  # HCP - using linear gaussian estimator
        run(i, save_folder = get_save_folder('HCP_gaussian'), mean_processing_type = 'removal',
            trim_start = 50, trim_end = 25, calc_type = 'gaussian', **common_params)
    elif experiment_number == 4:  # HCP -- time-lagged MI (gaussian)
        run(i, save_folder = get_save_folder('HCP_MI-gaussian'), mean_processing_type = 'removal',
            trim_start = 50, trim_end = 25, set_k_to_0 = True, calc_type = 'gaussian', **common_params)
    elif experiment_number == 5:  # HCP -- time-lagged MI (KSG)
        run(i, save_folder = get_save_folder('HCP_MI-KSG'), mean_processing_type = 'removal',
            trim_start = 50, trim_end = 25, set_k_to_0 = True, **common_params)
    elif experiment_number == 6:  # HCP - using population parameters
        run(i, save_folder = get_save_folder('HCP_pop-param'), mean_processing_type = 'removal',
            trim_start = 50, trim_end = 25, **common_params)
    elif experiment_number == 7:  # HCP - using lfilter
        run(i, save_folder = get_save_folder('HCP_filter'), mean_processing_type = 'removal',
            trim_start = 50, trim_end = 25, use_filtfilt = False, use_source_embedding = True, **common_params)
    elif experiment_number == 8:  # HCP - using lfilter and mean regression
        run(i, save_folder = get_save_folder('HCP_filter_meanregression'), mean_processing_type = 'regression',
            trim_start = 50, trim_end = 25, use_filtfilt = False, use_source_embedding = True, **common_params)
    else:
        raise ValueError("Experiment not defined")
############################################################################################################
if __name__ == "__main__":
    # Presence of this home directory indicates a run on the cluster; otherwise assume a local test
    if os.path.exists('/home/mili7522/'):
        local_test = False
    else:
        local_test = True
    # Parse command line arguments
    parser = argparse.ArgumentParser(description = 'Run TE calculation on a particular data set.'
                                     + ' Input two integers, the subject number followed by the experiment number.')
    parser.add_argument('subject_number', type = int, help = 'the number of the subject or file to load')
    # nargs = '?' makes the experiment number optional, so the default of 0 can actually take effect
    parser.add_argument('experiment_number', type = int, nargs = '?', help = 'the number of the experiment to perform', default = 0)
    # computeTE defaults to 100 surrogates and nothing on this path overrides it
    parser.add_argument('-p', '--compute_p', action = 'store_true', default = False, help = 'set as True to calculate p-values (with 100 surrogates)')
    parser.add_argument('-r', '--repetition', metavar = 'R', type = int, default = None, help = 'repetition number. Default = None')
    if len(sys.argv) > 1:
        args = parser.parse_args()
        subject_number = args.subject_number
        experiment_number = args.experiment_number
        compute_p = args.compute_p
        repetition = args.repetition
        run_experiment(experiment_number, subject_number, local_test, compute_p, repetition)
    else:
        # No command line arguments: fall back to a local single-pair smoke test
        print("Testing for one pair")
        test_for_one_pair()