Code Example #1
def setUp(self):
    self.tm = training_manager.TrainingManager()
    self.tm.N_PACKAGES = 1
    self.PROJECT_DIR = tb.setup_module()
    # load in spm_data
    data_path = self.PROJECT_DIR + "/dcm_rnn/resources/template0.pkl"
    self.du = tb.load_template(data_path)
    self.dr = tfm.DcmRnn()
    self.dr.collect_parameters(self.du)
    self.tm.prepare_dcm_rnn(self.dr, tag='initializer')
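For context, a setUp fixture like this runs before every test method in its unittest.TestCase class; the suite itself is invoked with the standard unittest entry point:

import unittest

# standard test-runner boilerplate; discovers and runs the TestCase above
if __name__ == '__main__':
    unittest.main()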
Code Example #2
N_SEGMENTS = 1024
N_RECURRENT_STEP = 64
# STEP_SIZE = 0.002   # for 32
# STEP_SIZE = 0.5
# STEP_SIZE = 0.001   # for 64
# STEP_SIZE = 0.001   # for 128
# STEP_SIZE = 0.0005  # for 256
STEP_SIZE = 1e-5
# DATA_SHIFT = int(N_RECURRENT_STEP / 4)
DATA_SHIFT = 1
LEARNING_RATE = 0.01 / N_RECURRENT_STEP

print(os.getcwd())
PROJECT_DIR = '/Users/yuanwang/Google_Drive/projects/Gits/DCM_RNN'
data_path = PROJECT_DIR + "/dcm_rnn/resources/template0.pkl"
du = tb.load_template(data_path)

dr = tfm.DcmRnn()
dr.collect_parameters(du)
dr.learning_rate = LEARNING_RATE
dr.shift_data = DATA_SHIFT
dr.n_recurrent_step = N_RECURRENT_STEP
neural_parameter_initial = {
    'A': du.get('A'),
    'B': du.get('B'),
    'C': du.get('C')
}
dr.loss_weighting = {
    'prediction': 1.,
    'sparsity': 0.1,
    'prior': 10.,
}
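The loss_weighting dict suggests a weighted composite objective. A minimal sketch of how such weights are typically combined; the scalar loss terms below are hypothetical placeholders, not DcmRnn's actual internals:

# hypothetical per-term losses, for illustration only
prediction_error = 0.5
sparsity_penalty = 0.2
prior_penalty = 0.01

loss_weighting = {'prediction': 1., 'sparsity': 0.1, 'prior': 10.}
total_loss = (loss_weighting['prediction'] * prediction_error
              + loss_weighting['sparsity'] * sparsity_penalty
              + loss_weighting['prior'] * prior_penalty)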
Code Example #3
File: fista.py (Project: andreofner/DCM_RNN)
    package["Wxxu"] = w_true[1]
    package["Wxu"] = w_true[2]
    x_true = du.scan_x(package)

    if if_plot:
        plt.plot(x_hat, '--')
        plt.plot(x_true)

    return [x_hat, x_true]


# load in spm_data
print('working directory is ' + os.getcwd())
data_path = os.path.join(PROJECT_DIR, 'experiments', 'calculate_abc_from_x',
                         'spm_data.pkl')
data_package = tb.load_template(data_path)
data = data_package.data
# RESULT_PATH_DCM_RNN = PROJECT_DIR + "/dcm_rnn/resources/template0.pkl"
# du = tb.load_template(RESULT_PATH_DCM_RNN)
du = data['key']
signal_length = data['x_true_merged'].data.shape[0]
index_range = range(0, int(signal_length * 3 / 4))

# check whether du and spm_data match
np.testing.assert_array_almost_equal(
    du.get('u')[index_range], data['u_merged'][index_range])
np.testing.assert_array_almost_equal(
    du.get('x')[index_range], data['x_true_merged'][index_range])

# short hand for spm_data
x = data['x_hat_merged'].data[index_range]
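The assert calls above guard against mismatched data sources: np.testing.assert_array_almost_equal compares element-wise to 6 decimal places by default and raises AssertionError on the first disagreement, e.g.:

import numpy as np

a = np.array([1.0000001, 2.0])
b = np.array([1.0000002, 2.0])
np.testing.assert_array_almost_equal(a, b)  # passes: equal to 6 decimals
# np.testing.assert_array_almost_equal(a, b + 0.001)  # would raise AssertionError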
Code Example #4
File: generate_data.py (Project: andreofner/DCM_RNN)
noise = np.random.randn(du.get('y').shape[0], du.get('y').shape[1]) * noise_std
du._secured_data['y_noised'] = du._secured_data['y'] + noise
for i in range(du.get('n_node') + 1):
    plt.subplot(4, 1, i + 1)
    if i == 0:
        plt.plot(du.get('u'))
    else:
        plt.plot(du.get('y_noised')[:, i - 1])

# save data after finding a good u
core = du.collect_parameter_core('y_noised')
# pickle.dump(core, open(CORE_PATH, 'wb'))

## down-sample and up-sample to create the data used for DCM-RNN inference
# load data
core = tb.load_template(CORE_PATH)
du_original = tb.DataUnit()
du_original.load_parameter_core(core)
du_original.recover_data_unit()
du = du_original.resample_data_unit()
pickle.dump(du, open(SAVE_PATH_PKL, 'wb'))

# create DCM structure for SPM DCM
core = tb.load_template(CORE_PATH)
du = tb.DataUnit()
du.load_parameter_core(core)
du.recover_data_unit()

mask = du.create_support_mask()
down_sample_rate_u = 4
down_sample_rate_y = 128
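The rates down_sample_rate_u = 4 and down_sample_rate_y = 128 match the stride-based decimation used elsewhere on this page (e.g. y[::128] in Code Example #6). A minimal sketch with a placeholder signal; du.resample's exact behavior is not shown here, so strided slicing is only one plausible reading of these rates:

import numpy as np

signal = np.random.randn(1024, 3)  # placeholder (time, node) signal
u_down = signal[::4]               # keep every 4th sample: down_sample_rate_u = 4
y_down = signal[::128]             # keep every 128th sample: down_sample_rate_y = 128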
Code Example #5
MAX_EPOCHS = 32 * 5
CHECK_STEPS = 1
N_RECURRENT_STEP = 192
DATA_SHIFT = 2
MAX_BACK_TRACK = 16
MAX_CHANGE = 0.001
BATCH_RANDOM_DROP_RATE = 0.5
TARGET_T_DELTA = 1. / 16
N_CONFOUNDS = 19

# load data
spm_data = sio.loadmat(RAW_DATA_PATH)
spm_data['stimulus_names'] = ['Photic', 'Motion', 'Attention']
spm_data['node_names'] = ['V1', 'V5', 'SPC']
spm_data['u'] = spm_data['u'].todense()
du = tb.load_template(TEMPLATE_PATH)

# process spm_data: up-sample u and y to 16 frames/second
shape = list(spm_data['u'].shape)
n_time_point = shape[0]
spm_data['u_upsampled'] = du.resample(spm_data['u'], shape, order=3)
spm_data['y_upsampled'] = du.resample(spm_data['y'], shape, order=3)
n_segments = mth.ceil((n_time_point - N_RECURRENT_STEP) / DATA_SHIFT)

# assume the observation model is
# y = DCM_RNN(u) + Confounds * weights + noise
# Confounds are the first N_CONFOUNDS (19) discrete cosine transform (DCT) basis functions
confounds = idct(np.eye(n_time_point)[:, :N_CONFOUNDS], axis=0, norm='ortho')
# plt.plot(confounds)
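Given the stated observation model, the confound weights can be estimated by ordinary least squares and the fitted drift subtracted. A minimal sketch of that step with a placeholder y; this illustrates the model in the comment above, not the project's actual fitting code:

import numpy as np
from scipy.fftpack import idct

n_time_point, N_CONFOUNDS = 400, 19
confounds = idct(np.eye(n_time_point)[:, :N_CONFOUNDS], axis=0, norm='ortho')
y = np.random.randn(n_time_point, 3)                     # placeholder BOLD signal
weights, *_ = np.linalg.lstsq(confounds, y, rcond=None)  # least-squares confound weights
y_detrended = y - confounds @ weights                    # remove the fitted drift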
Code Example #6
File: show_results.py (Project: andreofner/DCM_RNN)

CONDITION = 'h1_s0_n0'
EXPERIMENT_PATH = os.path.join(PROJECT_DIR, 'experiments',
                               'compare_estimation_with_simulated_data')
DATA_PATH = os.path.join(EXPERIMENT_PATH, 'data')
RESULT_PATH = os.path.join(EXPERIMENT_PATH, 'results')
IMAGE_PATH = os.path.join(EXPERIMENT_PATH, 'images')

CORE_PATH = os.path.join(DATA_PATH, 'core.pkl')
DCM_RNN_RESULT_PATH = os.path.join(RESULT_PATH,
                                   'estimation_' + CONDITION + '.pkl')
SPM_RESULT_PATH = os.path.join(RESULT_PATH, 'saved_data_' + CONDITION + '.mat')

# show estimation curves
core = tb.load_template(CORE_PATH)
du = tb.DataUnit()
du.load_parameter_core(core)
du.recover_data_unit()
if CONDITION == 'h1_s1_n1':
    y_true = du.get('y_noised')[::128]
else:
    y_true = du.get('y')[::128]
print(y_true.shape)

du_rnn = pickle.load(open(DCM_RNN_RESULT_PATH, 'rb'))
y_rnn = du_rnn.get('y')[::32]
print(y_rnn.shape)

spm = sio.loadmat(SPM_RESULT_PATH)
spm['b'] = np.rollaxis(spm['b'], 2)  # move the stimulus axis to the front (MATLAB stores it last)
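np.rollaxis(spm['b'], 2) moves the last axis to the front, so an array presumably saved by MATLAB as (node, node, stimulus) is read in Python as (stimulus, node, node), matching the stimulus-first B layout used in Code Example #7:

import numpy as np

b = np.zeros((3, 3, 2))          # (node, node, stimulus), as assumed above
print(np.rollaxis(b, 2).shape)   # (2, 3, 3): stimulus axis first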
Code Example #7
                       'gamma': False,
                       'tao': False,
                       'epsilon': False,
                       'V0': False,
                       'TE': False,
                       'r0': False,
                       'theta0': False,
                       'x_h_coupling': False
                       }

EXPERIMENT_PATH = os.path.join(PROJECT_DIR, 'experiments', 'compare_estimation_with_simulated_data')
DATA_PATH = os.path.join(EXPERIMENT_PATH, 'data', 'du_DCM_RNN.pkl')
SAVE_PATH = os.path.join(EXPERIMENT_PATH, 'results', 'estimation_' + CONDITION + '.pkl')

# load data
du = tb.load_template(DATA_PATH)
if SETTINGS[CONDITION]['if_noised_y']:
    du._secured_data['y'] = du.get('y_noised')
n_segments = mth.ceil((len(du.get('y')) - N_RECURRENT_STEP) / DATA_SHIFT)

# specify initialization values, loss weighting factors, and mask (support of effective connectivity)
x_parameter_initial = {}
x_parameter_initial['A'] = - np.eye(du.get('n_node'))
x_parameter_initial['B'] = [np.zeros((du.get('n_node'), du.get('n_node'))) for _ in range(du.get('n_stimuli'))]
x_parameter_initial['C'] = np.zeros((du.get('n_node'), du.get('n_stimuli')))
x_parameter_initial['C'][0, 0] = 1.
x_parameter_initial['C'][1, 1] = 1.
h_parameter_initial = du.get('hemodynamic_parameter')

loss_weighting = {'prediction': 1., 'sparsity': 1., 'prior': 1., 'Wxx': 1., 'Wxxu': 1., 'Wxu': 1.}
mask = du.create_support_mask()
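create_support_mask presumably returns the support (allowed non-zero pattern) of the effective-connectivity parameters. A common way such a mask is applied is element-wise, zeroing estimates outside the support after each update; a minimal sketch with a hypothetical 3-node pattern, not DcmRnn internals:

import numpy as np

A_estimate = np.random.randn(3, 3)               # unconstrained estimate
mask_A = np.eye(3) + np.diag(np.ones(2), k=-1)   # hypothetical support pattern
A_estimate = A_estimate * mask_A                 # zero entries outside the support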