dr.trainable_variables_nodes = [
    v for v in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
]

# y_hat is regenerated after each global update;
# the gradients of all segments are calculated with respect to this same y_hat,
# i.e., Wxx, Wxxu, and Wxu are not updated within each segment.
Wxx = np.identity(3) * (1 - 1.6 * du.get('t_delta'))
Wxxu = np.zeros((3, 3))
Wxu = np.array([0., 0., 0.]).reshape(3, 1)

# y_regenerated = regenerate_data(du, Wxx, Wxxu, Wxu)
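
# A minimal sketch (an assumption, not the toolbox's regenerate_data) of how the neural
# states could be regenerated from u with the current parameters, using the discretized
# bilinear update x[t+1] = Wxx x[t] + u[t] * Wxxu x[t] + Wxu u[t]; the hemodynamic states
# and y that regenerate_data presumably also produces are omitted here.
def regenerate_x_sketch(u, x_initial, Wxx, Wxxu, Wxu):
    x = np.zeros((u.shape[0], Wxx.shape[0]))
    x[0] = x_initial
    for t in range(u.shape[0] - 1):
        u_t = float(np.ravel(u[t])[0])  # single stimulus, matching the 3x1 Wxu above
        x[t + 1] = Wxx.dot(x[t]) + u_t * Wxxu.dot(x[t]) + u_t * Wxu.flatten()
    return x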

data = {
    'u':
    tb.split(du.get('u'), n_segment=dr.n_recurrent_step, n_step=dr.shift_data),
    'x_initial':
    tb.split(du.get('x'),
             n_segment=dr.n_recurrent_step,
             n_step=dr.shift_data,
             shift=0),
    'h_initial':
    tb.split(du.get('h'),
             n_segment=dr.n_recurrent_step,
             n_step=dr.shift_data,
             shift=1),
    'x_whole':
    tb.split(du.get('x'),
             n_segment=dr.n_recurrent_step + dr.shift_u_x,
             n_step=dr.shift_data,
             shift=0),
Example No. 2
    # modify spm_data if necessary
    # iterator = itertools.product(*[package_list, ['H_PARA_INITIAL'], [values]])
    # package_list = p.starmap(tm.modify_signel_data_package, iterator)

    # building the graph and training must be done in one function
    iterator = itertools.product(*[[dr], package_list])
    package_list = p.starmap(tm.build_initializer_graph_and_train, iterator)

data = package_list[0].data
assert 'loss_smooth_normalizer' in data.keys()

signal_length = data['x_true_merged'].data.shape[0]
check_length = tm.N_SEGMENTS * tm.DATA_SHIFT

# test split and merge
recovered = tb.merge(tb.split(du.get('x'), 64, 4), 64, 4)
original = du.get('x')[:len(recovered)]
np.testing.assert_array_almost_equal(recovered, original)
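
# For reference, a self-contained sketch of the split/merge semantics assumed above (an
# approximation, not the toolbox implementation): split cuts overlapping segments of length
# n_segment every n_step samples (optionally offset by shift); merge writes each segment
# back at its offset, so merge(split(x, n, s), n, s) recovers x up to the last covered sample.
def split_sketch(x, n_segment, n_step, shift=0):
    n = (x.shape[0] - shift - n_segment) // n_step + 1
    return [x[shift + i * n_step: shift + i * n_step + n_segment] for i in range(n)]

def merge_sketch(segments, n_segment, n_step):
    out = np.zeros((n_step * (len(segments) - 1) + n_segment,) + segments[0].shape[1:])
    for i, segment in enumerate(segments):
        out[i * n_step: i * n_step + n_segment] = segment
    return out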

# check recorded u and processed u
original = du.get('u')[:check_length]
recovered = data['u_merged'][:check_length]
np.testing.assert_array_almost_equal(recovered, original)
plt.plot(original, '-', label="original")
plt.plot(recovered, '*', label="processed")
plt.legend()
plt.show()

# check recorded x_true and the original x_true
original = du.get('x')[:check_length]
recovered = data['x_true_merged'].data[:check_length]
np.testing.assert_array_almost_equal(recovered, original)
Example No. 3
dr.build_main_graph_parallel(neural_parameter_initial=neural_parameter_initial)


# process after building the main graph
dr.support_masks = dr.setup_support_mask(mask)
dr.x_parameter_nodes = [v for v in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                                     scope=dr.variable_scope_name_x_parameter)]
dr.trainable_variables_nodes = [v for v in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)]
dr.trainable_variables_names = [v.name for v in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)]
du_hat.variable_names_in_graph = du_hat.parse_variable_names(dr.trainable_variables_names)


# prepare data for training
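# data holds segments split from the reference DataUnit du (the stimulus u and the observed y),
# while data_hat holds segment-initial neural and hemodynamic states taken from the current
# estimate du_hat, presumably used to seed each recurrent segment during training.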
data_hat = {
    'x_initial': tb.split(du_hat.get('x'), n_segment=dr.n_recurrent_step, n_step=dr.shift_data, shift=0),
    'h_initial': tb.split(du_hat.get('h'), n_segment=dr.n_recurrent_step, n_step=dr.shift_data, shift=1),
}
data = {
    'u': tb.split(du.get('u'), n_segment=dr.n_recurrent_step, n_step=dr.shift_data),
    'y': tb.split(du.get('y'), n_segment=dr.n_recurrent_step, n_step=dr.shift_data, shift=dr.shift_u_y)}
for k in data_hat.keys():
    data[k] = data_hat[k][: N_SEGMENTS]
for k in data.keys():
    data[k] = data[k][: N_SEGMENTS]
batches = tb.make_batches(data['u'], data_hat['x_initial'], data_hat['h_initial'], data['y'],
                          batch_size=dr.batch_size, if_shuffle=True)
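
# A rough sketch (an assumption, not the toolbox's tb.make_batches) of what the call above
# might produce: zip the per-segment lists into (u, x_initial, h_initial, y) tuples,
# optionally shuffle the segment order, and group them into batches of batch_size.
def make_batches_sketch(u, x_initial, h_initial, y, batch_size, if_shuffle=True):
    samples = list(zip(u, x_initial, h_initial, y))
    if if_shuffle:
        order = np.random.permutation(len(samples))
        samples = [samples[i] for i in order]
    return [samples[i: i + batch_size] for i in range(0, len(samples), batch_size)]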

print('start session')
# start session
isess = tf.InteractiveSession()
Example No. 4
dr.build_main_graph_parallel(neural_parameter_initial=neural_parameter_initial)


# process after building the main graph
dr.support_masks = dr.setup_support_mask(mask)
dr.x_parameter_nodes = [v for v in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                                     scope=dr.variable_scope_name_x_parameter)]
dr.trainable_variables_nodes = [v for v in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)]
dr.trainable_variables_names = [v.name for v in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)]
du_hat.variable_names_in_graph = du_hat.parse_variable_names(dr.trainable_variables_names)


# prepare data for training
data = {
    'u': tb.split(du.get('u'), n_segment=dr.n_recurrent_step, n_step=dr.shift_data),
    'x_initial': tb.split(du.get('x'), n_segment=dr.n_recurrent_step, n_step=dr.shift_data, shift=0),
    'h_initial': tb.split(du.get('h'), n_segment=dr.n_recurrent_step, n_step=dr.shift_data, shift=1),
    'x_whole': tb.split(du.get('x'), n_segment=dr.n_recurrent_step + dr.shift_u_x, n_step=dr.shift_data, shift=0),
    'h_whole': tb.split(du.get('h'), n_segment=dr.n_recurrent_step + dr.shift_x_y, n_step=dr.shift_data, shift=1),
    'h_predicted': tb.split(du.get('h'), n_segment=dr.n_recurrent_step, n_step=dr.shift_data, shift=dr.shift_u_y),
    'y': tb.split(du.get('y'), n_segment=dr.n_recurrent_step, n_step=dr.shift_data, shift=dr.shift_u_y)}
data_hat = {
    'x_initial': tb.split(du_hat.get('x'), n_segment=dr.n_recurrent_step, n_step=dr.shift_data, shift=0),
    'h_initial': tb.split(du_hat.get('h'), n_segment=dr.n_recurrent_step, n_step=dr.shift_data, shift=1),
}
for k in data_hat.keys():
    data[k] = data_hat[k][: N_SEGMENTS]
for k in data.keys():
    data[k] = data[k][: N_SEGMENTS]
batches = tb.make_batches(data['u'], data_hat['x_initial'], data_hat['h_initial'], data['y'],
                          batch_size=dr.batch_size, if_shuffle=True)
Example No. 5
mask[dr.Wxxu[2].name][0, 1] = 1
mask[dr.Wxu.name][0, 0] = 1
dr.support_masks = dr.setup_support_mask(mask)
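
# Presumably the support mask marks which connectivity entries are free to change during
# training; entries left at 0 are held fixed. A tiny NumPy illustration of the assumed
# masking step (hypothetical names, not the DcmRnn API):
example_gradient = np.ones((3, 3))
example_mask = np.zeros((3, 3))
example_mask[0, 1] = 1
masked_gradient = example_gradient * example_mask  # a gradient survives only where the mask is 1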
dr.x_parameter_nodes = [
    v for v in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                 scope=dr.variable_scope_name_x_parameter)
]
dr.trainable_variables_nodes = [
    v for v in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
]

# prepare spm_data for training
data = {
    'u':
    tb.split(spm_data['u_upsampled'],
             n_segment=dr.n_recurrent_step,
             n_step=dr.shift_data),
    'y':
    tb.split(spm_data['y_upsampled'],
             n_segment=dr.n_recurrent_step,
             n_step=dr.shift_data,
             shift=dr.shift_u_y),
}
for k in data.keys():
    data[k] = data[k][:N_SEGMENTS]
N_TEST_SAMPLE = min(N_SEGMENTS, len(data['y']))


def apply_and_check(isess, grads_and_vars, step_size, u, x_connector,
                    h_connector, y_true):
Example No. 6
def prepare_data(max_segments=None, node_index=None):
    global data
    global loss_x_normalizer
    global loss_y_normalizer
    global loss_smooth_normalizer
    global isess

    global SEQUENCE_LENGTH
    global H_STATE_INITIAL
    global IF_NOISED_Y
    global SNR
    global NOISE

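    # additive Gaussian observation noise: its std is chosen so that np.std(y) / std equals SNR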
    if IF_NOISED_Y:
        std = np.std(du.get('y').reshape([-1])) / SNR
        NOISE = np.random.normal(0, std, du.get('y').shape)
    else:
        NOISE = np.zeros(du.get('y').shape)

    ## saved line. SEQUENCE_LENGTH = dr.n_recurrent_step + (len(spm_data['x_true']) - 1) * dr.shift_data
    data['y_train'] = tb.split(du.get('y') + NOISE, dr.n_recurrent_step, dr.shift_data, dr.shift_x_y)
    max_segments_natural = len(data['y_train'])
    data['y_true'] = tb.split(du.get('y'), dr.n_recurrent_step, dr.shift_data, dr.shift_x_y)[:max_segments_natural]
    data['h_true_monitor'] = tb.split(du.get('h'), dr.n_recurrent_step, dr.shift_data)[:max_segments_natural]
    data['x_true'] = tb.split(du.get('x'), dr.n_recurrent_step, dr.shift_data)[:max_segments_natural]
    data['u'] = tb.split(du.get('u'), dr.n_recurrent_step, dr.shift_data)[:max_segments_natural]

    if max_segments is not None:
        if max_segments > max_segments_natural:
            warnings.warn("max_segments is larger than the length of available spm_data", UserWarning)
        else:
            data['u'] = data['u'][:max_segments]
            data['x_true'] = data['x_true'][:max_segments]
            data['h_true_monitor'] = data['h_true_monitor'][:max_segments]
            data['y_true'] = data['y_true'][:max_segments]
            data['y_train'] = data['y_train'][:max_segments]

    if node_index is not None:
        data['x_true'] = [array[:, node_index].reshape(dr.n_recurrent_step, 1) for array in data['x_true']]
        data['h_true_monitor'] = [np.take(array, node_index, 1) for array in data['h_true_monitor']]
        data['y_true'] = [array[:, node_index].reshape(dr.n_recurrent_step, 1) for array in data['y_true']]
        data['y_train'] = [array[:, node_index].reshape(dr.n_recurrent_step, 1) for array in data['y_train']]
        H_STATE_INITIAL = H_STATE_INITIAL[node_index].reshape(1, 4)

    # collect merged spm_data (without split and merge, it can be tricky to cut the proper part out of du)
    data['u_merged'] = tb.merge(data['u'], dr.n_recurrent_step, dr.shift_data)
    data['x_true_merged'] = tb.merge(data['x_true'], dr.n_recurrent_step, dr.shift_data)
    # x_hat is wrapped in an ArrayWrapper so that it can be modified through a single index
    data['x_hat_merged'] = tb.ArrayWrapper(np.zeros(data['x_true_merged'].shape), dr.n_recurrent_step, dr.shift_data)
    data['h_true_monitor_merged'] = tb.merge(data['h_true_monitor'], dr.n_recurrent_step, dr.shift_data)
    data['y_true_merged'] = tb.merge(data['y_true'], dr.n_recurrent_step, dr.shift_data)
    data['y_train_merged'] = tb.merge(data['y_train'], dr.n_recurrent_step, dr.shift_data)

    # run forward pass with x_true to show y error caused by error in the network parameters
    isess.run(tf.global_variables_initializer())
    y_hat_x_true_log, h_hat_x_true_monitor_log, h_hat_x_true_connector_log = \
        dr.run_initializer_graph(isess, H_STATE_INITIAL, data['x_true'])
    data['h_hat_x_true_monitor'] = h_hat_x_true_monitor_log
    data['y_hat_x_true'] = y_hat_x_true_log
    data['h_hat_x_true_monitor_merged'] = tb.merge(h_hat_x_true_monitor_log, dr.n_recurrent_step, dr.shift_data)
    data['y_hat_x_true_merged'] = tb.merge(y_hat_x_true_log, dr.n_recurrent_step, dr.shift_data)

    loss_x_normalizer = np.sum(data['x_true_merged'].flatten() ** 2)
    loss_y_normalizer = np.sum(data['y_true_merged'].flatten() ** 2)
    loss_smooth_normalizer = np.std(data['x_true_merged'].flatten()) ** 2

    return data
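
# A possible call pattern (illustrative only; it assumes du, dr, isess and the module-level
# globals referenced above have already been set up):
#     data = prepare_data(max_segments=10, node_index=None)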
Example No. 7
def calculate_log_data():
    global isess

    if 'y_hat_x_true' not in data.keys():
        # run forward pass with x_true to show y error caused by error in the network parameters
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            y_hat_x_true_log, h_hat_x_true_monitor_log, h_hat_x_true_connector_log = \
                dr.run_initializer_graph(sess, H_STATE_INITIAL, data['x_true'])

        data['h_hat_x_true_monitor'] = h_hat_x_true_monitor_log
        data['y_hat_x_true'] = y_hat_x_true_log
        data['h_hat_x_true_monitor_merged'] = tb.merge(h_hat_x_true_monitor_log, dr.n_recurrent_step, dr.shift_data)
        data['y_hat_x_true_merged'] = tb.merge(y_hat_x_true_log, dr.n_recurrent_step, dr.shift_data)

        data['loss_x_normalizer'] = np.sum(data['x_true_merged'].flatten() ** 2)
        data['loss_y_normalizer'] = np.sum(data['y_true_merged'].flatten() ** 2)
        data['loss_smooth_normalizer'] = np.std(data['x_true_merged'].flatten()) ** 2

    data['x_hat'] = tb.split(data['x_hat_merged'].get(), n_segment=dr.n_recurrent_step, n_step=dr.shift_data)
    if IF_NODE_MODE:
        data['x_hat'] = [array.reshape(dr.n_recurrent_step, 1) for array in data['x_hat']]

    isess.run(tf.global_variables_initializer())
    y_hat_log, h_hat_monitor_log, h_hat_connector_log = \
        dr.run_initializer_graph(isess, H_STATE_INITIAL, data['x_hat'])

    # collect results
    # segmented spm_data (the self-assignments below are no-ops, apparently kept to list
    # the fields that the data dictionary carries at this point)
    data['x_hat'] = data['x_hat']
    data['x_true'] = data['x_true']

    data['h_true_monitor'] = data['h_true_monitor']
    data['h_hat_x_true_monitor'] = data['h_hat_x_true_monitor']
    data['h_hat_monitor'] = h_hat_monitor_log

    data['y_train'] = data['y_train']
    data['y_true'] = data['y_true']
    data['y_hat_x_true'] = data['y_hat_x_true']
    data['y_hat'] = y_hat_log

    # merged spm_data
    data['x_true_merged'] = data['x_true_merged']
    data['x_hat_merged'] = data['x_hat_merged']

    data['h_true_monitor_merged'] = data['h_true_monitor_merged']
    data['h_hat_x_true_monitor_merged'] = data['h_hat_x_true_monitor_merged']
    data['h_hat_monitor_merged'] = tb.merge(h_hat_monitor_log, dr.n_recurrent_step, dr.shift_data)

    data['y_train_merged'] = data['y_train_merged']
    data['y_true_merged'] = data['y_true_merged']
    data['y_hat_x_true_merged'] = data['y_hat_x_true_merged']
    data['y_hat_merged'] = tb.merge(y_hat_log, dr.n_recurrent_step, dr.shift_data)

    # calculate loss
    loss_x = np.sum((data['x_hat_merged'].data.flatten() - data['x_true_merged'].flatten()) ** 2)
    loss_y = np.sum((data['y_hat_merged'].flatten() - data['y_true_merged'].flatten()) ** 2)
    loss_smooth = np.sum((data['x_hat_merged'].data[0:-1].flatten() - data['x_hat_merged'].data[1:].flatten()) ** 2)

    data['loss_x'].append(loss_x / loss_x_normalizer)
    data['loss_y'].append(loss_y / loss_y_normalizer)
    data['loss_smooth'].append(loss_smooth / loss_smooth_normalizer)
    data['loss_total'].append((loss_y + dr.loss_weighting['smooth'] * loss_smooth) / (
        loss_y_normalizer + dr.loss_weighting['smooth'] * loss_smooth_normalizer))
Example No. 8
dr.trainable_variables_nodes = [
    v for v in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
]
dr.trainable_variables_names = [
    v.name for v in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
]
du_hat.variable_names_in_graph = du_hat.parse_variable_names(
    dr.trainable_variables_names)

# prepare data for training
y_target = get_target_curve(du_hat.get('y'), confounds,
                            spm_data['y_upsampled'])
data = {
    'u':
    tb.split(spm_data['u_upsampled'],
             n_segment=dr.n_recurrent_step,
             n_step=dr.shift_data)
}
data_hat = {
    'x_initial':
    tb.split(du_hat.get('x'),
             n_segment=dr.n_recurrent_step,
             n_step=dr.shift_data,
             shift=0),
    'h_initial':
    tb.split(du_hat.get('h'),
             n_segment=dr.n_recurrent_step,
             n_step=dr.shift_data,
             shift=1),
    'y':
    tb.split(y_target,
Example No. 9
    'k': False,
    'gamma': False,
    'tao': False,
    'epsilon': False,
    'V0': False,
    'TE': False,
    'r0': False,
    'theta0': False,
    'x_h_coupling': False
}
dr.build_main_graph(neural_parameter_initial=neural_parameter_initial)

# prepare spm_data
data = {
    'u':
    tb.split(du.get('u'), n_segment=dr.n_recurrent_step, n_step=dr.shift_data),
    'y':
    tb.split(du.get('y'),
             n_segment=dr.n_recurrent_step,
             n_step=dr.shift_data,
             shift=dr.shift_u_y)
}
n_segment = min([len(data[x]) for x in data.keys()])
for k in data.keys():
    data[k] = data[k][:min([n_segment, N_SEGMENTS])]

# [val.name for val in tf.global_variables()]
# [val.name for val in tf.trainable_variables()]

sess = tf.InteractiveSession()
Example No. 10
    def prepare_data(self, du, dr, data_package):
        """
        Prepare spm_data in du for dr in training. Create a 'spm_data' dictionary
        :param du: a DataUnit instance, with needed spm_data
        :param dr: a DcmRnn instance, with needed parameters
        :param data_package: a dictionary, stores configure and spm_data for a particular experimental case
        :return: modified
        """
        dp = data_package
        data = dp.data

        # create spm_data according to flag
        if dp.IF_RANDOM_H_PARA:
            data['H_PARA_INITIAL'] = \
                dr.randomly_generate_hemodynamic_parameters(dr.n_region, deviation_constraint=2).astype(np.float32)
        else:
            data['H_PARA_INITIAL'] = dr.get_standard_hemodynamic_parameters(
                n_node=dr.n_region).astype(np.float32)

        if dp.IF_RANDOM_H_STATE_INIT:
            data['H_STATE_INITIAL'] = du.get('h')[random.randint(
                64,
                du.get('h').shape[0] - 64)].astype(np.float32)
        else:
            data['H_STATE_INITIAL'] = \
                dr.set_initial_hemodynamic_state_as_inactivated(n_node=dr.n_region).astype(np.float32)

        if dp.IF_NOISED_Y:
            std = np.std(du.get('y').reshape([-1])) / dp.SNR
            data['NOISE'] = np.random.normal(0, std, du.get('y').shape)
        else:
            data['NOISE'] = np.zeros(du.get('y').shape)

        data['y_train'] = tb.split(
            du.get('y') + data['NOISE'], dr.n_recurrent_step, dr.shift_data,
            dr.shift_x_y)
        max_segments_natural = len(data['y_train'])
        data['max_segments_natural'] = max_segments_natural
        data['y_true'] = tb.split(du.get('y'), dr.n_recurrent_step,
                                  dr.shift_data,
                                  dr.shift_x_y)[:max_segments_natural]
        data['h_true_monitor'] = tb.split(du.get('h'), dr.n_recurrent_step,
                                          dr.shift_data)[:max_segments_natural]
        data['x_true'] = tb.split(du.get('x'), dr.n_recurrent_step,
                                  dr.shift_data)[:max_segments_natural]
        data['u'] = tb.split(du.get('u'), dr.n_recurrent_step,
                             dr.shift_data)[:max_segments_natural]

        if dp.N_SEGMENTS is not None:
            if dp.N_SEGMENTS > max_segments_natural:
                dp.N_SEGMENTS = max_segments_natural
                warnings.warn(
                    "dp.N_SEGMENTS is larger than the number of available segments",
                    UserWarning)
            else:
                data['u'] = data['u'][:dp.N_SEGMENTS]
                data['x_true'] = data['x_true'][:dp.N_SEGMENTS]
                data['h_true_monitor'] = data['h_true_monitor'][:dp.N_SEGMENTS]
                data['y_true'] = data['y_true'][:dp.N_SEGMENTS]
                data['y_train'] = data['y_train'][:dp.N_SEGMENTS]

        if dp.IF_NODE_MODE:
            node_index = dp.NODE_INDEX
            data['x_true'] = [
                array[:, node_index].reshape(dr.n_recurrent_step, 1)
                for array in data['x_true']
            ]
            data['h_true_monitor'] = [
                np.take(array, node_index, 1)
                for array in data['h_true_monitor']
            ]
            data['y_true'] = [
                array[:, node_index].reshape(dr.n_recurrent_step, 1)
                for array in data['y_true']
            ]
            data['y_train'] = [
                array[:, node_index].reshape(dr.n_recurrent_step, 1)
                for array in data['y_train']
            ]
            data['H_STATE_INITIAL'] = data['H_STATE_INITIAL'][
                node_index].reshape(1, 4)

        # saved dp.SEQUENCE_LENGTH = dr.n_recurrent_step + (len(spm_data['x_true']) - 1) * dr.shift_data
        # collect merged spm_data (without split and merge, it can be tricky to cut the proper part out of du)
        data['u_merged'] = tb.merge(data['u'], dr.n_recurrent_step,
                                    dr.shift_data)
        data['x_true_merged'] = tb.merge(data['x_true'], dr.n_recurrent_step,
                                         dr.shift_data)
        # x_hat is wrapped in an ArrayWrapper so that it can be modified through a single index
        data['x_hat_merged'] = \
            tb.ArrayWrapper(np.zeros(data['x_true_merged'].shape), dr.n_recurrent_step, dr.shift_data)
        data['h_true_monitor_merged'] = \
            tb.merge(data['h_true_monitor'], dr.n_recurrent_step, dr.shift_data)
        data['y_true_merged'] = tb.merge(data['y_true'], dr.n_recurrent_step,
                                         dr.shift_data)
        data['y_train_merged'] = tb.merge(data['y_train'], dr.n_recurrent_step,
                                          dr.shift_data)

        return data_package
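
# A possible call pattern (an assumption: tm here stands for whatever instance exposes this
# method, as suggested by tm.build_initializer_graph_and_train in the earlier example):
#     data_package = tm.prepare_data(du, dr, data_package)
#     data = data_package.data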