Example #1
    def smoke_test(self):
        """ A simple smoke test with random initialization"""

        input_size = 4
        input_length = 4
        batch_size = 2
        n_units = 4

        cell = ESNCell(n_units)
        inputs = np.random.random([input_length, batch_size, input_size])

        state = cell.zero_state(batch_size, tf.float64)
        for i in range(input_length):
            if i > 0: tf.get_variable_scope().reuse_variables()
            state, _ = cell(inputs[i, :, :], state)

        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())

            final_states = sess.run(state)

        expected_final_states = [[
            -0.56735968, -0.21625957, 0.69647415, -0.91361383
        ], [-0.22654705, -0.15751715, 0.85077971, -0.89757621]]

        self.assertAllClose(final_states, expected_final_states)
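The loop above unrolls the cell manually and reuses variables by hand. A minimal sketch (not from the source, and assuming ESNCell is importable as in the tests) of the same computation via tf.nn.dynamic_rnn, which handles unrolling and variable reuse internally:

import numpy as np
import tensorflow as tf

cell = ESNCell(4)                                    # n_units = 4
inputs = tf.constant(np.random.random([2, 4, 4]))    # [batch, time, input_size]
outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float64)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(final_state))                     # [batch, n_units] final reservoir state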
Example #2
    def test_esn_dynamics(self):
        """ A simple smoke test """

        input_size = 4
        input_length = 4
        batch_size = 2
        n_units = 4

        cell = ESNCell(n_units)
        inputs = np.random.random([input_length, batch_size, input_size])

        state = cell.zero_state(batch_size, tf.float64)
        for i in range(input_length):
            if i > 0: tf.get_variable_scope().reuse_variables()
            state, _ = cell(inputs[i, :, :], state)

        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())
            final_states = sess.run([state])

        expected_final_states = [[[
            0.75952783, -0.96463442, 0.72289173, 0.38016839
        ], [0.82451594, -0.99358452, 0.86248011, 0.24540841]]]

        self.assertAllClose(final_states, expected_final_states)
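Both tests assert hard-coded expected states even though the inputs come from np.random, so the suite must pin the random seeds somewhere outside these excerpts. A minimal sketch of the kind of fixture that would make them deterministic (the seed values here are assumptions, not from the source):

    def setUp(self):
        np.random.seed(2)        # assumed seed value; the real one is not in this excerpt
        tf.set_random_seed(2)    # also pins ESNCell's random weight initialization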
Example #3
    def test_esn_dynamics(self):
        """ Simple test of reservoir dynamics """

        # Data
        w_r = np.array([[0.03887243, -0.28983904, -0.53829223],
                        [0.06456875, 0.0, 0.151112258],
                        [-0.042949107, -0.48700565, -0.22361958]])
        w_in = np.array([[0.3, 0.2], [-0.2, 0.01], [0.1, -0.4]])
        w_bias = np.array([[0.2, -0.1, -0.34]])

        x = np.array([[1, 0.3], [0.1, 0.4], [-1, 0.3], [-0.3, 0.4]])
        states_zero = np.array([[0.0, 0.0, 0.0]])

        # Manually compute reservoir states
        s = states_zero
        states_manual = np.array(states_zero)
        for i in x:
            s = np.tanh(np.matmul(w_in, i) + np.matmul(s, w_r) + w_bias)
            states_manual = np.append(states_manual, s, axis=0)
        states_manual = states_manual[1:]

        # Oger
        # ESN_O = Oger.nodes.ReservoirNode(w_in=w_in, w_bias=w_bias, w=w_r.transpose(), output_dim=3, reset_states=True)
        # ESN_O.states = states_zero
        # states_Oger = ESN_O(x)

        # Tensorflow
        with tf.variable_scope("rnn/ESNCell"):
            tf.get_variable(initializer=w_r, name='ReservoirMatrix')
            tf.get_variable(initializer=w_in.transpose(), name="InputMatrix")
            tf.get_variable(initializer=w_bias[0], name="Bias")

        tf.get_variable_scope().reuse_variables()
        cell = ESNCell(num_units=np.size(w_r, axis=1))
        (outs, _) = tf.nn.dynamic_rnn(cell=cell,
                                      inputs=np.reshape(x, [1, 4, 2]),
                                      initial_state=states_zero,
                                      time_major=False)
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            states_tf = sess.run(outs)

        self.assertAllClose(states_manual, states_tf[0])
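The manual loop implements the standard leak-free ESN state update, x_t = tanh(W_in u_t + x_{t-1} W_r + b). A self-contained NumPy helper that mirrors it (a sketch with illustrative names, not part of the source):

import numpy as np

def reservoir_states(x, w_in, w_r, w_bias, s0):
    """Run the tanh reservoir update over a sequence of input vectors."""
    s, states = s0, []
    for u in x:
        # x_t = tanh(W_in u_t + x_{t-1} W_r + b), matching the test above
        s = np.tanh(np.matmul(w_in, u) + np.matmul(s, w_r) + w_bias)
        states.append(s[0])
    return np.array(states)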
Example #4
import numpy as np
import requests
import tensorflow as tf

from esn_cell import ESNCell  # assumed import path for ESNCell


def MackeyGlass(tr_size=500,
                washout_size=50,
                units=30,
                connectivity=0.2,
                scale=0.7,
                elements=2000):
    print("Fetching data...")
    data_str = requests.get(
        'http://minds.jacobs-university.de/sites/default/files/uploads/mantas/code/MackeyGlass_t17.txt'
    ).content

    data = list(map(float, data_str.splitlines()[:elements]))  # list() so the data can be sliced below (Python 3)
    data_t = tf.reshape(tf.constant(data), [1, elements, 1])
    esn = ESNCell(units, connectivity, scale)

    print("Building graph...")
    outputs, final_state = tf.nn.dynamic_rnn(esn, data_t, dtype=tf.float32)
    washed = tf.squeeze(tf.slice(outputs, [0, washout_size, 0], [-1, -1, -1]))

    with tf.Session() as S:
        S.run(tf.global_variables_initializer())

        print("Computing embeddings...")
        res = S.run(washed)

        print("Computing direct solution...")
        state = np.array(res)
        tr_state = np.mat(state[:tr_size])
        ts_state = np.mat(state[tr_size:])
        wout = np.transpose(
            np.mat(data[washout_size + 1:tr_size + washout_size + 1]) *
            np.transpose(np.linalg.pinv(tr_state)))

        print("Testing performance...")
        ts_out = np.mat((np.transpose(ts_state * wout).tolist())[0][:-1])
        ts_y = np.mat(data[washout_size + tr_size + 1:])

        ts_mse = np.mean(np.square(ts_y - ts_out))

    print("Test MSE: " + str(ts_mse))
Example #5
    def __init__(self,
                 maxGradient,
                 timeSteps,
                 nHorizons,
                 inputSize,
                 nHiddenUnits,
                 cellType=RNNCellType.BasicCell,
                 nLayers=2):
        self.maxGradient = maxGradient
        self.nLayers = nLayers
        self.timeSteps = timeSteps
        self.nHorizons = nHorizons
        self.inputSize = inputSize
        self.nHiddenUnits = nHiddenUnits
        self.cellType = cellType
        with tf.name_scope("Parameters"):
            self.learningRate = tf.placeholder(tf.float32, name="learningRate")
            self.keepProbability = tf.placeholder(tf.float32,
                                                  name="keepProbability")

        with tf.name_scope("Input"):
            self.input = tf.placeholder(tf.float32,
                                        shape=(None, timeSteps, inputSize),
                                        name="input")
            self.targets = tf.placeholder(tf.float32,
                                          shape=(None, timeSteps, nHorizons),
                                          name="targets")
            self.init = tf.placeholder(tf.float32, shape=(), name="init")
            self.batchSize = self.input.get_shape()[0]
        # Optional input-embedding block, currently disabled:
        #with tf.name_scope("Embedding"):
        #    self.embedding = tf.Variable(tf.random_uniform((inputSize, hidden_units), -self.init, self.init),
        #                                 dtype=tf.float32,
        #                                 name="embedding")
        #    self.w = tf.get_variable("w", (inputSize, hidden_units))
        #    self.b = tf.get_variable("b", inputSize)

        #    self.embedded_input = tf.matmul(self.input, self.w) + self.b

        with tf.name_scope("RNN"):
            if cellType == RNNCellType.LSTM:
                cell = tf.nn.rnn_cell.LSTMCell(nHiddenUnits,
                                               state_is_tuple=True)
            elif cellType == RNNCellType.GRU:
                cell = tf.nn.rnn_cell.GRUCell(nHiddenUnits)
            elif cellType == RNNCellType.ESN:
                cell = ESNCell(nHiddenUnits, inputSize)
            else:
                cell = tf.nn.rnn_cell.BasicRNNCell(nHiddenUnits)

            cell = tf.nn.rnn_cell.DropoutWrapper(
                cell, output_keep_prob=self.keepProbability)
            self.rnn_layers = tf.nn.rnn_cell.MultiRNNCell([cell] * nLayers,
                                                          state_is_tuple=True)
            if cellType == RNNCellType.LSTM:
                state_placeholder = tf.placeholder(
                    tf.float32, [nLayers, 2, None, nHiddenUnits])
                # Unstack the placeholder into a tuple of LSTMStateTuples for
                # TensorFlow's native RNN API (tf.unpack was renamed tf.unstack)
                l = tf.unstack(state_placeholder, axis=0)
                self.state = tuple([
                    tf.nn.rnn_cell.LSTMStateTuple(l[idx][0], l[idx][1])
                    for idx in range(nLayers)
                ])
            else:
                state_placeholder = tf.placeholder(
                    tf.float32, [nLayers, None, nHiddenUnits])
                # Unstack the placeholder into a tuple of per-layer state tensors
                l = tf.unstack(state_placeholder, axis=0)
                self.state = tuple(l[idx] for idx in range(nLayers))

            self.outputs, self.nextState = tf.nn.dynamic_rnn(
                self.rnn_layers,
                self.input,
                time_major=False,
                initial_state=self.state)

        with tf.name_scope("Cost"):
            # Flatten the batch and time dimensions into rows.
            self.flattenedOutputs = tf.reshape(self.outputs,
                                               (-1, nHiddenUnits),
                                               name="flattenedOutputs")
            # Project the outputs onto the prediction horizons.
            self.w = tf.get_variable("w", (nHiddenUnits, nHorizons))
            self.b = tf.get_variable("b", nHorizons)
            self.predicted = tf.matmul(self.flattenedOutputs, self.w) + self.b
            self.flattenedTargets = tf.reshape(self.targets, (-1, nHorizons),
                                               name="flattenedTargets")
            # Compare predictions to labels.
            self.loss = tf.sqrt(
                tf.reduce_mean(
                    tf.square(tf.subtract(self.flattenedTargets,
                                          self.predicted))))
            self.cost = tf.reduce_mean(self.loss, name="cost")

        with tf.name_scope("Train"):
            tf.summary.scalar('RMSE', self.cost)
            self.iteration = tf.Variable(0,
                                         dtype=tf.int64,
                                         name="iteration",
                                         trainable=False)
            self.gradients, _ = tf.clip_by_global_norm(tf.gradients(
                self.cost, tf.trainable_variables()),
                                                       maxGradient,
                                                       name="clipGradients")
            optimizer = tf.train.GradientDescentOptimizer(
                learning_rate=self.learningRate)
            self.trainStep = optimizer.apply_gradients(
                zip(self.gradients, tf.trainable_variables()),
                name="trainStep",
                global_step=self.iteration)

        self.initialize = tf.global_variables_initializer()
        self.summary = tf.summary.merge_all()
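A hypothetical usage sketch for a non-LSTM configuration; the class name Model, the arrays x and y, and batch_size are assumptions not present in the excerpt. The initial state tuple is fed element-wise, since the underlying placeholder is local to __init__:

import numpy as np
import tensorflow as tf

model = Model(maxGradient=5.0, timeSteps=20, nHorizons=1,
              inputSize=10, nHiddenUnits=64, cellType=RNNCellType.GRU)

with tf.Session() as sess:
    sess.run(model.initialize)
    zero = np.zeros((batch_size, 64), dtype=np.float32)   # per-layer zero state
    feed = {model.input: x,             # [batch, timeSteps, inputSize]
            model.targets: y,           # [batch, timeSteps, nHorizons]
            model.learningRate: 0.01,
            model.keepProbability: 0.9}
    feed.update({s: zero for s in model.state})           # feed each layer's state tensor
    _, cost = sess.run([model.trainStep, model.cost], feed_dict=feed)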
Example #6
# see http://www.scholarpedia.org/article/Mackey-Glass_equation

import numpy as np
import tensorflow as tf

from esn_cell import ESNCell  # assumed import path for ESNCell

# with open('mgdata.dat.txt') as f:
#     data = [float(l.split(' ')[1]) for l in f.readlines()]   # list, so it can be sliced below

washout_size = 50
units = 30

# build the graph and evaluate it
# tf.reset_default_graph()   # so multiple evaluations won't fail

data = create_dataset('mackey')   # assumed to return the Mackey-Glass series as a 1-D float sequence
data_t = tf.reshape(tf.constant(data), [1, -1, 1])   # reshaped for dynamic_rnn: [batch, time, elements]

esn = ESNCell(num_units=units, connectivity=0.2, wr2_scale=0.7)
states_t, _ = tf.nn.dynamic_rnn(esn, data_t, dtype=tf.float32)
washed_t = tf.reshape(states_t[:, washout_size:, :], [-1, units])

with tf.Session() as S:
    tf.global_variables_initializer().run()
    states = np.mat(washed_t.eval())

tr_size = 500  # we train on the first 500 samples to perform next-step prediction
beta_ridge = 1  # with a lower beta, e.g. 0.01, the fit is much better; the difference is not visible in the plot

# train data
tr_states = states[:tr_size]
tr_target = np.expand_dims(data[washout_size + 1:tr_size + washout_size + 1],
                           axis=0)
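
The excerpt stops after preparing the training pair. A sketch (not from the source) of how the ridge readout and test error could be computed from here, using beta_ridge as the regularizer:

# W_out = (S^T S + beta I)^-1 S^T y, with training states as rows of S
S = np.asarray(tr_states)                   # [tr_size, units]
y = np.asarray(tr_target).reshape(-1, 1)    # [tr_size, 1]
wout = np.linalg.solve(S.T @ S + beta_ridge * np.eye(units), S.T @ y)

# next-step prediction on the held-out states
ts_states = states[tr_size:]
ts_pred = np.asarray(ts_states @ wout).ravel()[:-1]
ts_y = np.asarray(data[washout_size + tr_size + 1:])
print("Test MSE:", np.mean(np.square(ts_y - ts_pred)))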