import numpy as np
import tensorflow as tf

# generate_ts, shl and the globals batch_size, seq_size, input_size, layer_size
# are assumed to be defined elsewhere in the repository.

def run(filter_len):
    np.random.seed(3)
    tf.set_random_seed(3)

    D_init = np.random.randn(filter_len, input_size, layer_size)
    # D_init = generate_dct_dictionary(filter_len, layer_size).reshape((filter_len, input_size, layer_size))*0.1
    D = tf.Variable(D_init.reshape((filter_len, 1, input_size, layer_size)), dtype=tf.float32)

    x = tf.placeholder(tf.float32, shape=(batch_size, seq_size, 1, input_size), name="x")
    xc = tf.nn.conv2d(x, D, strides=[1, 1, 1, 1], padding='VALID', name="xc")

    x_v = np.zeros((seq_size, batch_size, input_size))
    for bi in xrange(batch_size):
        for ni in xrange(input_size):
            x_v[:, bi, ni] = generate_ts(seq_size)
    # NHWC layout matching the placeholder: (batch, seq, 1, input_size)
    x_v = x_v.transpose((1, 0, 2)).reshape((batch_size, seq_size, 1, input_size))

    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    xc_v = sess.run(xc, {x: x_v})

    # dst_pic = "/home/alexeyche/tmp/xc_{0}_{1:.2f}.png".format(int(filter_len), factor)
    dst_pic = "/home/alexeyche/tmp/xc_{0}.png".format(int(filter_len))
    shl(xc_v, file=dst_pic)
    print filter_len
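# Usage sketch (assumption, not from the original): sweep a few filter lengths
# and render the convolution output for each. The graph is reset between calls
# because run() adds fresh variables to the default graph every time; the
# particular filter lengths here are illustrative.
for flen in (5, 10, 25, 50):
    tf.reset_default_graph()
    run(flen)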
def gen_ts(seq_size):
    x = np.zeros((seq_size, batch_size, input_size))
    for bi in xrange(batch_size):
        for ni in xrange(input_size):
            x[:, bi, ni] = np.diff(generate_ts(seq_size + 1))
            x[:, bi, ni] /= np.std(x[:, bi, ni])
    return x.transpose((1, 0, 2)).reshape((batch_size, seq_size, 1, input_size))
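# Example use (assumption): gen_ts returns a batch in the NHWC layout of the
# "x" placeholder above, (batch_size, seq_size, 1, input_size), so — assuming a
# graph like the one in run() is built at module scope — its output can be fed
# directly:
x_v = gen_ts(seq_size)
xc_v = sess.run(xc, {x: x_v})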
grads_and_vars = []
for li, s in enumerate(finstate):
    dF = s[-1]
    grads_and_vars += [
        (-tf.reduce_mean(dF, 0), net._cells[li].F_flat),
    ]

sess = tf.Session()
env = Env("lca_simple")

x_v = np.zeros((seq_size, batch_size, input_size))
for bi in xrange(batch_size):
    for ni in xrange(input_size):
        x_v[:, bi, ni] = generate_ts(seq_size)

state_v = get_zero_state()

sample_size = 20
E = np.zeros((sample_size, sample_size))
a_v_res = np.zeros((sample_size, sample_size, seq_size, layer_size))
x_v_res = np.zeros((sample_size, sample_size, seq_size))
W0 = np.linspace(-2.0, 2.0, sample_size)
W1 = np.linspace(-2.0, 2.0, sample_size)
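# A plausible continuation (assumption, not in the original): scan the 2-D grid
# of weight values and record the energy surface. evaluate_point() is a
# hypothetical helper standing in for whatever session run fills E, a_v_res and
# x_v_res for a given (w0, w1) pair.
for i, w0 in enumerate(W0):
    for j, w1 in enumerate(W1):
        E[i, j], a_v_res[i, j], x_v_res[i, j] = evaluate_point(w0, w1)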
# optimizer = tf.train.GradientDescentOptimizer(lrate)

# Apply the dictionary gradient, then renormalize each filter column of D to
# unit L2 norm. The normalized tensor must be assigned back to D, and ordered
# after the gradient step, for the normalization to take effect.
with tf.control_dependencies([optimizer.apply_gradients([(D_grad, D)])]):
    apply_grads_step = D.assign(tf.nn.l2_normalize(D, 0))

##

sess = tf.Session()
sess.run(tf.global_variables_initializer())

x_v = np.zeros((seq_size, batch_size, input_size))
for bi in xrange(batch_size):
    for ni in xrange(input_size):
        x_v[:, bi, ni] = np.diff(generate_ts(seq_size + 1))
        x_v[:, bi, ni] /= np.std(x_v[:, bi, ni])
        # x_v[:, bi, ni] = generate_ts(seq_size)
        # x_v[:, bi, ni] = np.random.randn(seq_size)
x_v = x_v.transpose((1, 0, 2)).reshape((batch_size, seq_size, input_size, 1))

# Sparse random initial activations: only values >= 0.999 survive, so roughly
# 0.1% of units start active.
h_v = np.random.random(h.get_shape().as_list())
h_v[h_v < 0.999] = 0

e_m_arr, l_m_arr = [], []
lookback, tol = 10, 1e-05
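# Training-loop sketch (assumption, not the original code): step the optimizer
# until the mean error stops improving by more than tol over the last
# `lookback` epochs. max_epochs and the error/loss fetch tensors are
# hypothetical placeholders for whatever the surrounding graph defines, and x
# and h are assumed to be placeholders fed with x_v and h_v.
for epoch in xrange(max_epochs):
    e_m, l_m, _ = sess.run([error, loss, apply_grads_step], {x: x_v, h: h_v})
    e_m_arr.append(np.mean(e_m))
    l_m_arr.append(np.mean(l_m))
    if len(e_m_arr) > lookback and abs(e_m_arr[-lookback] - e_m_arr[-1]) < tol:
        break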