def __init__(self, units):
        super(RNNUsingSimpleRNNCell, self).__init__()

        # [b, 64]
        self.state0 = [tf.zeros([batch_size, units])]
        self.state1 = [tf.zeros([batch_size, units])]
        self.state2 = [tf.zeros([batch_size, units])]
        self.state3 = [tf.zeros([batch_size, units])]

        # transform text to embedding representation
        # [b, 80] => [b, 80, 100]
        self.embedding = layers.Embedding(total_words,
                                          embedding_len,
                                          input_length=max_review_len)

        # [b, 80, 100], h_dim: 64
        # RNN: cell0, cell1, cell2, cell3
        # SimpleRNN
        self.rnn_cell0 = layers.SimpleRNNCell(units, dropout=0.5)
        self.rnn_cell1 = layers.SimpleRNNCell(units, dropout=0.5)
        self.rnn_cell2 = layers.SimpleRNNCell(units, dropout=0.5)
        self.rnn_cell3 = layers.SimpleRNNCell(units, dropout=0.5)

        # fc, [b, 80, 100] => [b, 64] => [b, 1]
        self.output_layer = layers.Dense(1)
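A hedged sketch of the matching forward pass (the source shows only __init__): each timestep's embedding is threaded through the four cells in series, passing along each cell's state and the Keras training flag so the cells' dropout only fires during training.

    def call(self, inputs, training=None):
        # [b, 80] => [b, 80, 100]
        x = self.embedding(inputs)
        state0, state1 = self.state0, self.state1
        state2, state3 = self.state2, self.state3
        for word in tf.unstack(x, axis=1):  # step over the 80 timesteps
            out0, state0 = self.rnn_cell0(word, state0, training)
            out1, state1 = self.rnn_cell1(out0, state1, training)
            out2, state2 = self.rnn_cell2(out1, state2, training)
            out3, state3 = self.rnn_cell3(out2, state3, training)
        # classify from the last timestep's top-layer output: [b, 64] => [b, 1]
        return tf.sigmoid(self.output_layer(out3))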
Example #2
 def __init__(self, units):
     super(MyRNN, self).__init__()
     # [b, 64]; build the cells' initial state vectors, reused across batches
     self.state0 = [tf.zeros([batch_size, units])]
     self.state1 = [tf.zeros([batch_size, units])]
     # word-embedding encoding: [b, 80] => [b, 80, 100]
     self.embedding = layers.Embedding(total_words,
                                       embedding_len,
                                       input_length=max_review_len)
     # build 2 cells
     self.rnn_cell0 = layers.SimpleRNNCell(units, dropout=0.5)
     self.rnn_cell1 = layers.SimpleRNNCell(units, dropout=0.5)
     # # build the RNN with the layer API instead (commented-out alternative)
     # self.rnn = keras.Sequential([
     #     layers.SimpleRNN(units, dropout=0.5, return_sequences=True),
     #     layers.SimpleRNN(units, dropout=0.5)
     # ])
     # build the classifier that maps the cell's output features to one of 2 classes
     # [b, 80, 100] => [b, 64] => [b, 1]
     self.outlayer = Sequential([
         layers.Dense(units),
         layers.Dropout(rate=0.5),
         layers.ReLU(),
         layers.Dense(1)
     ])
Example #3
 def __init__(self, units):
     super(MyRNN, self).__init__()
     self.state0 = [tf.zeros([batchsz, units])]
     self.state1 = [tf.zeros([batchsz, units])]
     self.embedding = layers.Embedding(total_words, embedding_len, input_length=max_review_len)
     self.rnn_cell0 = layers.SimpleRNNCell(units, dropout=0.2)
     self.rnn_cell1 = layers.SimpleRNNCell(units, dropout=0.2)
     self.outlayer = layers.Dense(1)
Exemple #4
0
 def __init__(self, units):
     super(MyRNN, self).__init__()
     self.state0 = [tf.zeros(shape=(batch_size, units))]
     self.state1 = [tf.zeros(shape=(batch_size, units))]
     self.embedding = layers.Embedding(num_words, 100, input_length=seq_len)
     self.cell0 = layers.SimpleRNNCell(units, dropout=0.2)
     self.cell1 = layers.SimpleRNNCell(units, dropout=0.2)
     self.outlayer = layers.Dense(1)
Example #5
 def __init__(self, units1):
     super(MyRNN, self).__init__()
     self.state0 = [tf.zeros([batch_num, units1])]
     self.state1 = [tf.zeros([batch_num, units1])]
     # input_dim=total_vocabulary here must match num_words=total_vocabulary in load_data
     # likewise, input_length=max_sentence_len here matches the padded sentence length
     self.embedding = layers.Embedding(input_dim=total_vocabulary, output_dim=embedding_len, input_length=max_sentence_len)
     self.rnn_cell0 = layers.SimpleRNNCell(units=units1, dropout=0.5)
     self.rnn_cell1 = layers.SimpleRNNCell(units=units1, dropout=0.5)
     # the classifier performs binary classification, so the output layer has a single node
     self.out_layer = Sequential([layers.Dense(units1), layers.Dropout(rate=0.5), layers.ReLU(), layers.Dense(1)])
Exemple #6
0
    def __init__(self, units_layer1, units_layer2):
        super(MyRNN, self).__init__()

        self.embedding = layers.Embedding(
            total_words, embedding_len, input_length=max_sentence_word_lenth
        )  # first arg: number of distinct words in the dataset; second: the dense dimension the sparse one-hot words are projected into
        # build one RNN cell
        self.rnn_cell0 = layers.SimpleRNNCell(units_layer1, dropout=0.2)
        self.state0_layer1 = [tf.zeros([bathsize, units_layer1])]  # initialize the very first state vector
        self.rnn_cell1 = layers.SimpleRNNCell(units_layer2, dropout=0.2)
        self.state0_layer2 = [tf.zeros([bathsize, units_layer2])]
        # build an FC layer that projects the units to the output
        self.fc = layers.Dense(1)
Example #7
    def __init__(self, units):
        super(MyRnn, self).__init__()
        self.state0 = [tf.zeros([bachsz, units])]
        self.state1 = [tf.zeros([bachsz, units])]
        # transform text to embedding representation
        # [b, 80] => [b, 80, 100]
        # sentence to vector; the embedding table is itself trainable and learned during training
        self.embedding = layers.Embedding(total_words,
                                          embedding_len,
                                          input_length=max_review_len)
        self.rnn_cell0 = layers.SimpleRNNCell(units, dropout=0.5)
        self.rnn_cell1 = layers.SimpleRNNCell(units, dropout=0.5)

        self.outlayer = layers.Dense(1)
Example #8
 def __init__(self):
     super().__init__()
     self.embed_layer = layers.Embedding(10,
                                         32,
                                         batch_input_shape=[None, None])
     self.rnncell = layers.SimpleRNNCell(64)
     self.rnn_layer = layers.RNN(self.rnncell, return_sequences=True)
     self.dense = layers.Dense(10)
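A minimal call() sketch for this model (an assumption; the snippet defines only __init__). Because the cell is wrapped in layers.RNN, the layer unrolls it over the timesteps itself and no manual state handling is needed:

    def call(self, inputs):
        x = self.embed_layer(inputs)  # [b, t] => [b, t, 32]
        x = self.rnn_layer(x)         # [b, t, 32] => [b, t, 64], one output per step
        return self.dense(x)          # [b, t, 64] => [b, t, 10] logits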
Example #9
    def __init__(self, units):
        super(MyRnn, self).__init__()
        # the states must be re-initialized each time a new batch enters the network
        self.state0 = [tf.zeros([batchsz, units])]
        self.state1 = [tf.zeros([batchsz, units])]
        # convert to the embedding representation
        # [b,80]=>[b,80,100]
        self.embedding = layers.Embedding(total_words, embedding_len,
                                          input_length=max_review_len)

        # [b,80,100], h_dim:64
        self.rnn_cell0 = layers.SimpleRNNCell(units, dropout=0.2)
        # add a second cell
        self.rnn_cell1 = layers.SimpleRNNCell(units, dropout=0.2)
        # fully connected layer, fc: [b,80,100]=>[b,64]
        # maps to the desired classification result
        self.outlayer = layers.Dense(1)
Example #10
    def __init__(self, units):
        super(MyRNN, self).__init__()

        self.state0 = [tf.zeros([batchSize, units])]
        self.state1 = [tf.zeros([batchSize, units])]

        # encode each word as a vector of length 100
        # [b,300] => [b,300,100]
        self.embedding = layers.Embedding(input_dim=total_words,
                                          output_dim=embedding_len,
                                          input_length=max_review_len)
        # [b,300,100], h_dim: units
        # in test mode the dropout argument has no effect, which yields better test results
        self.rnn_cell0 = layers.SimpleRNNCell(units=units, dropout=0.5)
        self.rnn_cell1 = layers.SimpleRNNCell(units=units, dropout=0.5)

        self.outlayer = layers.Dense(1)
Example #11
def build_RNN_model():

    rnn_layer = keras.layers.RNN(layers.SimpleRNNCell(units),
                                 input_shape=(None, input_dim))
    model = keras.models.Sequential([
        rnn_layer,
        keras.layers.Dense(output_dim),
    ])
    return model
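A hedged usage sketch; the concrete values of units, input_dim, and output_dim are assumptions, since the snippet leaves them as free globals (the shapes follow the classic rows-of-MNIST setup, assuming tensorflow is imported as tf):

    units, input_dim, output_dim = 64, 28, 10  # assumed values
    model = build_RNN_model()
    model.compile(optimizer='adam',
                  loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True))
    y = model(tf.random.normal([32, 28, 28]))  # 32 sequences of 28 rows, 28 features each
    print(y.shape)  # (32, 10)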
Example #12
 def __init__(self, units):
     super(MyRnn, self).__init__()
     # set the initial state
     self.state0 = [tf.zeros([batch_size, units])]
     self.embedding = layers.Embedding(num_words,
                                       embedding_dim,
                                       input_length=max_len)
     self.rnncell0 = layers.SimpleRNNCell(units, dropout=0.3)
     self.outlayer = layers.Dense(1)
Example #13
    def __init__(self, units):
        super(MyRNN, self).__init__()

        # initialize the states, [b, 64]
        self.state0 = [tf.zeros([batchsz, units])]
        self.state1 = [tf.zeros([batchsz, units])]

        # embed the words of each sentence
        self.embedding = layers.Embedding(total_words,
                                          embedding_len,
                                          input_length=max_review_len)

        # RNN Cell
        # [b, 80, 100], h_dim = 64 (the cell's internal state size)
        self.rnn_cell0 = layers.SimpleRNNCell(units, dropout=0.2)
        self.rnn_cell1 = layers.SimpleRNNCell(units, dropout=0.2)

        # build the fully connected layer [b, 80, 100] => [b, 64] => [b, 1] (binary classification)
        self.outlay = layers.Dense(1)
Example #14
    def __init__(self, units):
        super(RNN, self).__init__()

        # [b, 64]
        self.state0 = [tf.zeros([batch_size, units])]
        self.state1 = [tf.zeros([batch_size, units])]

        # transform text to embedding representation
        # [b, 100] => [b, 100, 150]
        self.embedding = layers.Embedding(input_dim=total_words,
                                          output_dim=embedding_len,
                                          input_length=max_review_len)

        # SimpleRNNCell
        # units=64
        self.rnn_cell0 = layers.SimpleRNNCell(units, dropout=0.5)
        self.rnn_cell1 = layers.SimpleRNNCell(units, dropout=0.5)

        # fully connected layer
        # [b, 100, 150] => [b, 64] => [b, 1]
        self.out = layers.Dense(1)
Example #15
def build_model_RNN(units, input_dim, output_size):

    RNN_layer = layers.RNN(
        layers.SimpleRNNCell(units), input_shape=(None, input_dim)
    )
    model = keras.models.Sequential(
        [
            RNN_layer,
            layers.BatchNormalization(),
            layers.Dense(output_size),
        ]
    )
    return model
Example #16
    def __init__(self, units):
        super(MyRNN, self).__init__()

        # [b, 64]
        self.state0 = [tf.zeros([batchsz, units])]
        print('******************************************')
        print('self.state0:', self.state0)
        print('tf.zeros([batchsz, units]):', tf.zeros([batchsz, units]))
        self.state1 = [tf.zeros([batchsz, units])]

        # transform text to embedding representation
        # [b, 80] => [b, 80, 100]
        self.embedding = layers.Embedding(total_words,
                                          embedding_len,
                                          input_length=max_review_len)

        # [b, 80, 100], h_dim: 64
        # RNN: cell0, cell1
        # SimpleRNN
        self.rnn_cell0 = layers.SimpleRNNCell(units, dropout=0.5)
        self.rnn_cell1 = layers.SimpleRNNCell(units, dropout=0.5)

        # fc, [b, 80, 100] => [b, 64] => [b, 1]
        self.outlayer = layers.Dense(1)
Example #17
    def __init__(self, units, total_words, embedding_len, input_length):
        super(MyRNN, self).__init__()
        # [b,64]
        self.h0 = [tf.zeros([batch, units])]
        # transform text to embedding representation
        # [b,80]=>[b,80,100]
        self.embedding = layers.Embedding(total_words,
                                          embedding_len,
                                          input_length=input_length)
        # [b,80,100]

        self.rnn_cell = layers.SimpleRNNCell(units)

        # fc [b,80,100] =>[b,64] => [b,1]
        self.fc = layers.Dense(1)
Example #18
    def __init__(self, units, layer_num):
        # use Parent class's __init__
        super(SimpleRNN, self).__init__()

        self.layer_num = layer_num
        self.embeding_layer = layers.Embedding(encoder.vocab_size,
                                               80,
                                               input_length=64)
        self.states = []
        self.outs = []
        self.rnn_layers = []
        for i in range(layer_num):
            self.states.append([tf.zeros([BATCH_SIZE, units])])
            self.rnn_layers.append(layers.SimpleRNNCell(units))
            self.outs.append(0)
        self.out_layer = layers.Dense(1, activation='sigmoid')
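A hedged sketch of the matching call() (an assumption, not shown in the source): the per-layer cells and states built above are unrolled over the 64 timesteps, each layer consuming the output of the layer below:

    def call(self, inputs):
        x = self.embeding_layer(inputs)    # [b, 64] => [b, 64, 80]
        states = list(self.states)         # per-layer states, starting from zeros
        for xt in tf.unstack(x, axis=1):   # step over the 64 timesteps
            h = xt
            for i in range(self.layer_num):
                h, states[i] = self.rnn_layers[i](h, states[i])
        return self.out_layer(h)           # sigmoid probability, [b, 1]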
Example #19
 def __init__(self, units):
     super(MyRnn, self).__init__()
     # [b,64]
     self.state0 = [tf.zeros([batchsize, units])]
     # input layer
     # transform text to embedding representation
     # [b,80] => [b,80,100]
     self.embedding = layers.Embedding(total_words,
                                       embedding_len,
                                       input_length=max_review_len)
     # [b,80,100], h_dim: 64
     # RNN: a single cell
     # SimpleRNN
     self.rnn_cell0 = layers.SimpleRNNCell(units, dropout=0.2)
     # output layer: produces the binary-classification result
     # fc, [b,80,100] => [b,64] => [b,1]
     self.outlayer = layers.Dense(1)
Example #20
    def __init__(self,
                 units: int,
                 out_dim: int,
                 shift_std: float = 0.1,
                 cell_type: str = 'lstm',
                 offdiag: bool = False):
        """Constructs a learnable multivariate normal cell.

        Args:
          units: Dimensionality of the RNN function parameters.
          out_dim: The dimensionality of the distribution.
          shift_std: Shift applied to MVN std before building the dist. Providing a shift
            toward the expected std allows the input values to be closer to 0.
          cell_type: an RNN cell type among 'lstm', 'gru', 'rnn', 'gruclip'. case-insensitive.
          offdiag: set True to allow non-zero covariance (within-timestep) in the returned distribution.
        """
        super(LearnableMultivariateNormalCell, self).__init__()
        self.offdiag = offdiag
        self.output_dimensions = out_dim
        self.units = units
        if cell_type.upper().endswith('LSTM'):
            self.rnn_cell = tfkl.LSTMCell(self.units,
                                          implementation=1,
                                          name="mvncell")
            # why does the jupyter notebook version require implementation=1 but not in pycharm?
        elif cell_type.upper().endswith('GRU'):
            self.rnn_cell = tfkl.GRUCell(self.units, name="mvncell")
        elif cell_type.upper().endswith('RNN'):
            self.rnn_cell = tfkl.SimpleRNNCell(self.units, name="mvncell")
        elif cell_type.upper().endswith('GRUCLIP'):
            from indl.rnn.gru_clip import GRUClipCell
            self.rnn_cell = GRUClipCell(self.units, name="mvncell")
        else:
            raise ValueError("cell_type %s not recognized" % cell_type)

        self.loc_layer = tfkl.Dense(self.output_dimensions, name="mvncell_loc")
        n_scale_dim = (tfpl.MultivariateNormalTriL.params_size(out_dim) - out_dim) if offdiag\
            else (tfpl.IndependentNormal.params_size(out_dim) - out_dim)
        self.scale_untransformed_layer = tfkl.Dense(n_scale_dim,
                                                    name="mvndiagcell_scale")
        self._scale_shift = np.log(np.exp(shift_std) - 1).astype(np.float32)
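A hedged sketch (an assumption, not shown in the source) of how these pieces could combine per step for the diagonal case (offdiag=False): run the inner cell, then map its output to the loc and the softplus-transformed, shifted scale of an independent normal distribution.

    import tensorflow_probability as tfp
    tfd = tfp.distributions

    def call(self, inputs, states):
        out, states = self.rnn_cell(inputs, states)
        loc = self.loc_layer(out)
        # softplus keeps the std positive; _scale_shift recenters it near shift_std
        scale = tf.nn.softplus(self.scale_untransformed_layer(out) + self._scale_shift)
        return tfd.Independent(tfd.Normal(loc=loc, scale=scale),
                               reinterpreted_batch_ndims=1), states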
Example #21
    def _build_net(self, scope):
        with tf.variable_scope('critic'):
            s = tf.expand_dims(
                self.state_in, axis=1, name='timely_input'
            )  # [time_step, feature] => [time_step, batch, feature]
            rnn_cell = layers.SimpleRNNCell(
                self.cell_size
            )  # tf.contrib.rnn.BasicRNNCell(self.cell_size)  #
            self.init_state = rnn_cell.get_initial_state(
                batch_size=1, dtype=tf.float32
            )  # zero_state(batch_size=1, dtype=tf.float32)  #
            outputs, self.final_state = layers.RNN(  # tf.nn.dynamic_rnn
                cell=rnn_cell,
                return_state=True,
                time_major=True)(s, initial_state=self.init_state)  #
            cell_out = tf.reshape(
                outputs, [-1, self.cell_size],
                name='flatten_rnn_outputs')  # joined state representation
            c_hidden = layers.Dense(self.n_hidden,
                                    activation='relu',
                                    name='hidden_layer')(cell_out)
            self.v = layers.Dense(1,
                                  name='output_value')(c_hidden)  # state value

        with tf.variable_scope('actor'):
            with tf.name_scope('hidden_layer'):
                a_hidden_value = layers.Dense(self.n_hidden,
                                              activation='relu')(self.state_in)
            with tf.name_scope('output_layer'):
                self.mu = layers.Dense(self.action_dim,
                                       activation='tanh',
                                       name='mu')(a_hidden_value)
                self.sig = layers.Dense(self.action_dim,
                                        activation='softplus',
                                        name='sigma')(a_hidden_value)
        self.a_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                          scope=scope + '/actor')
        self.c_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                          scope=scope + '/critic')
Example #22
# rnn basic
import numpy as np
from tensorflow.keras import layers

# data set
h = [1, 0, 0, 0]
e = [0, 1, 0, 0]
l = [0, 0, 1, 0]
o = [0, 0, 0, 1]

# One cell: input_dim=4, hidden_size(output_dimension)=2
x_data = np.array([[h]], dtype=np.float32)

hidden_size = 2
cell = layers.SimpleRNNCell(units=hidden_size)
rnn = layers.RNN(cell, return_sequences=True, return_state=True)
outputs, states = rnn(x_data)

print('# One cell: input_dim=4, hidden_size=2 #')
print('x_data: {}, shape: {}'.format(x_data, x_data.shape))
print('outputs: {}, shape: {}'.format(outputs, outputs.shape))
print('states: {}, shape: {}'.format(states, states.shape))
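With x_data of shape (1, 1, 4) and hidden_size=2, outputs comes back with shape (1, 1, 2) (one output per timestep, since return_sequences=True) and states with shape (1, 2) (the final hidden state).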

# equivalent: layers.SimpleRNN == layers.SimpleRNNCell wrapped in layers.RNN
rnn = layers.SimpleRNN(units=hidden_size,
                       return_sequences=True,
                       return_state=True)
outputs, states = rnn(x_data)

print('# SimpleRNN (= SimpleRNNCell + RNN) #')
print('x_data: {}, shape: {}'.format(x_data, x_data.shape))
print('outputs: {}, shape: {}'.format(outputs, outputs.shape))
print('states: {}, shape: {}'.format(states, states.shape))
Example #23
#%%
net.embeddings
net.embeddings.trainable
net.trainable = False
#%%
"""
# 从预训练模型中加载词向量表
embed_glove = load_embed('glove.6B.50d.txt')
# 直接利用预训练的词向量表初始化Embedding层
net.set_weights([embed_glove])
"""

#%%
cell = layers.SimpleRNNCell(3)
cell.build(input_shape=(None, 4))
cell.trainable_variables

#%%
# initialize the state vector
h0 = [tf.zeros([4, 64])]
x = tf.random.normal([4, 80, 100])
xt = x[:, 0, :]
# build a cell with input feature size f=100, sequence length s=80, state size 64
cell = layers.SimpleRNNCell(64)
out, h1 = cell(xt, h0)  # forward pass
print(out.shape, h1[0].shape)
print(id(out), id(h1[0]))
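The two ids printed here are equal: for SimpleRNNCell the output tensor and the new state are one and the same object (out is h1[0]), because a simple RNN's output is exactly its hidden state.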

#%%
Example #24
 def __init__(self, max_length):
     super().__init__()
     self.embed_layer = layers.Embedding(10, 64, input_length=max_length)
     self.rnn_layer = layers.RNN(layers.SimpleRNNCell(64), return_sequences=True)
     self.dense = layers.Dense(10, activation=tf.nn.softmax)
Example #25
# -*- coding: utf-8 -*-
'''
Created on 2019/6/21
Author: zhe
Email: [email protected]
'''

import os
import tensorflow as tf
from tensorflow.keras import layers

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

# input dim and hidden dim
cell = layers.SimpleRNNCell(3)  # the simple cell, in contrast to LSTM and GRU
cell.build(input_shape=(None, 4))
# h_t = x_t @ W_xh + h_{t-1} @ W_hh + bias

print(cell.trainable_variables)

# Single layer RNN Cell
x = tf.random.normal([4, 80, 100])
xt0 = x[:, 0, :]

cell = layers.SimpleRNNCell(64)  # the simple cell, in contrast to LSTM and GRU

out, xt1 = cell(xt0, [tf.zeros([4, 64])])

print(out.shape, xt1[0].shape)

print(id(out), id(xt1[0]))
Example #26
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras import Sequential, Model

print(tf.__version__)

# One hot encoding for each char in 'hello'
h = [1, 0, 0, 0]
e = [0, 1, 0, 0]
l = [0, 0, 1, 0]
o = [0, 0, 0, 1]

# One cell RNN input_dim (4) -> output_dim (2)
x_data = np.array([[h]], dtype=np.float32)

hidden_size = 2
cell = layers.SimpleRNNCell(units=hidden_size)  # creating SimpleRNNCell
rnn = layers.RNN(cell, return_sequences=True,
                 return_state=True)  # analogous to tf.nn.dynamic_rnn
outputs, states = rnn(x_data)

print('x_data: {}, shape: {}'.format(x_data, x_data.shape))
print('outputs: {}, shape: {}'.format(outputs, outputs.shape))
print('states: {}, shape: {}'.format(states, states.shape))

# equivalent to above case
rnn = layers.SimpleRNN(units=hidden_size,
                       return_sequences=True,
                       return_state=True)  # layers.SimpleRNNCell + layers.RNN

outputs, states = rnn(x_data)
Example #27
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import matplotlib.pyplot as plt

# verify the Embedding layer's behavior
input1 = tf.random.shuffle(tf.range(10))  # shuffle randomly permutes the data
# a layer for a vocabulary of 10 words, each encoded as a trainable length-4 vector
net_Embedding = layers.Embedding(input_dim=10, output_dim=4)
out = net_Embedding(input1)
print('Embedding layer:', out[1])
print(net_Embedding.embeddings[0], net_Embedding.embeddings.trainable)
print('trainable:', net_Embedding.trainable)
net_Embedding.trainable = False

# SimpleRNNCell test
net_cell = layers.SimpleRNNCell(units=3)
net_cell.build(input_shape=(None, 4))
print('net_cell.trainable_variables:', net_cell.trainable_variables[0][0])

# initialize the state vector
h0 = [tf.zeros([4, 64])]
x_0 = tf.random.normal([4, 80, 100])
x_1 = x_0[:, 0, :]  # any input whose size-1 dims squeeze away to 2 dims works here, e.g. [4,1,1,1,1,8] behaves like [4,8]; the same holds for other layers
net_cell2 = layers.SimpleRNNCell(64)
out1, h1 = net_cell2(x_1, h0)
print('shape:', tf.shape(out1), tf.shape(h1), id(h1))

# build a multi-layer SimpleRNNCell stack
x = tf.random.normal([4, 80, 100])
xt = x[:, 0, :]
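The snippet breaks off at this point; a minimal completion (an assumption, mirroring Example #29 below) would build two cells and unroll them by hand:

    cell0 = layers.SimpleRNNCell(64)
    cell1 = layers.SimpleRNNCell(64)
    h0, h1 = [tf.zeros([4, 64])], [tf.zeros([4, 64])]
    for xt in tf.unstack(x, axis=1):   # step over the 80 timesteps
        out0, h0 = cell0(xt, h0)       # layer 1 consumes the word vectors
        out1, h1 = cell1(out0, h1)     # layer 2 consumes layer 1's outputs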
Example #28
# Name:         ch9_RNNcell
# Description:  this script walks through RNNCell operations and multi-layer RNNs
# Author:       Administrator
# Date:         2021/1/8
# -------------------------------------------------------------------------------
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import datasets, layers, optimizers, Sequential, metrics
import matplotlib.pyplot as plt
import os
import numpy as np

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

##
RNNCell = layers.SimpleRNNCell(3)
RNNCell.build(input_shape=(None, 4))

# inspecting RNNCell.trainable_variables yields output like the following:
# [<tf.Variable 'kernel:0' shape=(4, 3) dtype=float32, numpy=
#  array([[ 0.61222756,  0.71201444, -0.8414649 ],
#         [ 0.21811128, -0.8361399 , -0.5725672 ],
#         [ 0.88111484,  0.90567493, -0.1974346 ],
#         [-0.37566787,  0.0725531 ,  0.7435373 ]], dtype=float32)>,
#  <tf.Variable 'recurrent_kernel:0' shape=(3, 3) dtype=float32, numpy=
#  array([[-0.71919477,  0.6710334 , -0.18020317],
#         [ 0.26817134,  0.5073451 ,  0.8189537 ],
#         [-0.6409704 , -0.5406619 ,  0.5448318 ]], dtype=float32)>,
#  <tf.Variable 'bias:0' shape=(3,) dtype=float32, numpy=array([0., 0., 0.], dtype=float32)>]
# kernel:0 is the weight matrix that extracts features from the input x_t, i.e. W_xa
# recurrent_kernel:0 is the weight matrix applied to the previous hidden state, i.e. W_aa
Example #29
# # out = cell(x)
# # print(out)
# print(cell.trainable_variables)

# cell = layers.SimpleRNNCell(64)
x = tf.random.normal([4, 80, 64])
# h0 = [tf.zeros([4,64])]
# xt = x[:,0,:]  # the first word vector
# print(xt)
# h = h0
# for xt in tf.unstack(x, axis=1):  # unroll over the sequence
#     out,h1 = cell(xt,h)
#     print('---------------out-------------')
#     print(out.shape)
#     print('---------------h1--------------')
#     print(h1[0].shape)
# out = out
# print(out)

# multi-layer recurrent network
h0 = [tf.zeros([4, 64])]
h1 = [tf.zeros([4, 64])]

cell1 = layers.SimpleRNNCell(64)
cell2 = layers.SimpleRNNCell(64)

for xt in tf.unstack(x, axis=1):
    out0, h0 = cell1(xt, h0)
    out1, h1 = cell2(out0, h1)
print(out1.shape)
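Here out1 is the top cell's output at the final timestep, so the print shows (4, 64): batch size 4, state size 64.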