Example 1
def RNNGenerator(dataset):
    features = [
        tf.compat.v2.feature_column.numeric_column(k, dtype=tf.dtypes.float64)
        for k in dataset.columns.values if (k != 'real' and k != 'actual')
    ]

    def customRELU(x):
        return tf.keras.activations.relu(x, max_value=100)

    model = tf.keras.models.Sequential()
    model.add(layers.Reshape([len(features), 1]))
    model.add(
        layers.SimpleRNN(128,
                         return_sequences=True,
                         kernel_regularizer='l1',
                         bias_regularizer='l2',
                         activation='relu'))
    model.add(
        layers.SimpleRNN(128,
                         return_sequences=True,
                         kernel_regularizer='l1_l2',
                         bias_regularizer='l2'))
    model.add(
        layers.SimpleRNN(128,
                         return_sequences=False,
                         kernel_regularizer='l1_l2',
                         bias_regularizer='l2'))
    model.add(layers.Dropout(.2))
    model.add(layers.Dense(128))
    model.add(layers.Dense(len(features), activation=customRELU))

    return model
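A minimal usage sketch for RNNGenerator; the DataFrame, its column names and the forward pass below are illustrative assumptions, not part of the original example.

import numpy as np
import pandas as pd

df = pd.DataFrame(np.random.rand(256, 5),
                  columns=['f1', 'f2', 'f3', 'f4', 'real'])
model = RNNGenerator(df)                              # four feature columns -> Reshape([4, 1])
preds = model(df.drop(columns=['real']).to_numpy())   # forward pass on (256, 4) inputs
print(preds.shape)                                    # (256, 4): one output per feature column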
 def __init__(self, units):
     super(MyRNN, self).__init__()
     # Word-embedding encoding: [b, 80] => [b, 80, 100]
     # Build the embedding layer: vocabulary size, embedding dimension, sequence length
     # (the argument order differs from the [b, 80, 100] shape above)
     # self.embedding = layers.Embedding(total_words, embedding_len) would also work:
     # since this is not fed directly into a fully connected layer, input_length=max_review_len is not required
     self.embedding = layers.Embedding(total_words,
                                       embedding_len,
                                       input_length=max_review_len)
     # Build the RNN
     # [b, 80, 100] => [b, 64]; 64 here is the units argument, i.e. the size of the hidden state h
     self.rnn = keras.Sequential([
         # the first layer runs over the whole sequence before the next one, hence return_sequences=True
         layers.SimpleRNN(units, dropout=0.5, return_sequences=True),
         layers.SimpleRNN(units, dropout=0.5)
     ])
     # Build the classification network that maps the RNN output features to 2 classes
     #  [b, 64] => [b, 1]
     self.outlayer = Sequential([
         layers.Dense(32),
         layers.Dropout(rate=0.5),
         layers.ReLU(),
         layers.Dense(1)
     ])
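The snippet above shows only __init__; a plausible call() to pair with it (a sketch that assumes the usual total_words / embedding_len / max_review_len setup and that tf is imported) could be:

 def call(self, inputs, training=None):
     # inputs: [b, 80] integer word indices
     x = self.embedding(inputs)                # [b, 80] => [b, 80, 100]
     x = self.rnn(x, training=training)        # [b, 80, 100] => [b, 64]
     x = self.outlayer(x, training=training)   # [b, 64] => [b, 1]
     return tf.sigmoid(x)                      # probability for the binary task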
    def __init__(self, units):
        super(RNNUsingSimpleRNN, self).__init__()

        # transform text to embedding representation
        # [b, 80] => [b, 80, 100]
        self.embedding = layers.Embedding(total_words,
                                          embedding_len,
                                          input_length=max_review_len)

        # [b, 80, 100] , h_dim: 64
        self.rnn = keras.Sequential([
            layers.SimpleRNN(units,
                             dropout=0.5,
                             return_sequences=True,
                             unroll=True),
            layers.SimpleRNN(units,
                             dropout=0.5,
                             return_sequences=True,
                             unroll=True),
            layers.SimpleRNN(units,
                             dropout=0.5,
                             return_sequences=True,
                             unroll=True),
            layers.SimpleRNN(units, dropout=0.5, unroll=True)
        ])

        # fc, [b, 80, 100] => [b, 64] => [b, 1]
        self.output_layer = layers.Dense(1)
Example 4
def RNNSimpleModel(units_layer1, units_layer2):
    inputs = keras.Input(shape=[max_sentence_word_lenth])
    # Embedding: [batch_size, 80] => [batch_size, 80, 50]
    X = keras.layers.Embedding(total_words,
                               50,
                               input_length=max_sentence_word_lenth)(inputs)
    print(X.shape)
    # One RNN layer
    X, test = layers.SimpleRNN(units_layer1,
                               return_sequences=True,
                               return_state=True,
                               dropout=0.2)(X)
    print(X.shape)
    print(test.shape)
    X = layers.SimpleRNN(units_layer2, dropout=0.2)(X)
    # Note: if both return_sequences and return_state are set to True, the two returned values are ordinary tensors
    # if only return_state=True is set, a list is returned
    # if neither is set, only the tensor for the last hidden state is returned
    # print(len(X))
    # print(X[0].shape,id(X[0]))
    # print(X[1].shape,id(X[1]))
    X = layers.Dense(1, activation='sigmoid')(X)

    MyModel = keras.Model(inputs=inputs, outputs=X)
    return MyModel
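A small sketch of the three return_sequences / return_state combinations described in the comments above (the input tensor and sizes are made up; tf and layers are assumed to be imported as in the examples):

x = tf.random.normal([4, 80, 50])                    # [batch, timesteps, features]

seq, state = layers.SimpleRNN(64, return_sequences=True, return_state=True)(x)
# seq: (4, 80, 64), state: (4, 64) -- two ordinary tensors

out_and_state = layers.SimpleRNN(64, return_state=True)(x)
# a list of two tensors, each of shape (4, 64)

last = layers.SimpleRNN(64)(x)
# only the final hidden-state tensor: (4, 64)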
def rnn_sequential(model, model_name, h_dim, return_seq, name=None):
    """Add one rnn layer in sequential model.

    Args:
        - model: sequential rnn model
        - model_name: rnn, lstm, or gru
        - h_dim: hidden state dimensions
        - return_seq: True or False
        - name: layer name

    Returns:
        - model: sequential rnn model
    """
    if name is None:
        if model_name == "rnn":
            model.add(layers.SimpleRNN(h_dim, return_sequences=return_seq))
        elif model_name == "lstm":
            model.add(layers.LSTM(h_dim, return_sequences=return_seq))
        elif model_name == "gru":
            model.add(layers.GRU(h_dim, return_sequences=return_seq))
    else:
        if model_name == "rnn":
            model.add(
                layers.SimpleRNN(h_dim, return_sequences=return_seq,
                                 name=name))
        elif model_name == "lstm":
            model.add(
                layers.LSTM(h_dim, return_sequences=return_seq, name=name))
        elif model_name == "gru":
            model.add(layers.GRU(h_dim, return_sequences=return_seq,
                                 name=name))

    return model
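A usage sketch for rnn_sequential (layer sizes, names and the input feature dimension are arbitrary assumptions):

model = tf.keras.Sequential()
model = rnn_sequential(model, "gru", 32, return_seq=True, name="gru_1")
model = rnn_sequential(model, "gru", 32, return_seq=False, name="gru_2")
model.add(layers.Dense(1, activation="sigmoid"))
model.build(input_shape=(None, None, 8))             # (batch, timesteps, features)
model.summary()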
Example 6
    def __init__(self, units, embedding_len, input_len, total_words):
        """

        :param units: size of the SimpleRNN hidden vector
        :param embedding_len: length of each word vector
        :param input_len: length of each input time sequence
        :param total_words: vocabulary size
        """
        super(RNN, self).__init__()
        # embedding
        self.embedding = layers.Embedding(input_dim=total_words,
                                          output_dim=embedding_len,
                                          input_length=input_len)

        # RnnLayer
        self.rnn = Sequential([
            layers.SimpleRNN(units,
                             dropout=0.5,
                             return_sequences=True,
                             unroll=True),
            layers.SimpleRNN(units, dropout=0.5, unroll=True)
        ])

        # fullConnection
        self.fc = layers.Dense(1)
Example 7
def only_simple_rnn():
    model = models.Sequential([
        layers.SimpleRNN(20, return_sequences=True, input_shape=[None, 1]),
        layers.SimpleRNN(20, return_sequences=True),
        layers.SimpleRNN(1)
    ])

    return model
Example 8
def simple_rnn(dense_node):
    model = models.Sequential([
        layers.SimpleRNN(20, return_sequences=True, input_shape=[None, 1]),
        layers.SimpleRNN(20),
        layers.Dense(dense_node)
    ])

    return model
def make_model():
    my_model = keras.Sequential()
    my_model.add(layers.SimpleRNN(5, return_sequences=True))
    my_model.add(layers.SimpleRNN(5))
    my_model.add(layers.Dense(1, activation=tf.keras.activations.sigmoid))
    my_model.compile(loss=tf.keras.losses.MeanAbsoluteError(),
                  optimizer=tf.keras.optimizers.Nadam(1e-4),
                  metrics=['mae'])
    return my_model
Example 10
def simple_rnn_2(dense_node):
    model = models.Sequential([
        layers.SimpleRNN(20, return_sequences=True, input_shape=[None, 1]),
        layers.SimpleRNN(20, return_sequences=True),
        # apply Dense(10) to every timestep
        layers.TimeDistributed(layers.Dense(dense_node))
    ])

    return model
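A quick shape check for this sequence-to-sequence model (batch size, sequence length and dense_node are arbitrary):

model = simple_rnn_2(dense_node=10)
x = tf.random.normal([4, 50, 1])    # (batch, timesteps, 1); the timestep count is flexible
y = model(x)
print(y.shape)                      # (4, 50, 10): the Dense layer is applied at every timestep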
Example 11
 def __init__(self, units):
     super(MyRnn, self).__init__()
     self.embedding = layers.Embedding(total_words,
                                       embedding_len,
                                       input_length=max_review_len)
     self.rnn = keras.Sequential([
         layers.SimpleRNN(units, dropout=0.5, return_sequences=True, unroll=True),
         layers.SimpleRNN(units, dropout=0.5, unroll=True)
     ])
     self.outlayer = layers.Dense(1)
Example 12
def RNN_model():
    model = keras.Sequential([
        layers.Embedding(input_dim=30000, output_dim=32, input_length=maxlen),
        layers.SimpleRNN(32, return_sequences=True),
        layers.SimpleRNN(1, activation='sigmoid', return_sequences=False)
    ])
    model.compile(optimizer=keras.optimizers.Adam(),
                  loss=keras.losses.BinaryCrossentropy(),
                  metrics=['accuracy'])
    return model
Example 13
    def define_NN(self):
        tf.keras.backend.clear_session()  # For easy reset of notebook state.

        in1 = keras.Input(shape=(ImgGenerator.H, ImgGenerator.W // 2, 2), name='inp1')
        in_r1 = layers.Reshape((ImgGenerator.H, ImgGenerator.W), name="reshaped_input")(in1)

        lstm = layers.LSTM(units=256 * 4, name="lstm")(in_r1)
        lstm = layers.Reshape((128, 8), name="reshaped_lstm")(lstm)
        print(lstm)

        in_conv = layers.DepthwiseConv2D((128, 1), padding="same", data_format='channels_last', name="depth-conv")(in1)
        print(in_conv)
        in_conv = layers.Reshape((128, 128), name="reshape_conv")(in_conv)

        rnn = layers.SimpleRNN(256, name="rnn")(in_r1)
        rnn = layers.Reshape((128, 2), name="reshaped_rnn")(rnn)
        print(rnn)

        # Rotated
        rot_layer = layers.Lambda(lambda x: kbck.reverse(x, axes=0), output_shape=(64, 128, 2))(in1)
        rot_layer = layers.Reshape((ImgGenerator.H, ImgGenerator.W), name="reshaped_input2")(rot_layer)
        in_conv2 = layers.SimpleRNN(128, name="rnn2")(rot_layer)
        print(in_conv2)
        in_conv2 = layers.Reshape((128, 1), name="reshape_conv2")(in_conv2)

        d0 = layers.Dense(1024, activation="tanh", name="dense-inp")(in_r1)  #
        print(d0)

        d0 = layers.Concatenate(axis=2)([d0, lstm, in_conv, rnn, in_conv2])
        print(d0.shape)
        rnn = layers.Flatten()(d0)
        # rnn = layers.BatchNormalization(momentum=0.8)(rnn)
        # rnn = layers.LeakyReLU()(rnn)
        print(rnn)
        dense_1 = layers.Dense(2048, activation="relu")(rnn)  # , activation="relu"
        # dense_1 = layers.BatchNormalization(momentum=0.8)(dense_1)
        # dense_1 = layers.LeakyReLU()(dense_1)
        print(dense_1)
        # for layer_idx in range(0, 5):
        #    dense_1 = layers.BatchNormalization(momentum=0.8)(dense_1)
        #    dense_1 = layers.Dense(1024, activation="tanh", name=f"muldence{layer_idx}")(dense_1)#, activation="relu"
        dense_2 = layers.Dense(4096, activation="relu")(dense_1)  # , activation="relu"
        # dense_2 = layers.BatchNormalization(momentum=0.8)(dense_2)
        print(dense_2)

        output = layers.Dense(128 * 128)(dense_2)  # ,, activation="softplus"
        # output = layers.Softmax()(output)

        print(f"Last dense:{output}")

        output = layers.Reshape((128, 128))(output)
        print(f"Out layer:{output}")
        return [in1], [output]
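define_NN only returns the input and output tensor lists, so presumably they are wired into a keras.Model elsewhere in the class; a minimal sketch of that step (optimizer and loss are placeholders, not from the source):

        ins, outs = self.define_NN()
        model = keras.Model(inputs=ins, outputs=outs)
        model.compile(optimizer='adam', loss='mse')   # placeholder settings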
Example 14
    def __init__(self):
        super(MyRNN, self).__init__()

        # [b, 8, 804] ,
        self.rnn = keras.Sequential([
            layers.SimpleRNN(521,
                             dropout=0.5,
                             return_sequences=True,
                             unroll=True),
            layers.SimpleRNN(804, dropout=0.5, unroll=True)
        ])

        self.outlayer = layers.Dense(804)
Example 15
    def __init__(self, units):
        super(MyRNN, self).__init__() 


        self.rnn = keras.Sequential([
            layers.SimpleRNN(units, dropout=0.3, return_sequences=True),
            layers.SimpleRNN(units, dropout=0.3)
        ])
        self.outlayer = Sequential([
            layers.Dense(128),
            layers.Dropout(rate=0.5),
            layers.ReLU(),
            layers.Dense(1)
        ])
Example 16
def get_model_architecture2(n_classes, layers_config):
    model = tf.keras.Sequential()

    rnn_idx = [
        i for i in range(len(layers_config))
        if layers_config[i][0] == RNN_NAME or layers_config[i][0] == LSTM_NAME
    ]
    input_shape = config.n_mels

    for i, (name, dim) in enumerate(layers_config):
        if name == RNN_NAME or name == LSTM_NAME:
            # If last rnn layer
            if i == rnn_idx[-1]:
                return_sequence = False
            else:
                return_sequence = True
            if i == 0:
                if name == RNN_NAME:
                    model.add(
                        layers.SimpleRNN(dim,
                                         input_shape=(None, input_shape),
                                         activation='relu',
                                         return_sequences=return_sequence))
                else:
                    model.add(
                        layers.LSTM(dim,
                                    input_shape=(None, input_shape),
                                    return_sequences=return_sequence,
                                    activation='relu',
                                    recurrent_activation='relu'))
            else:
                if name == RNN_NAME:
                    model.add(
                        layers.SimpleRNN(dim,
                                         activation='relu',
                                         return_sequences=return_sequence))
                else:
                    model.add(
                        layers.LSTM(dim, return_sequences=return_sequence))
        elif name == FC_NAME:
            if i == 0:
                model.add(
                    layers.Dense(dim, activation='relu',
                                 input_dim=input_shape))
            else:
                model.add(layers.Dense(dim, activation='relu'))

    model.add(layers.Dense(n_classes, activation='softmax'))
    return model
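An illustrative layers_config for get_model_architecture2 (a sketch; RNN_NAME, LSTM_NAME and FC_NAME are assumed to be the string tags already defined in this module):

layers_config = [
    (RNN_NAME, 64),    # first recurrent layer -> return_sequences=True
    (RNN_NAME, 64),    # last recurrent layer  -> return_sequences=False
    (FC_NAME, 32),     # fully connected layer
]
model = get_model_architecture2(n_classes=10, layers_config=layers_config)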
Example 17
File: rnn.py Project: leelew/MetReg
    def __init__(
            self,
            hidden_layers_sizes=(64, ),
            activation='relu',
    ):
        super().__init__()
        self.regressor = None
        self.hidden_layers_sizes = hidden_layers_sizes

        self.rnn = []
        for i, n_units in enumerate(self.hidden_layers_sizes):
            self.rnn.append(layers.SimpleRNN(units=n_units))

        self.rnn = layers.SimpleRNN(units=64)
        self.dense = layers.Dense(1)
 def __init__(self, units):
     super(MyRNN, self).__init__()
     # [b,300]=>[b,300,100]
     self.embedding = layers.Embedding(total_words,
                                       embedding_len,
                                       input_length=max_review_len)
     self.rnn = Sequential([
         layers.SimpleRNN(units,
                          dropout=0.5,
                          return_sequences=True,
                          unroll=True),
         layers.SimpleRNN(units, dropout=0.5)
     ])
     # [b,64]=>[b,1]
     self.outlayer = layers.Dense(1)
Example 19
def recurrent_model(units=32, celltype='GRU', Tx=None, Trej=0, print_summary=False,
                    learning_rate=.001, name='recurrent_model', loss='mse',
                    metrics='mse', initializer=keras.initializers.GlorotNormal()):
    '''Recurrent neural network model.'''
    
    # specify recurrent cell type
    if celltype=='GRU':
        Rcell = layers.GRU(units=units, return_sequences=True, kernel_initializer=initializer, name='Xrec')
    elif celltype=='LSTM':
        Rcell = layers.LSTM(units=units, return_sequences=True, kernel_initializer=initializer, name='Xrec')
    elif celltype=='RNN':
        Rcell = layers.SimpleRNN(units=units, return_sequences=True, kernel_initializer=initializer, name='Xrec')
    
    # Model architecture
    X_input = layers.Input(shape=(Tx,1), name='X0')
    X = Rcell(X_input)
    X = layers.Dense(1, activation='linear', kernel_initializer=initializer, name='Xdense')(X)
    Y = layers.Add(name='Y')([X,X_input])
    
    # Create model
    model = keras.Model(inputs=[X_input], outputs=Y, name=name)
    if print_summary:
        model.summary()
        
    # Compile model
    opt = tf.keras.optimizers.Adam(learning_rate,clipvalue=10)
    model.compile(optimizer=opt, loss=loss, metrics=metrics)
    
    return model
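An example call (units, Tx and the cell type are arbitrary choices):

model = recurrent_model(units=32, celltype='LSTM', Tx=100, print_summary=False)
# The Add layer learns a residual on top of the input, so the output Y has the
# same shape as X_input: (batch, Tx, 1).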
def build_rnn_model(embedding_dim,
                    vocabulary_size,
                    recurrent_type='SimpleRNN',
                    n_rnn_units=64,
                    n_rnn_layers=1,
                    bidirect=True):
    tf.random.set_seed(123)
    model = tf.keras.Sequential()
    model.add(
        tkl.Embedding(input_dim=vocabulary_size,
                      output_dim=embedding_dim,
                      name='embedding_layer'))
    for i in range(n_rnn_layers):
        return_sequences = (i < n_rnn_layers - 1)
        if recurrent_type == 'SimpleRNN':
            recurrent_layer = tkl.SimpleRNN(
                units=n_rnn_units,
                return_sequences=return_sequences,
                name='simple_rnn_layer{}'.format(i))
        elif recurrent_type == 'LSTM':
            recurrent_layer = tkl.LSTM(units=n_rnn_units,
                                       return_sequences=return_sequences,
                                       name='lstm_layer{}'.format(i))
        elif recurrent_type == 'GRU':
            recurrent_layer = tkl.GRU(units=n_rnn_units,
                                      return_sequences=return_sequences,
                                      name='gru_layer{}'.format(i))
        if bidirect:
            recurrent_layer = tkl.Bidirectional(recurrent_layer,
                                                name='bidirect_' +
                                                recurrent_layer.name)
        model.add(recurrent_layer)
    model.add(tkl.Dense(64, activation='relu'))
    model.add(tkl.Dense(1, activation='sigmoid'))
    return model
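An example call building a single bidirectional LSTM layer (vocabulary size and embedding dimension are assumptions):

model = build_rnn_model(embedding_dim=32,
                        vocabulary_size=10000,
                        recurrent_type='LSTM',
                        n_rnn_units=64,
                        n_rnn_layers=1,
                        bidirect=True)
model.compile(optimizer='adam',
              loss=tf.keras.losses.BinaryCrossentropy(from_logits=False),
              metrics=['accuracy'])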
Example 21
    def __init__(self, units):
        super().__init__()
        self.embedding = layers.Embedding(TOTAL_WORD,
                                          EMBEDDING_LEN,
                                          input_length=MAX_REVIEW_LEN)

        self.rnn = keras.Sequential([
            layers.SimpleRNN(units, dropout=0.5, return_sequences=True),
            layers.SimpleRNN(units, dropout=0.5)
        ])
        self.outlayer = Sequential([
            layers.Dense(32),
            layers.Dropout(rate=0.5),
            layers.ReLU(),
            layers.Dense(1)
        ])
Example 22
 def fit(self,
         x_train,
         y_train,
         modelPath,
         epochs=10,
         batch_size=100,
         max_features=39,
         maxlen=32,
         embedding_value=4,
         rnn_value=32):
     self.model = keras.Sequential()
     self.model.add(
         layers.Embedding(max_features,
                          embedding_value,
                          input_length=maxlen))
     self.model.add(layers.SimpleRNN(rnn_value))
     self.model.add(layers.Dropout(0.4))  # change later
     self.model.add(layers.Dense(1))
     self.model.add(layers.Activation('sigmoid'))
     self.model.compile(loss='binary_crossentropy',
                        optimizer='rmsprop',
                        metrics=['accuracy'])
     self.model.summary()
     x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
     self.model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs)
     self.model.save(modelPath)
Example 23
    def _build_model(self):
        input = keras.Input(shape=(self.height, self.width, 1))
        conv1 = layers.Conv2D(64, (3, 3), padding='same',
                              activation='relu')(input)  #(batch,32,128,64)
        pool1 = layers.MaxPool2D(pool_size=(2, 2), strides=2,
                                 padding='same')(conv1)
        conv2 = layers.Conv2D(128, (3, 3), padding='same',
                              activation='relu')(pool1)  #(batch,16,64,128)
        pool2 = layers.MaxPool2D(pool_size=(2, 2), strides=2,
                                 padding='same')(conv2)  #(batch,8,32,128)
        trans = layers.Permute((2, 1, 3))(pool2)  #(batch,32,8,128)
        reshape = layers.Reshape((32, 1024))(trans)  #(batch,32,1024)

        RNN_in = layers.Dropout(0.5)(reshape)
        # lstm1=layers.Bidirectional(layers.LSTM(31, return_sequences=True), backward_layer=layers.LSTM(31,return_sequences=True, go_backwards=True))(RNN_in) #(batch,31,512)
        rnn = layers.SimpleRNN(256, return_sequences=True)(RNN_in)
        # drop1=layers.Dropout(0.5)(lstm1)
        # lstm2=layers.Bidirectional(layers.LSTM(31, return_sequences=True), backward_layer=layers.LSTM(31,return_sequences=True, go_backwards=True))(Drop1)
        # lstm1=layers.LSTM(256, return_sequences=True, activation='sigmoid')(RNN_in)
        # lstm2=layers.LSTM(256, return_sequences=True, activation='sigmoid')(bn4)
        drop2 = layers.Dropout(0.5)(rnn)
        logits = layers.Dense(self.num_of_classes,
                              activation='softmax',
                              kernel_constraint='UnitNorm',
                              use_bias=False)(drop2)  #(batch,31,84)

        # decoded, log_prob = tf.nn.ctc_beam_search_decoder(
        #     logits, [6,5])
        # dense_decoded = tf.sparse.to_dense(
        #     decoded[0], default_value=-1, name="dense_decoded"
        # )

        self.model = keras.Model(inputs=input, outputs=logits)
        self.model.compile(loss=self.loss.ctc_loss, optimizer=self.optimizer
                           )  #,metrics=[self.loss.ctc_beam_decoder_loss])
Example 24
def get_model_architecture(n_classes, rnn_layers, rnn_dim, fc_layers, fc_dim):
    model = tf.keras.Sequential()

    # The output of SimpleRNN will be a 2D tensor of shape (batch_size, 32)
    # model.add(layers.SimpleRNN(32, input_shape=(config.n_mels, config.tisv_frame), return_sequences=True,  activation='relu')) #, activation='relu'
    for i in range(rnn_layers):
        if i == rnn_layers - 1:
            return_sequence = False
        else:
            return_sequence = True

        # if lstm:
        #     model.add(
        #         layers.LSTM(rnn_dim, input_shape=(None, config.tisv_frame), return_sequences=return_sequence))
        # else:
        model.add(
            layers.SimpleRNN(rnn_dim,
                             input_shape=(None, config.n_mels),
                             activation='relu',
                             return_sequences=return_sequence))
    for i in range(fc_layers):
        model.add(layers.Dense(fc_dim, activation='relu'))

    # model.add(layers.Dense(len(np.unique(y_train)), activation='softmax'))
    model.add(layers.Dense(n_classes, activation='softmax'))
    return model
Example 25
def get_model(bidirectional = False, seqModelType = "SimpleRNN", RNNunits = 32):
    model = keras.Sequential()
    model.add(layers.InputLayer(input_shape=(None,s)))

    if seqModelType == "HMM":
        seqLayer = HMMLayer(5, 15) # (10,15) is better than (5,11)
    elif seqModelType == "LSTM":
        seqLayer = layers.LSTM(RNNunits)
    elif seqModelType == "GRU":
        seqLayer = layers.GRU(RNNunits)
    elif seqModelType == "SimpleRNN":
        seqLayer = layers.SimpleRNN(RNNunits)
    else:
        sys.exit("unknown sequence model type " + seqModelType)

    if bidirectional:
        seqLayer = layers.Bidirectional(seqLayer)
    
    model.add(seqLayer)
    model.add(layers.Dense(1, activation='sigmoid'))
    lr = 1e-3
    #if seqModelType == "HMM":
    #    lr = 1e-2
    print(f"lr={lr}")
    model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=lr),
                  loss=tf.keras.losses.BinaryCrossentropy(),
                  metrics=["accuracy"])
    return model
Example 26
def rnn(n_outputs, window_size):
    # 1-layer, basic model, doesn't work very well...

    m = Sequential()
    m.add(layers.InputLayer(input_shape=(window_size, 1)))

    m.add(
        layers.SimpleRNN(units=64,
                         activation='tanh',
                         use_bias=True,
                         kernel_initializer='glorot_uniform',
                         recurrent_initializer='orthogonal',
                         bias_initializer='zeros',
                         kernel_regularizer=None,
                         recurrent_regularizer=None,
                         bias_regularizer=None,
                         activity_regularizer=None,
                         kernel_constraint=None,
                         recurrent_constraint=None,
                         bias_constraint=None,
                         dropout=0.5,
                         recurrent_dropout=0.2,
                         return_sequences=False,
                         return_state=False,
                         go_backwards=False,
                         stateful=False,
                         unroll=False))
    m.add(layers.BatchNormalization(name='batch_norm_1'))

    # last layer

    m.add(layers.Dense(n_outputs, activation='sigmoid', name='output'))

    return m
Example 27
def create_model():
    model = keras.models.Sequential()
    # Add a Masking layer
    model.add(layers.Masking(mask_value=0.0, input_shape=(2, 2)))
    # Add a plain RNN layer
    rnn_layer = layers.SimpleRNN(50, return_sequences=False)
    model.add(rnn_layer)
    model.add(Dense(300, activation='relu'))
    model.add(Dropout(0.2))
    model.add(Dense(100, activation='relu'))
    model.add(Dropout(0.2))
    model.add(Dense(30, activation='relu'))
    # multiple labels
    # model.add(Dense(10, activation='sigmoid'))
    # single label
    model.add(Dense(10, activation='sigmoid'))

    adam = keras.optimizers.Adam(learning_rate=0.05,
                                 beta_1=0.9,
                                 beta_2=0.999,
                                 amsgrad=False)
    model.compile(optimizer=adam,
                  loss='binary_crossentropy',
                  loss='binary_crossentropy',
                  metrics=['accuracy', 'binary_accuracy'])
    return model
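A small sketch of the masking behaviour (the input values are made up): timesteps whose features all equal mask_value (0.0) are skipped by the SimpleRNN.

import numpy as np

model = create_model()
x = np.array([[[0.5, 1.0],
               [0.0, 0.0]]])   # second timestep is fully masked
y = model.predict(x)
print(y.shape)                 # (1, 10)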
 def __init__(self, units):
     super(MyRNN, self).__init__()
     # Word-embedding encoding: [b, 80] => [b, 80, 100]
     self.embedding = layers.Embedding(total_words, embedding_len,
                                       input_length=max_review_len)
     # Build the RNN
     self.rnn = keras.Sequential([
         layers.SimpleRNN(units, dropout=0.5, return_sequences=True),
         layers.SimpleRNN(units, dropout=0.5)
     ])
     # Build the classification network that maps the cell's output features to 2 classes
     # [b, 80, 100] => [b, 64] => [b, 1]
     self.outlayer = Sequential([
         layers.Dense(32),
         layers.Dropout(rate=0.5),
         layers.ReLU(),
         layers.Dense(1)
     ])
    def __init__(self, unit):
        super(MyRNN, self).__init__()

        # Embedding layer: converts word ids into vectors; embedding_len is the embedding size,
        # i.e. each word is represented by a 100-dimensional vector
        # [b, 80] (b sentences, 80 words each) -> [b, 80, 100] (each word becomes a 100-dim vector)
        self.embedding = layers.Embedding(total_words, embedding_len, input_length=max_review_len)

        # Recurrent layers: create two stacked SimpleRNN layers
        self.rnn = keras.Sequential([
            layers.SimpleRNN(unit, dropout=0.5, return_sequences=True, unroll=True),
            layers.SimpleRNN(unit, dropout=0.5, unroll=True)
        ])
        # [b, 80, 100] -> [b, 64]

        # Fully connected layer for classification
        self.outlayer = layers.Dense(1)
Example 30
 def construct(self, dropout_rate: float):
     ''' Construct the model.
     - dropout_rate: Dropout rate.
     '''
     model_input = layers.Input(batch_shape=(None, N_STEPS, N_INPUT))
     x = layers.SimpleRNN(N_NEURONS,
                          activation='elu',
                          return_sequences=True)(model_input)
     output = layers.Dense(N_INPUT, activation='softmax', name='output')(x)
     self.model = keras.Model(inputs=model_input, outputs=output)