Example #1
def model(x_train, num_labels, units, num_lstm_layers, model_type='lstm'):
    """
    Simple RNN model with multiple RNN layers and units.
    Inputs:
    - x_train: required for creating the input shape for the RNN layer in Keras
    - num_labels: number of output classes (int)
    - units: number of RNN units (int)
    - num_lstm_layers: number of RNN layers to add (int)
    - model_type: type of RNN layer, one of 'lstm', 'gru' or 'rnn' (str)
    Returns:
    - model: a Keras model
    """
    # Select the recurrent layer class once instead of repeating the
    # conditional expression for every layer.
    rnn_layer = {'lstm': LSTM, 'gru': GRU}.get(model_type, SimpleRNN)
    model = Sequential(
        [rnn_layer(units,
                   return_sequences=True,
                   input_shape=(x_train.shape[1], x_train.shape[2]))] +
        # Build a fresh layer per repetition; multiplying a list of layer
        # objects would add the same instance (and weights) several times.
        [rnn_layer(units, return_sequences=True)
         for _ in range(num_lstm_layers - 1)] +
        [Dense(num_labels, activation='softmax')])

    model.summary()

    return model
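A minimal usage sketch (the dummy data, shapes, and compile settings below are illustrative assumptions, not part of the original example):

import numpy as np

x_train = np.random.random((32, 10, 8))  # (samples, timesteps, features)
net = model(x_train, num_labels=5, units=64, num_lstm_layers=2, model_type='gru')
# The final recurrent layer keeps return_sequences=True, so the softmax
# output is produced per timestep: shape (32, 10, 5).
net.compile(optimizer='adam', loss='categorical_crossentropy')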
Example #2
    def __init__(self,
                 maxlen,
                 max_features,
                 embedding_dims,
                 class_num=1,
                 last_activation='sigmoid'):
        super(RCNN, self).__init__()
        self.maxlen = maxlen
        self.max_features = max_features
        self.embedding_dims = embedding_dims
        self.class_num = class_num
        self.last_activation = last_activation
        self.embedding = Embedding(self.max_features,
                                   self.embedding_dims,
                                   input_length=self.maxlen)
        self.forward_rnn = SimpleRNN(128, return_sequences=True)
        self.backward_rnn = SimpleRNN(128,
                                      return_sequences=True,
                                      go_backwards=True)
        self.reverse = Lambda(lambda x: tf.reverse(x, axis=[1]))
        self.concatenate = Concatenate(axis=2)
        self.conv = Conv1D(64, kernel_size=1, activation='tanh')
        self.max_pooling = GlobalMaxPooling1D()
        self.classifier = Dense(self.class_num,
                                activation=self.last_activation)
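The constructor above only declares layers; the forward pass is not shown. A sketch of how these layers are typically wired in this RCNN variant (inferred from the layer names and mirroring Example #3 below, so treat it as an assumption):

    def call(self, inputs):
        embedding = self.embedding(inputs)
        x_forward = self.forward_rnn(embedding)
        # go_backwards=True emits outputs in reverse time order,
        # so flip them back before concatenating.
        x_backward = self.reverse(self.backward_rnn(embedding))
        x = self.concatenate([x_forward, embedding, x_backward])
        x = self.conv(x)
        x = self.max_pooling(x)
        return self.classifier(x)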
Example #3
    def build_model(self):
        input_current = Input((self.max_len, ))
        input_left = Input((self.max_len, ))
        input_right = Input((self.max_len, ))

        embedder = Embedding(self.max_features,
                             self.embedding_size,
                             input_length=self.max_len)
        embedding_current = embedder(input_current)
        embedding_left = embedder(input_left)
        embedding_right = embedder(input_right)

        x_left = SimpleRNN(128, return_sequences=True)(embedding_left)
        x_right = SimpleRNN(128, return_sequences=True,
                            go_backwards=True)(embedding_right)
        x_right = Lambda(lambda x: K.reverse(x, axes=1))(x_right)
        x = Concatenate(axis=2)([x_left, embedding_current, x_right])

        x = Conv1D(64, kernel_size=1, activation='tanh')(x)
        x = GlobalMaxPooling1D()(x)

        output = Dense(self.class_num, activation=self.activation)(x)
        model = Model(inputs=[input_current, input_left, input_right],
                      outputs=output)
        return model
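For context, one plausible way to prepare the three inputs is to shift the token sequence to obtain left and right contexts (this preprocessing is an assumption, not shown in the original):

import numpy as np

token_ids = np.random.randint(1, 1000, size=(16, 50))  # placeholder encoded text
left = np.roll(token_ids, 1, axis=1)                   # previous-token context
left[:, 0] = 0                                         # zero out the wrapped position
right = np.roll(token_ids, -1, axis=1)                 # next-token context
right[:, -1] = 0
# model.predict([token_ids, left, right]) once the model above is built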
Example #4
def birnn():
    model = Sequential()
    model.add(
        Bidirectional(SimpleRNN(2**5, return_sequences=True),
                      input_shape=(max_len, 1)))
    model.add(Bidirectional(SimpleRNN(2**5, return_sequences=True)))
    model.add(Bidirectional(SimpleRNN(2**5, return_sequences=False)))

    model.add(Dense(10))
    model.add(Activation('softmax'))
    adam = optimizers.Adam(lr=0.001)
    # The 10-way softmax output calls for categorical cross-entropy
    model.compile(loss='categorical_crossentropy',
                  optimizer=adam,
                  metrics=['accuracy'])
    model.summary()
    early_stopping = EarlyStopping(patience=10)
    history = model.fit(x_train,
                        y_train,
                        validation_split=0.1,
                        epochs=epochs,
                        batch_size=batch_size,
                        verbose=1,
                        callbacks=[early_stopping])

    return model, history
Example #5
    def model_define(self):
        '''Defines the model architecture. A series of if statements allows
        adding extra layers and swapping the type of recurrent unit.'''
        self.model1 = Sequential()
        self.model1.add(BatchNormalization(input_shape=(self.n_steps, self.n_features)))
        self.model1.add(Dropout(0.2))
        if self.rnn_unit:
            if self.more_layer:
                self.model1.add(SimpleRNN(64, activation='relu', return_sequences=True))
                self.model1.add(BatchNormalization())
                self.model1.add(SimpleRNN(16, activation='relu'))
            else:
                self.model1.add(SimpleRNN(64, activation='relu'))
        else:
            if self.more_layer:
                self.model1.add(LSTM(64, activation='relu', return_sequences=True))
                self.model1.add(BatchNormalization())
                self.model1.add(LSTM(16, activation='relu'))
            else:
                self.model1.add(LSTM(64, activation='relu'))
        if self.n_steps_out > 0:
            self.model1.add(Dense(self.n_steps_out))
        else:
            self.model1.add(Dense(1))
        self.model1.summary()  # Print a model architecture summary
        plot_model(self.model1, self.input_path + self.output_path + 'model.png',
                   show_shapes=True)  # Save the computational graph
        self.model1.save(self.input_path + self.output_path + 'model')  # Save the model for future use
        self.model1.compile(loss=keras.losses.logcosh,  # Compile with the chosen settings
                            optimizer=keras.optimizers.Adam(),
                            metrics=['mse'])
Example #6
def build_rnn_model(vocabulary_size, input_length):
    model = Sequential()
    model.add(Embedding(vocabulary_size, 50, input_length=input_length))
    model.add(SimpleRNN(25, return_sequences=True))
    model.add(SimpleRNN(25))
    model.add(Dense(50, activation='relu'))
    model.add(Dense(vocabulary_size, activation='softmax'))
    return model
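A minimal sketch of compiling and training this model (the data shapes, hyperparameters, and loss choice below are illustrative assumptions):

import numpy as np

model = build_rnn_model(vocabulary_size=5000, input_length=20)
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
X = np.random.randint(0, 5000, size=(256, 20))  # integer-encoded sequences
y = np.random.randint(0, 5000, size=(256,))     # next-word targets
model.fit(X, y, epochs=2, batch_size=32)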
Example #7
def rnn_model(cfg, shapeX, shapeY):
    # cfg is accepted for interface compatibility but is not used here
    model_rnn = Sequential()
    model_rnn.add(SimpleRNN(units=50, return_sequences=True, input_shape=(shapeX, shapeY)))
    model_rnn.add(Dropout(0.2))
    model_rnn.add(SimpleRNN(units=50))
    model_rnn.add(Dropout(0.2))
    model_rnn.add(Dense(units=1))
    model_rnn.compile(loss='mean_squared_error', optimizer=keras.optimizers.Adam(0.001))
    return model_rnn
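Called, for instance, with a window of 60 timesteps of a single feature (the argument values are illustrative; cfg is passed as None since it is unused):

net = rnn_model(cfg=None, shapeX=60, shapeY=1)
net.summary()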
Example #8
def residual_block(x, units, conv_num=3, activation='tanh'):  # (input, output units, loop count, activation)
    # Shortcut branch
    s = SimpleRNN(units, return_sequences=True)(x)
    for i in range(conv_num - 1):
        # return_sequences=True keeps the output 3-D so that stacked
        # recurrent layers line up shape-wise
        x = SimpleRNN(units, return_sequences=True)(x)
        x = Activation(activation)(x)
    x = SimpleRNN(units)(x)
    x = Add()([x, s])
    return Activation(activation)(x)
Example #9
def create_model(model_type):
  clear_session()
  samples = 2000
  epochs = 20
  if model_type == 'Dense':
    x_train = np.random.uniform(0, 10, (samples, 5))
    y_train = (np.mean(x_train, axis=1) ** 2) / 2  # half of squared mean of sample

    model = Sequential()
    model.add(Dense(128, activation='relu', input_shape=x_train.shape[1:]))
    model.add(BatchNormalization())
    model.add(Dense(64, activation='tanh'))
    model.add(BatchNormalization())
    model.add(Dense(32, activation='relu'))
    model.add(BatchNormalization())
    model.add(Dense(16, activation='tanh'))
    model.add(BatchNormalization())
    model.add(Dense(1, activation='linear'))
  elif model_type == 'SimpleRNN':
    x_train = np.random.uniform(0, 10, (samples, 10, 4))
    y_train = (np.mean(x_train.take(axis=1, indices=8), axis=1) ** 2) / 2  # half of squared mean of sample's 8th index

    model = Sequential()
    model.add(SimpleRNN(128, return_sequences=True, input_shape=x_train.shape[1:]))
    model.add(BatchNormalization())
    model.add(SimpleRNN(64, return_sequences=True))
    model.add(BatchNormalization())
    model.add(SimpleRNN(32))
    model.add(BatchNormalization())
    model.add(Dense(16, activation='relu'))
    model.add(BatchNormalization())
    model.add(Dense(1, activation='linear'))
  elif model_type == 'GRU':
    x_train = np.random.uniform(0, 10, (samples, 10, 4))
    y_train = (np.mean(x_train.take(axis=1, indices=8), axis=1) ** 2) / 2  # half of squared mean of sample's 8th index

    model = Sequential()
    model.add(GRU(128, input_shape=x_train.shape[1:], return_sequences=True, implementation=2))
    model.add(BatchNormalization())
    model.add(GRU(64, return_sequences=True, implementation=2))
    model.add(BatchNormalization())
    model.add(GRU(32, implementation=2))
    model.add(BatchNormalization())
    model.add(Dense(16, activation='relu'))
    model.add(BatchNormalization())
    model.add(Dense(1, activation='linear'))
  else:
    raise Exception('Unknown model type: {}'.format(model_type))

  model.compile(optimizer='adam', loss='mse')
  model.fit(x_train, y_train, batch_size=32, epochs=epochs, verbose=0)
  if WRITE_KERAS_MODELS:
    model.save('tests/{}.h5'.format(model_type))
  return model, x_train.shape
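A quick usage sketch (WRITE_KERAS_MODELS is a module-level flag assumed by the function; the call below is illustrative):

model, input_shape = create_model('SimpleRNN')
print(input_shape)  # (2000, 10, 4): samples, timesteps, features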
Example #10
def TS_model(bidirect=True, RNN='LSTM', layer_norm=False):
    inputs = layers.Input(shape=(9,), name='input')  # shape must be a tuple: (9,) not (9)
    inputs_extend = RepeatVector(500)(inputs)
    if bidirect:
        if RNN == 'LSTM':
            layer_1 = Bidirectional(
                LSTM(128, return_sequences=True, dropout=0.3))(inputs_extend)
        elif RNN == 'GRU':
            layer_1 = Bidirectional(
                GRU(128, return_sequences=True, dropout=0.3))(inputs_extend)
        else:
            layer_1 = Bidirectional(
                SimpleRNN(128, return_sequences=True,
                          dropout=0.3))(inputs_extend)
    else:
        if RNN == 'LSTM':
            layer_1 = LSTM(128, return_sequences=True,
                           dropout=0.3)(inputs_extend)
        elif RNN == 'GRU':
            layer_1 = GRU(128, return_sequences=True,
                          dropout=0.3)(inputs_extend)
        else:
            layer_1 = SimpleRNN(128, return_sequences=True,
                                dropout=0.3)(inputs_extend)

    if layer_norm:
        layer_1 = LayerNormalization()(layer_1)

    if bidirect:
        if RNN == 'LSTM':
            layer_2 = Bidirectional(
                LSTM(128, return_sequences=True, dropout=0.3))(layer_1)
        elif RNN == 'GRU':
            layer_2 = Bidirectional(
                GRU(128, return_sequences=True, dropout=0.3))(layer_1)
        else:
            layer_2 = Bidirectional(
                SimpleRNN(128, return_sequences=True, dropout=0.3))(layer_1)
    else:
        if RNN == 'LSTM':
            layer_2 = LSTM(128, return_sequences=True, dropout=0.3)(layer_1)
        elif RNN == 'GRU':
            layer_2 = GRU(128, return_sequences=True, dropout=0.3)(layer_1)
        else:
            layer_2 = SimpleRNN(128, return_sequences=True,
                                dropout=0.3)(layer_1)

    if layer_norm:
        layer_2 = LayerNormalization()(layer_2)

    layer_3 = TimeDistributed(Dense(64, activation='elu'))(layer_2)
    outputs = TimeDistributed(Dense(1))(layer_3)
    model = Model(inputs, outputs)
    return model
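For instance (the call and compile settings below are illustrative):

model = TS_model(bidirect=False, RNN='GRU', layer_norm=True)
model.compile(optimizer='adam', loss='mse')
model.summary()  # input: 9 features repeated over 500 timesteps; output: (500, 1)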
Example #11
def rnnModel():
    model = Sequential()
    model.add(create_embedding_layer(word_index, MAX_SEQUENCE_LENGTH))
    model.add(SimpleRNN(64, return_sequences=True, activation="relu"))
    model.add(Dropout(0.3))
    model.add(SimpleRNN(64, activation="relu"))
    model.add(Dropout(0.3))
    model.add(Dense(32))
    model.add(Dropout(0.3))
    model.add(Dense(7, activation='softmax'))
    return model
Example #12
def MyRNN(isMeanPooling, stateDim, embedding):
    model = models.Sequential()
    model.add(embedding)
    if isMeanPooling:
        # Keep per-timestep outputs and average them over time
        model.add(SimpleRNN(units=stateDim, return_sequences=True))
        model.add(GlobalAveragePooling1D())
    else:
        model.add(SimpleRNN(units=stateDim))
    model.add(Dense(1, activation='sigmoid'))
    model.summary()
    return model
Example #13
def rnnModel():
    model = Sequential()
    model.add(create_embedding_layer(word_index))
    model.add(SimpleRNN(256, activation="relu", return_sequences=True))
    model.add(Dropout(0.3))
    model.add(SimpleRNN(128, activation="relu"))
    model.add(Dropout(0.3))
    model.add(Dense(64))
    model.add(Dropout(0.3))
    model.add(Dense(len(labels), activation='softmax'))
    return model
Example #14
    def create(self) -> tf.keras.models.Model:
        model = tf.keras.models.Sequential([InputLayer((50, 1), name='input')],
                                           name=self.name)
        for i in range(9):
            model.add(
                SimpleRNN(75,
                          return_sequences=True,
                          name='rnn0{}'.format(i + 1)))
        model.add(SimpleRNN(75, return_sequences=False, name='rnn10'))
        model.add(Dense(1, activation=None, name='output'))
        return model
Example #15
def rnn_model(df):
    # Mapping event to number 
    events = {'purchase':1,'cart': 2,'view': 3, 'remove_from_cart':4}
    df['event'] = df.event_type.map(events)
    sequence = df.groupby('user_session')['event'].apply(list)
    sequence = sequence.reset_index()
    # Label each session: 1 if it contains a purchase event (event=1), else 0
    sequence['purchase'] = sequence['event'].apply(lambda x: 1 if 1 in x else 0)
    sequence = sequence[sequence['event'].map(len) > 1]
    productdf = pd.DataFrame(df.groupby('user_session')['product_id'].apply(list)).reset_index()
    productdf = productdf[productdf['product_id'].map(len) > 1]
    # Add the product list to the dataframe
    sequence['product'] = productdf['product_id']
    # The sequence data should not contain the "purchase" event, so it is filtered out
    sequence['event'] = sequence.event.apply(lambda row: list(filter(lambda a: a != 1, row)))
    # Keep event sequences of length up to 5 and discard the rest
    short_sequence_5 = sequence[sequence['event'].map(len) <= 5]
    event_sequence = short_sequence_5['event'].to_list()
    # Pad sequences to a common length
    event = pad_sequences(event_sequence)
    short_sequence_5['event2'] = event.tolist()
# ----------------------------------------- Splitting Data -----------------------------------------#

    X = short_sequence_5[['user_session', 'product', 'event2']]
    y = np.array(pd.get_dummies(short_sequence_5['purchase'], prefix='Purchase'))
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)

# ------------------------- Reshaping is necessary since the input to sequence models is (1, d) ------------#

    Xe_train = np.array(X_train['event2'].tolist())
    Xe_train = Xe_train.reshape((Xe_train.shape[0], 1, Xe_train.shape[1]))
    Xe_test = np.array(X_test['event2'].tolist())
    Xe_test = Xe_test.reshape((Xe_test.shape[0], 1, Xe_test.shape[1]))

#------------------------------------------------Initializing RNN model---------------------------------#

    model = Sequential()
    model.add(SimpleRNN(units=40, return_sequences=True, input_shape=(1, 5)))
    model.add(SimpleRNN(2 * 40))
    model.add(Dense(2, activation='softmax'))
    model.compile(optimizer=tf.optimizers.Adam(learning_rate=0.0003), loss='mean_absolute_error', metrics=['acc'])
#-------------------------------------------------Fitting RNN Model--------------------------------------#

    model.fit(Xe_train, y_train, epochs=30, batch_size=1000, validation_data=(Xe_test, y_test))
    model_adam = model

#-------------------------------------------------Saving RNN Model--------------------------------------#

    model_adam.save("C:/Users/hano0/Desktop/DSI8/atom/RNNmodel.h5")
    print("Saved model to disk")

#------------------------------------Returning Test Data for Prediction ------------------------#
    return Xe_test, y_test, X_test
Example #16
def train_network(embedding_vector_dimensionality, embedding_dropout_factor,
                  recurrent_dropout_factor, RNN_dropout_factor,
                  layer_dropout_factor, recurrent_layer_sizes, lr, lr_decay,
                  batch_size, epoch_no, max_train_size, max_test_size):
    X_train, y_train, X_test, y_test = load_data()
    if max_train_size and isinstance(max_train_size, int):
        X_train = X_train[:max_train_size]
        y_train = y_train[:max_train_size]
    if max_test_size and isinstance(max_test_size, int):
        X_test = X_test[:max_test_size]
        y_test = y_test[:max_test_size]
    print("Shape of the training input: (%d, %d)" % X_train.shape)
    print("Shape of the training output: (%d, %d)" % y_train.shape)
    model = Sequential()
    model.add(
        Embedding(get_word_count(),
                  embedding_vector_dimensionality,
                  input_length=X_train.shape[1]))
    model.add(Dropout(embedding_dropout_factor))
    for size in recurrent_layer_sizes[:-1]:
        model.add(
            SimpleRNN(units=size,
                      return_sequences=True,
                      recurrent_dropout=recurrent_dropout_factor,
                      dropout=RNN_dropout_factor))
        model.add(Dropout(layer_dropout_factor))
    model.add(
        SimpleRNN(units=recurrent_layer_sizes[-1],
                  recurrent_dropout=recurrent_dropout_factor,
                  dropout=RNN_dropout_factor))
    model.add(Dropout(layer_dropout_factor))
    model.add(Dense(y_train.shape[1], activation='sigmoid'))
    optimizer = Adam(lr=lr, decay=lr_decay)
    model.compile(loss='binary_crossentropy',
                  optimizer=optimizer,
                  metrics=['binary_accuracy', c_score])
    print(model.summary())
    model.fit(X_train,
              y_train,
              validation_data=(X_test, y_test),
              epochs=epoch_no,
              batch_size=batch_size,
              callbacks=[
                  ModelCheckpoint("weights.hdf5",
                                  monitor='val_loss',
                                  save_best_only=True,
                                  mode='auto',
                                  period=1),
                  LogPerformance()
              ])

    scores = model.evaluate(X_test, y_test, verbose=0)
    print("Accuracy: %.2f%%" % (scores[1] * 100))
Example #17
def create_rnn(dropout_rate, input_size):
    model = Sequential()
    model.add(SimpleRNN(32, return_sequences=True, input_shape=input_size))
    model.add(SimpleRNN(64, return_sequences=False))
    model.add(Dropout(dropout_rate))
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(dropout_rate))
    model.add(Dense(2, activation='softmax'))

    return model
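For example (input_size is (timesteps, features); the values below are illustrative):

model = create_rnn(dropout_rate=0.3, input_size=(20, 7))
model.compile(optimizer='adam', loss='categorical_crossentropy',
              metrics=['accuracy'])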
Example #18
    def build_model_RNN(self, config):
        seq_len = config['data']['sequence_length'] - 1
        num_features = config['data']['num_features']
        logging.info("[MODEL]: Building model...")
        self.model.add(
            SimpleRNN(100,
                      input_shape=(seq_len, num_features),
                      return_sequences=True))
        self.model.add(SimpleRNN(100, return_sequences=False))
        self.model.add(Dropout(0.2))
        self.model.add(Dense(1, activation='linear'))
        opt = Adam(learning_rate=1e-4, clipnorm=1)
        self.model.compile(loss='mse', optimizer=opt)
Example #19
    def fit(self,
            series,
            train_size=0.70,
            window_size=30,
            epochs=100,
            batch_size=32,
            verbose=2):
        self.window_size = window_size
        self.batch_size = batch_size
        self.train_size = train_size

        # Build model
        model = Sequential()
        #model.add(BatchNormalization())
        model.add(
            SimpleRNN(self.rnn_units[0],
                      return_sequences=True,
                      input_shape=[None, 1]))

        for n in self.rnn_units[1:]:
            model.add(SimpleRNN(n, return_sequences=True))
        if self.dropout is not None:
            model.add(Dropout(self.dropout))
        model.add(Dense(units=1))

        # Compile model
        model.compile(loss=keras.losses.Huber(),
                      optimizer='adam',
                      metrics=['mae'])

        # Prepare data
        t = int(len(series) * train_size)
        train = series[:t]
        val = series[t:]
        train = seq2seq_window_dataset(train, window_size, batch_size)
        val = seq2seq_window_dataset(val, window_size, batch_size)

        # Callbacks and fitting
        checkpoint = keras.callbacks.ModelCheckpoint('best_rnn.h5',
                                                     save_best_only=True)
        early_stopping = keras.callbacks.EarlyStopping(patience=50)

        model.fit(train,
                  epochs=epochs,
                  validation_data=val,
                  callbacks=[early_stopping, checkpoint],
                  verbose=verbose)
        print('Training completed')

        self.model = keras.models.load_model("best_rnn.h5")
        del model
Example #20
def residual_block(
        x,
        filters,
        conv_num=3,
        activation='relu'):  # (input, output units, loop count, activation)
    # Shortcut
    s = Conv1D(filters, 1, padding='same')(x)
    for i in range(conv_num - 1):
        x = SimpleRNN(filters, return_sequences=True)(x)
        x = Activation(activation)(x)
    x = SimpleRNN(filters)(x)
    x = Add()([x, s])
    x = Activation(activation)(x)
    return MaxPool1D(pool_size=2, strides=1)(x)
Example #21
def sample_architecture():
    """ return_sequences=True returns the output at every timestep t.
        This is necessary when stacking RNN layers, since each subsequent
        layer needs a full sequence (a time axis) as its input. """
    model = Sequential()
    model.add(Embedding(10000, 32))
    model.add(SimpleRNN(32))
    model.summary()

    model = Sequential()
    model.add(Embedding(10000, 32))
    model.add(SimpleRNN(32, return_sequences=True))
    model.add(SimpleRNN(32, return_sequences=True))
    model.add(SimpleRNN(32, return_sequences=True))
    model.add(SimpleRNN(32))
    model.summary()
Example #22
    def build_train_model(self):
        # build model
        self.inputs = Input(shape=self.input_shape)
        self.y = SimpleRNN(units=self.units,
                           kernel_regularizer=self.regul)(self.inputs)
        self.outputs = Dense(self.num_labels, activation='softmax')(self.y)
        self.model = Model(self.inputs, self.outputs)

        self.model.summary()
        print("\n")

        # train model
        self.model.compile(optimizer='sgd',
                           loss='sparse_categorical_crossentropy',
                           metrics=['accuracy'])
        self.model.fit(self.x_train,
                       self.y_train,
                       validation_data=(self.x_test, self.y_test),
                       epochs=self.epochs,
                       batch_size=self.batch_size)
        self.score = self.model.evaluate(self.x_test,
                                         self.y_test,
                                         batch_size=self.batch_size)
        print("\nTest accuracy: %.1f%%" % (100 * self.score[1]))
        print("\n")
Example #23
    def rnn_model(self, x_train, x_test, y_train, y_test):
        classes_num = self.dataset.getParameters()["classes_num"]
        model = Sequential()
        model.add(Embedding(
            self.VOCAB_SIZE, self.EMBEDING_DIM, input_length=self.INPUT_LENGTH))
        model.add(SimpleRNN(128))
        model.add(Dense(64, activation='relu'))
        model.add(Dense(classes_num, activation=self.ACTIVATION))
        model.compile(
            loss=self.LOSSFUNC,
            optimizer='adam',
            metrics=['accuracy']
        )
        es_callback = EarlyStopping(
            monitor='val_loss', patience=3)
        model.summary()

        history = model.fit(x_train, y_train,
                            epochs=self.EPOCHS,
                            verbose=1,
                            validation_data=(x_test, y_test),
                            batch_size=self.BATCH_SIZE, callbacks=[es_callback])
        loss, accuracy = model.evaluate(x_train, y_train, verbose=1)
        print("Training Accuracy: {:.4f}".format(accuracy))
        loss, accuracy = model.evaluate(x_test, y_test, verbose=1)
        print("Testing Accuracy:  {:.4f}".format(accuracy))
        return history
Example #24
def build_model(nb_words, rnn_model="SimpleRNN", embedding_matrix=None):
    '''
    build_model function:
    inputs:
        nb_words - vocabulary size for the embedding layer (int)
        rnn_model - which type of RNN layer to use, one of (SimpleRNN, LSTM, GRU)
        embedding_matrix - pretrained embedding weights, or None to train without them
    '''
    model = Sequential()
    # add an embedding layer
    if embedding_matrix is not None:
        model.add(
            Embedding(nb_words,
                      200,
                      weights=[embedding_matrix],
                      input_length=max_len,
                      trainable=False))
    else:
        model.add(
            Embedding(nb_words, 200, input_length=max_len, trainable=False))

    # add an RNN layer according to rnn_model
    if rnn_model == "SimpleRNN":
        model.add(SimpleRNN(200))
    elif rnn_model == "LSTM":
        model.add(LSTM(200))
    else:
        model.add(GRU(200))
    # model.add(Dense(500,activation='relu'))
    # model.add(Dense(500, activation='relu'))
    model.add(Dense(2, activation='softmax'))

    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    return model
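A usage sketch, assuming max_len is a module-level constant as in the original (the values below are illustrative):

max_len = 100  # assumed module-level constant used by build_model
model = build_model(nb_words=20000, rnn_model="GRU")
model.summary()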
Example #25
    def __init__(self,
                 input_dim,
                 hidden_dim,
                 output_dim,
                 sequence_length,
                 activation='tanh',
                 loss_fun='MSE',
                 weight_decay=0,
                 metabolic_cost=0,
                 use_bias=True,
                 recurrent_constraint=None):
        super(RNN, self).__init__()
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.output_dim = output_dim
        self.sequence_length = sequence_length
        self.activation = activation

        self.encoder = Dense(hidden_dim, name='encoder', use_bias=use_bias)
        self.rnn = SimpleRNN(hidden_dim,
                             return_sequences=True,
                             activation=tf.keras.layers.Activation(activation),
                             recurrent_initializer='glorot_uniform',
                             recurrent_constraint=recurrent_constraint,
                             name='RNN',
                             use_bias=use_bias)
        self.decoder = Dense(output_dim, name='decoder', use_bias=use_bias)

        self.loss_fun = tf.keras.losses.get(loss_fun)
        self.weight_decay = weight_decay
        self.metabolic_cost = metabolic_cost
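The forward pass is not shown; a sketch of how these pieces are presumably composed, based on the encoder/RNN/decoder naming (an assumption):

    def call(self, inputs):
        # inputs: (batch, sequence_length, input_dim)
        h = self.encoder(inputs)   # project inputs into the hidden space
        h = self.rnn(h)            # recurrent dynamics, one output per timestep
        return self.decoder(h)     # read out output_dim at every step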
Example #26
    def create_model(self):
        """
        Creates the model
        """
        self.mod = Sequential()

        # Add the RNN layer
        # Note 1: unroll=True is set on GPU to speed up the plain RNN
        # Note 2: on GPU the CuDNN version of the LSTM is used
        unroll = tf.test.is_gpu_available()
        if self.mod_type == 'lstm':
            # LSTM model
            if unroll:
                self.mod.add(CuDNNLSTM(self.nh, input_shape=(self.nt, self.nin),
                                       return_sequences=False, name='RNN'))
            else:
                self.mod.add(LSTM(self.nh, input_shape=(self.nt, self.nin),
                                  return_sequences=False, name='RNN', unroll=unroll))

        elif self.is_complex:
            # Complex-valued RNN
            cell = ComplexRNNCell(nh=self.nh)
            self.mod.add(RNN(cell, input_shape=(self.nt, self.nin),
                             return_sequences=False, name='RNN', unroll=True))
        else:
            # Real-valued RNN model
            self.mod.add(SimpleRNN(self.nh, input_shape=(self.nt, self.nin),
                                   return_sequences=False, name='RNN',
                                   activation='relu', unroll=unroll))
        self.mod.add(Dense(nout, activation='softmax', name='Output'))
        self.mod.summary()
Example #27
def rnn(company):
    df = pd.read_csv('pg4_data.csv', parse_dates=True, index_col='date')
    df = df[df.company == company]
    df.drop(['ticker', 'company'], inplace=True, axis=1)
    df['price'] = df.price.apply(lambda x: x.replace(',', ''))
    df['price'] = pd.to_numeric(df.price, errors='coerce')
    train_data = df[:-7]
    test_data = df[-7:]
    scaler = MinMaxScaler(feature_range=(0, 1))
    train_scaled = scaler.fit_transform(train_data)
    test_scaled = scaler.transform(test_data)
    generator = TimeseriesGenerator(train_scaled, train_scaled, length=3, batch_size=1)
    model = Sequential()
    model.add(SimpleRNN(132, input_shape=(3, 1)))
    model.add(Dense(64))
    model.add(Dense(1))
    early_stops = EarlyStopping(monitor='val_loss', patience=2)
    validation = TimeseriesGenerator(test_scaled, test_scaled, length=3, batch_size=1)
    model.compile(optimizer='adam', loss='mse')
    model.fit(generator, epochs=20, validation_data=validation, callbacks=[early_stops])

    test_prediction = []
    first_eval_batch = test_scaled[-3:]
    current_batch = first_eval_batch.reshape(1, 3, 1)

    current_pred = model.predict(current_batch)[0]
    test_prediction.append(current_pred)
    current_batch = np.append(current_batch[:, 1:, :], [[current_pred]], axis=1)
    true_predictions = scaler.inverse_transform(test_prediction)
    return round(true_predictions[0][0], 2)
Example #28
    def build_rnf_model(self):
        seq_input = Input(shape=(self.window_len, 4), name='seq_input')
        chunked_input = DistributeInputLayer(
            filter_width=self.rnf_kernel_size,
            seq_len=self.window_len)(seq_input)
        # Shape: (?, L-F+1, F, D)
        # The TimeDistributed layer treats index 1 of this input as
        # independent time steps, so the same SimpleRNN is applied
        # to every chunk.
        print('RNF_kernel_size: {}'.format(self.rnf_kernel_size))
        print('RNF_dimension: {}'.format(self.n_filters))
        xs = TimeDistributed(SimpleRNN(self.n_filters))(chunked_input)
        xs = Activation('relu')(xs)
        # Shape: (?, L-F+1, RNF_DIM). The RNN produces a single output
        # of dimension RNF_DIM per chunk.
        # Include an L1 norm at the subsequent dense layer.
        xs = MaxPooling1D(pool_size=15, strides=15)(xs)
        print(xs.shape)
        # Add dense layers
        xs = LSTM(32, activation='relu')(xs)
        print(xs.shape)
        xs = Dense(128, activation='relu')(xs)
        xs = Dropout(self.dropout_freq)(xs)
        print(xs.shape)
        xs = Dense(128, activation='relu')(xs)
        xs = Dropout(self.dropout_freq)(xs)
        result = Dense(1, activation='sigmoid')(xs)
        # Define the model input & output
        model = Model(inputs=seq_input, outputs=result)
        return model
Example #29
def build_sequential_model(input_dim=376):
    opt = tf.keras.optimizers.Adam(learning_rate=0.1)
    model = tf.keras.Sequential()
    model.add(Masking(0, input_shape=(None, 13)))
    # model.add(Embedding(input_dim=377, output_dim=13))
    model.add(Bidirectional(LSTM(units=52, return_sequences=True)))
    # model.add(Dropout(0.3))

    # The bidirectional GRU output is a 3D tensor of shape (batch_size, timesteps, 416)
    model.add(Bidirectional(GRU(208, return_sequences=True)))
    model.add(Dropout(0.4))

    # model.add(Dropout(0.4, training=True))
    # model.add(Activation('relu'))

    # The bidirectional SimpleRNN output is a 2D tensor of shape (batch_size, 104)
    # model.add(Bidirectional(LSTM(units=52, return_sequences=True)))
    # model.add(Dropout(0.3))
    model.add(Bidirectional(SimpleRNN(52)))

    model.add(Dropout(0.3))
    # model.add(Dropout(0.3, training=True))
    # model.add(Activation('relu'))

    model.add(Dense(13))
    model.compile(loss='mse',
                  optimizer=opt,
                  metrics=[
                      tf.keras.metrics.MeanSquaredError(),
                      tf.keras.metrics.RootMeanSquaredError(),
                      tf.keras.metrics.MeanAbsoluteError()
                  ])
    return model
Example #30
    def _create_model(self, input_dim, out_dim1):
        rnn = SimpleRNN(batch_input_shape=(None, self._maxlen, input_dim),
                        name='rnn1',
                        kernel_initializer='glorot_uniform',
                        bias_initializer='zeros',
                        units=out_dim1,
                        return_sequences=True,
                        activation='linear')
        layer1 = Dense(name='layer1',
                       units=1,
                       kernel_initializer='glorot_uniform',
                       bias_initializer='zeros',
                       activation='linear' if self._is_regression else None)
        if self._with_functional_api:
            inputs = Input(name='layer_in', shape=(self._maxlen, input_dim))
            outputs = layer1(rnn(inputs))
            model = Model(inputs=inputs,
                          outputs=outputs,
                          name='rnn_constructor')

        else:
            model = Sequential([rnn, layer1], name='rnn_constructor')

        model.compile(loss='mean_squared_error',
                      optimizer='adam',
                      metrics=['accuracy'])
        return model