Example #1
 def lstm_reshape(self,
                  inputs,
                  name_prefix,
                  index,
                  reshaped_inputs=None,
                  initial=False):
     name_prefix = "{0}_{1}_{2}".format(self.controller_network_name,
                                        name_prefix, index)
     cell = LSTMCell(
         self.lstm_cell_units,
         kernel_initializer=get_weight_initializer(initializer="lstm"),
         recurrent_initializer=get_weight_initializer(initializer="lstm"))
     if initial:
         x = RNN(cell,
                 return_state=True,
                 name="{0}_{1}".format(name_prefix, "lstm"))(inputs)
     else:
         x = RNN(cell,
                 return_state=True,
                 name="{0}_{1}".format(name_prefix,
                                       "lstm"))(reshaped_inputs,
                                                initial_state=inputs[1:])
     rx = Reshape((-1, self.lstm_cell_units),
                  name="{0}_{1}".format(name_prefix, "reshape"))(x[0])
     return x, rx
Example #2
    def build(self, input_shape):

        cell = self.Cell(self.units)
        self.rnn = RNN(cell, return_sequences=self.return_sequences, go_backwards=self.go_backwards)
        self.rnn.build(input_shape=input_shape)
        self._trainable_weights = self.rnn.trainable_weights
        super(Complex_LSTM, self).build(input_shape)
Example #3
	def predict(self, x_train, y_train, x_test, y_test, embeddings, sequence_length, class_count):
		req_type = type(np.array([]))
		assert type(x_train) == req_type and type(x_test) == req_type
		assert type(y_train) == req_type and type(y_test) == req_type

		from keras.models import Model
		from keras.layers import Input, Dense, Dropout, Flatten, Embedding, LSTM, Bidirectional, LSTMCell, StackedRNNCells, RNN
		from keras.layers.merge import Concatenate
		from keras.optimizers import Adam, Adagrad
		from keras.regularizers import l2

		input_shape = (sequence_length,)
		model_input = Input(shape=input_shape)
		print("Input tensor shape: ", int_shape(model_input))
		model_embedding = Embedding(embeddings.shape[0], 100, input_length=sequence_length, name="embedding")(model_input)
		# model_embedding = Embedding(embeddings.shape[0], embeddings.shape[1], weights=[embeddings], name="embedding")(model_input)
		print("Embeddings tensor shape: ", int_shape(model_embedding))
		# model_recurrent = Bidirectional(LSTM(embeddings.shape[1], activation='relu', dropout=0.2))(model_embedding)
		#####################################################################################################################################

		# cells_forward = [LSTMCell(units=self.output_size), LSTMCell(units=self.output_size), LSTMCell(units=self.output_size)]
		# cells_backward = [LSTMCell(units=self.output_size), LSTMCell(units=self.output_size), LSTMCell(units=self.output_size)]
		# list multiplication would reuse a single LSTMCell object three times; build three distinct cells
		cells_forward = [LSTMCell(units=self.output_size) for _ in range(3)]
		cells_backward = [LSTMCell(units=self.output_size) for _ in range(3)]
		# LSTM_forward = RNN(cells_forward, go_backwards=False)(model_embedding)
		# LSTM_backward = RNN(cells_backward, go_backwards=True)(model_embedding)

		cells_forward_stacked = StackedRNNCells(cells_forward)
		cells_backward_stacked = StackedRNNCells(cells_backward)
		LSTM_forward = RNN(cells_forward_stacked, go_backwards=False)(model_embedding)
		LSTM_backward = RNN(cells_backward_stacked, go_backwards=True)(model_embedding)

		model_recurrent = Concatenate(axis=-1)([LSTM_forward, LSTM_backward])
		# model_recurrent = Bidirectional(cells_forward)(model_embedding)

		######################################################################################################################################
		model_hidden = Dropout(0.5)(model_recurrent)
		model_output = Dense(class_count, activation="softmax", kernel_regularizer=l2(0.1), bias_regularizer=l2(0.1))(model_hidden)

		model = Model(model_input, model_output)
		optimizer = Adam(lr=self.learning_rate)
		# optimizer = Adagrad(lr=self.learning_rate)
		model.compile(loss="categorical_crossentropy", optimizer=optimizer, metrics=["accuracy"])
		# model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"])

		# model.fit(x_train, y_train, batch_size=self.batch_size, epochs=self.num_epochs,
		#   validation_data=(x_test, y_test), verbose=2, shuffle=True)

		model.fit(x_train, y_train, batch_size=self.batch_size, epochs=self.num_epochs,
		  validation_split=0.2, verbose=2, shuffle=True)

		score, acc = model.evaluate(x_test, y_test,
							batch_size=self.batch_size)
		print('\nTest score:', score)
		print('Test accuracy:', acc, '\n')

		test_model(model, x_test, y_test)

		return 0
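The commented-out Bidirectional(cells_forward) line above fails because Bidirectional wraps an RNN layer, not a bare list of cells. A minimal standalone sketch of that intended variant (sizes and names here are illustrative, not taken from the original model):

from keras.models import Model
from keras.layers import Input, Embedding, Dense, Bidirectional, RNN, LSTMCell

seq_len, vocab_size, emb_dim, units, n_classes = 50, 10000, 100, 128, 5  # assumed sizes
inputs = Input(shape=(seq_len,))
embedded = Embedding(vocab_size, emb_dim, input_length=seq_len)(inputs)
# Bidirectional runs the reversed pass itself, so one stack of three distinct cells is enough
hidden = Bidirectional(RNN([LSTMCell(units) for _ in range(3)]))(embedded)
outputs = Dense(n_classes, activation="softmax")(hidden)
model = Model(inputs, outputs)
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])

Compared with the manual forward/backward RNN pair concatenated in predict above, this keeps a single layer object and lets Keras handle sequence reversal and output merging.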
Example #4
 def __init__(self,
              n_dims,
              units,
              activation='tanh',
              recurrent_activation='hard_sigmoid',
              use_bias=True,
              kernel_initializer='glorot_uniform',
              recurrent_initializer='orthogonal',
              bias_initializer='zeros',
              kernel_regularizer=None,
              recurrent_regularizer=None,
              bias_regularizer=None,
              activity_regularizer=None,
              kernel_constraint=None,
              recurrent_constraint=None,
              bias_constraint=None,
              dropout=0.,
              recurrent_dropout=0.,
              implementation=1,
              return_sequences=False,
              return_state=False,
              go_backwards=False,
              stateful=False,
              unroll=False,
              reset_after=False,
              **kwargs):
     cell = GRUCellKeepDim(n_dims,
                           units,
                           activation=activation,
                           recurrent_activation=recurrent_activation,
                           use_bias=use_bias,
                           kernel_initializer=kernel_initializer,
                           recurrent_initializer=recurrent_initializer,
                           bias_initializer=bias_initializer,
                           kernel_regularizer=kernel_regularizer,
                           recurrent_regularizer=recurrent_regularizer,
                           bias_regularizer=bias_regularizer,
                           kernel_constraint=kernel_constraint,
                           recurrent_constraint=recurrent_constraint,
                           bias_constraint=bias_constraint,
                           dropout=dropout,
                           recurrent_dropout=recurrent_dropout,
                           implementation=implementation,
                           reset_after=reset_after)
     RNN.__init__(self,
                  cell,
                  return_sequences=return_sequences,
                  return_state=return_state,
                  go_backwards=go_backwards,
                  stateful=stateful,
                  unroll=unroll,
                  **kwargs)
     self.activity_regularizer = regularizers.get(activity_regularizer)
Example #5
def rnn(month, features, true_price, d):

    train_length = int(0.8 * len(month))

    x_train = features[:train_length]
    x_test = features[train_length:]
    label_train = month[:train_length].reshape(-1, 1)
    label_test = month[train_length:].reshape(-1, 1)
    true = true_price[train_length:]

    scaler = StandardScaler()
    scaler = scaler.fit(x_train)

    x_train = scaler.transform(x_train)
    x_test = scaler.transform(x_test)

    #   scaler2 = StandardScaler()
    #   scaler2 = scaler2.fit(label_train)

    #   label_train = scaler2.transform(label_train)
    #   label_test = scaler2.transform(label_test)

    x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))

    x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], 1))
    K.clear_session()

    model_rnn = Sequential()
    model_rnn.add(
        RNN(20,
            input_shape=(x_train.shape[1], 1),
            activation='relu',
            return_sequences=True))
    model_rnn.add(Dropout(0.2))
    #   model_rnn.add(RNN(64, activation='relu', return_sequences=True))
    #   model_rnn.add(Dropout(0.2))
    model_rnn.add(RNN(10, activation='relu', return_sequences=False))
    model_rnn.add(Dropout(0.2))
    model_rnn.add(Dense(1))

    model_rnn.compile(optimizer='adam', loss='mean_squared_error')
    early_stop = EarlyStopping(monitor='loss', patience=5, verbose=1)

    model_rnn.fit(x_train, label_train, epochs=1000, batch_size=32, verbose=0,
                  callbacks=[early_stop])  # use the EarlyStopping callback defined above

    # evaluate the keras model
    predicted_y = model_rnn.predict(x_test)

    predicted_y = predicted_y.reshape(len(predicted_y), )

    return true, predicted_y

Example #6
    def build(self, input_shape):

        # input to the kernelized LSTM is a 4D tensor:
        nbatch, nfram, kernels, n_in = input_shape

        cell = self.Cell(kernels, self.units, self.activation, self.recurrent_activation)
        self.rnn = RNN(cell, return_sequences=self.return_sequences, go_backwards=self.go_backwards)

        # the Keras RNN implementation does only work with 3D tensors, hence we flatten the last two dimensions of the input:
        self.rnn.build(input_shape=(nbatch, nfram, kernels*n_in))
        self._trainable_weights = self.rnn.trainable_weights
        super(Kernelized_LSTM, self).build(input_shape)
Example #7
 def __call__(self, input):
     rnn_cell_1 = GRUCell(units=self.rnn_units_1, dropout=self.dropout, recurrent_dropout=self.recurrent_dropout, name=self.name + '_rnn_cell_1' if self.name else None)
     rnn_cell_2 = GRUCell(units=self.rnn_units_2, dropout=self.dropout, recurrent_dropout=self.recurrent_dropout, name=self.name + '_rnn_cell_2' if self.name else None)
     # gru_cell_3 = GRUCell(units= rnn_units_3, dropout= rnn_dropout, recurrent_dropout= rnn_recurrent_dropout, reset_after= False)
     rnn_stack_cell = StackedRNNCells(cells=[rnn_cell_1, rnn_cell_2], name=self.name + '_stacked_rnn_cell' if self.name else None)
     rnn = RNN(cell=rnn_stack_cell, return_state=self.return_state, return_sequences=self.return_sequence, unroll=self.unroll, name=self.name)(input)
     return rnn
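For comparison, the two-level GRU stack above can also be written as two chained GRU layers; the StackedRNNCells form runs both levels inside a single scan over the timesteps, while the layer form scans twice. A short sketch with assumed shapes and unit counts:

from keras.models import Model
from keras.layers import Input, GRU

timesteps, features = 20, 8                   # assumed input shape
inputs = Input(shape=(timesteps, features))
h = GRU(64, return_sequences=True)(inputs)    # lower level must emit the full sequence
outputs = GRU(32)(h)                          # upper level returns only its final state
model = Model(inputs, outputs)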
Example #8
def gen_RNN():
    #lstm = CuDNNLSTM(6, stateful=True, return_sequences=True)(inp)
    #lstm = LeakyReLU()(lstm)
    #lstm = CuDNNLSTM(3, stateful=True,   return_sequences=True)(lstm)
    #lstm = Activation('sigmoid')(lstm)

    input = Input(batch_shape=(1, 1, 64 * 64 * 3 + num_classes))

    lstm = Concatenate()(
        [Dense(32, activation='elu', name='zoinks')(input), input])
    cells = []
    for i in range(2):
        cells.append(
            LSTMCell(256,
                     recurrent_dropout=0.05,
                     implementation=2,
                     recurrent_initializer='glorot_normal'))
    lstm = RNN(cells, stateful=True, name="RNN_yoooo")(lstm)
    lstm = Dense(128, activation='tanh', name='jinkies')(lstm)
    lstm = Dense(3, activation='tanh', name='yikes')(lstm)
    lstm = Reshape((
        1,
        3,
    ))(lstm)
    model = Model(inputs=input, outputs=lstm)

    return model
Example #9
def load_skip_thought_model(
    encode_size, 
    embedding_words_per_sentence,
    lookup_table
    ):

    word_embedding_size = lookup_table.shape[1]
    
    def __probabilities_calc(x):
        result = K.dot(lookup_table_k, K.permute_dimensions(x, [0,2,1]))
        result = K.permute_dimensions(result, [1, 2, 0])
        result = K.exp(result)    
        return result

            
    lookup_table_k = K.variable(value=lookup_table)

    inputs = Input(shape=(embedding_words_per_sentence, word_embedding_size), name='sentences')

    encoder = GRU(encode_size, use_bias=True, return_sequences=False, name='encoded_sentences')
    previous_decoder_cell = DecoderGRUCell(word_embedding_size, encoder_size=encode_size)
    next_decoder_cell = DecoderGRUCell(word_embedding_size, encoder_size=encode_size)
    repeater = RepeatVector(embedding_words_per_sentence, name='repeat_encode_sentences')
    joiner = Concatenate(name='concat_encode_w_inputs')
    previous_decoder = RNN(previous_decoder_cell, return_sequences=True, name='decoded_previous_sentences')
    next_decoder = RNN(next_decoder_cell, return_sequences=True, name='decoded_next_sentences')
    prev_prob_calculator = Lambda(__probabilities_calc, name='previous_probabilities')
    next_prob_calculator = Lambda(__probabilities_calc, name='next_probabilities')
    

    #prev_prob_calculator = Lambda(__probabilities_calc)
    #next_prob_calculator = Lambda(__probabilities_calc)


    encoded_sentence = encoder(inputs)
    repeated_encoded_sentence = repeater(encoded_sentence)
    concatenated_inputs = joiner([repeated_encoded_sentence, inputs])
    decoded_previous_words = previous_decoder(concatenated_inputs)
    decoded_next_words = next_decoder(concatenated_inputs)
    previous_probs = prev_prob_calculator(decoded_previous_words)
    next_probs = next_prob_calculator(decoded_next_words)
    
    model = Model(inputs=[inputs], outputs=[previous_probs, next_probs], name='skip_thought')

    
    return model
Example #10
def rnn(in_array, target_array):
    # create and fit the model
    model = Sequential()
    model.add(RNN(512, input_shape=(in_array.shape[1], 1)))
    model.add(Dense(target_array.shape[1], activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    return model
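Note that RNN(512, ...) in this snippet only works if RNN is an alias for a concrete recurrent layer such as SimpleRNN or LSTM (keras.layers.RNN itself takes a cell object, not a unit count), and the layer expects 3-D input of shape (samples, timesteps, 1). A hedged usage sketch with made-up array names and sizes:

import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import SimpleRNN as RNN   # assumed alias; the original import is not shown

n_samples, timesteps, n_classes = 100, 30, 10                # illustrative sizes
in_array = np.random.rand(n_samples, timesteps, 1)           # (samples, timesteps, features=1)
target_array = np.eye(n_classes)[np.random.randint(0, n_classes, n_samples)]  # one-hot labels
model = rnn(in_array, target_array)
model.fit(in_array, target_array, epochs=2, batch_size=16)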
Example #11
    def create_stacked_lstms(self, hidden_size, num_layers, return_sequences,
                             return_state):
        # Create a list of RNN Cells, these are then concatenated into a single layer
        # with the RNN layer.
        cells = []
        for i in range(num_layers):
            cells.append(LSTMCell(hidden_size))  # TODO: try regularizers

        return RNN(cells,
                   return_sequences=return_sequences,
                   return_state=return_state)
Example #12
def get_model_soft_sharing_lstm_singleoutput(emb_matrix,
                                             time_steps,
                                             learning_rate=0.001,
                                             n_classes=1,
                                             decay=0.1,
                                             layers=3):

    cells_1 = [LSTMCell(128, dropout=0.2) for i in range(layers)]
    cells_2 = [LSTMCell(128, dropout=0.2) for i in range(layers)]

    input = Input(shape=(time_steps, ), dtype='int32')
    embedding = Embedding(input_dim=emb_matrix.shape[0],
                          output_dim=emb_matrix.shape[1],
                          weights=[emb_matrix],
                          input_length=time_steps,
                          trainable=True)

    sequence_input = embedding(input)
    x = Bidirectional(RNN(cells_1, return_sequences=True))(sequence_input)
    x = Bidirectional(RNN(cells_2, return_sequences=False))(x)
    x = Dropout(0.2)(x)

    x = Dense(256, activation='relu')(x)
    x = Dropout(0.2)(x)
    x = Dense(128, activation='relu')(x)
    x = Dropout(0.2)(x)
    x = Dense(128, activation='relu')(x)
    x = Dropout(0.2)(x)
    x = Dense(64, activation='relu')(x)
    x = Dropout(0.2)(x)
    x = Dense(64, activation='relu')(x)
    preds = Dense(n_classes, activation='sigmoid')(x)
    model = Model(input, preds)
    adam = optimizers.Adam(lr=learning_rate, decay=decay)
    model.compile(loss='binary_crossentropy',
                  optimizer=adam,
                  metrics=['accuracy'])

    print(model.summary())

    return model
Example #13
    def get_model(self):
        # Input text
        encoder_inputs = Input(shape=(None, ))
        # Input summary
        decoder_inputs = Input(shape=(None, ))

        # word embedding layer for text
        encoder_inputs_emb = Embedding(input_dim=self.num_encoder_tokens + 1,
                                       output_dim=self.embedding_dim,
                                       mask_zero=True)(encoder_inputs)
        # word embedding layer for summary
        decoder_inputs_emb = Embedding(input_dim=self.num_decoder_tokens + 1,
                                       output_dim=self.embedding_dim,
                                       mask_zero=True)(decoder_inputs)

        # Bidirectional LSTM encoder
        encoder_out = Bidirectional(LSTM(self.hidden_dim // 2,
                                         return_sequences=True,
                                         return_state=True),
                                    merge_mode='concat')(encoder_inputs_emb)

        encoder_o = encoder_out[0]
        initial_h_lstm = concatenate([encoder_out[1], encoder_out[2]])
        initial_c_lstm = concatenate([encoder_out[3], encoder_out[4]])
        initial_decoder_state = Dense(self.hidden_dim, activation='tanh')(
            concatenate([initial_h_lstm, initial_c_lstm]))

        # LSTM decoder + attention
        initial_attention_h = Lambda(lambda x: K.zeros_like(x)[:, 0, :])(
            encoder_o)
        initial_state = [initial_decoder_state, initial_attention_h]

        cell = DenseAnnotationAttention(cell=GRUCell(self.hidden_dim),
                                        units=self.hidden_dim,
                                        input_mode="concatenate",
                                        output_mode="cell_output")

        # TODO output_mode="concatenate", see TODO(3)/A
        decoder_o, decoder_h, decoder_c = RNN(cell=cell,
                                              return_sequences=True,
                                              return_state=True)(
                                                  decoder_inputs_emb,
                                                  initial_state=initial_state,
                                                  constants=encoder_o)
        decoder_o = Dense(self.hidden_dim * 2)(concatenate(
            [decoder_o, decoder_inputs_emb]))
        y_pred = TimeDistributed(
            Dense(self.num_decoder_tokens + 1,
                  activation='softmax'))(decoder_o)

        model = Model([encoder_inputs, decoder_inputs], y_pred)
        return model
Example #14
 def _build_decoder(self):
     decoder_cells = []
     for n_hidden_neurons in self.decoder_layers:
         decoder_cells.append(
             self.cell(units=n_hidden_neurons,
                       dropout=self.dropout,
                       kernel_regularizer=l2(self.l2),
                       recurrent_regularizer=l2(self.l2)))
     # return output for EACH timestamp
     self.decoder = RNN(decoder_cells,
                        return_sequences=True,
                        return_state=True,
                        name='decoder')
Example #15
def create_embed_model(obs_timesteps=10, pred_timesteps=3, nb_nodes=208, k=1, dgc_mode='hybrid', inner_act=None):
    encoder = RNN(DGCRNNCell(k,dgc_mode=dgc_mode), return_state=True)
    decoder = RNN(DGCRNNCell(k,dgc_mode=dgc_mode), return_sequences=True, return_state=True)
    
    unstack_k = Lambda(unstack)
    choice = Scheduled()
    
    input_obs = Input(shape=(obs_timesteps, nb_nodes, 1)) 
    input_gt = Input(shape=(pred_timesteps, nb_nodes, 1)) #(None, T, N, 1)
    encoder_inputs = Lambda(lambda x: K.squeeze(x, axis = -1))(input_obs) # (None, T, N)
    
    encoder_outputs, state_h = encoder(encoder_inputs)
    
    unstacked = unstack_k(input_gt) #[(None, N, 1) x T] list
    
    initial = unstacked[0] #(None, N, 1)
    decoder_inputs = Lambda(lambda x: K.permute_dimensions(x, (0,2,1)))(initial) #(None, 1, N)
    decoder_outputs_new, state_h_new = decoder(decoder_inputs, initial_state=state_h)
    state_h = state_h_new
    prediction = []
    decoded_results = decoder_outputs_new
    prediction.append(decoded_results)
    
    if pred_timesteps > 1:       
        for i in range(1,pred_timesteps):
            decoder_inputs = choice([prediction[-1], unstacked[i]])#(None, 208, 1)
            decoder_inputs = Lambda(lambda x: K.permute_dimensions(x, (0,2,1)))(decoder_inputs)#(None, 1, 208)
            decoder_outputs_new, state_h_new = decoder(decoder_inputs, initial_state=state_h)
            state_h = state_h_new
            decoded_results = decoder_outputs_new
            prediction.append(decoded_results)
    
    outputs = Lambda(stack)(prediction)
    model = Model([input_obs, input_gt], outputs)

    return model
Example #16
def Emojify_RNN_model(input_shape, word_to_vec_map, word_to_index):
    sentence_indices = Input(input_shape, dtype='int32')
    embedding_layer = pretrained_embedding_layer(word_to_vec_map,
                                                 word_to_index)
    embeddings = embedding_layer(sentence_indices)
    # https://stackoverflow.com/questions/45989610/setting-up-the-input-on-an-rnn-in-keras
    # Desired result is a sequence with same length, we will use return_sequences=True. (Else, you'd get only one result).
    # keras.layers.RNN(cell, return_sequences=False, return_state=False, go_backwards=False, stateful=False, unroll=False)
    cells = [MinimalRNNCell(32), MinimalRNNCell(64)]
    X = RNN(cells, return_sequences=True)(embeddings)
    X = Dropout(0.5)(X)
    X = Dense(5)(X)
    X = Activation('sigmoid')(X)
    model = Model(inputs=sentence_indices, outputs=X)
    return model
Example #17
    def _build_encoder(self):
        """
        Build the encoder multilayer RNN (stacked RNN)
        """
        # Create a list of RNN Cells, these get stacked one after the other in the RNN,
        # implementing an efficient stacked RNN
        encoder_cells = []
        for n_hidden_neurons in self.encoder_layers:
            encoder_cells.append(
                self.cell(units=n_hidden_neurons,
                          dropout=self.dropout,
                          kernel_regularizer=l2(self.l2),
                          recurrent_regularizer=l2(self.l2)))

        self.encoder = RNN(encoder_cells, return_state=True, name='encoder')
Example #18
def build_model(input_shape):
    batch_shape = (BATCH_SIZE, ) + input_shape

    inputs = []
    for _ in range(FRAMES):
        input = Input(batch_shape=batch_shape)
        inputs.append(input)

    layers = 1
    conv_outputs = []
    for input in inputs:
        filters = 8

        conv_input = input
        for _ in range(layers):
            conv = Conv2D(filters,
                          kernel_size=3,
                          activation='relu',
                          padding='same')(conv_input)
            bnrm = BatchNormalization()(conv)
            pool = MaxPooling2D(8)(bnrm)
            conv_input = pool
            filters *= 2

        flattened = Flatten()(conv_input)
        conv_outputs.append(flattened)

    stacked = Stack()(conv_outputs)

    units = FRAMES

    cell = LSTMCell(units)
    lstm = RNN(cell, unroll=True)(stacked)
    drpt1 = Dropout(0.2)(lstm)

    dense = Dense(units // 2, activation='relu')(drpt1)
    drpt2 = Dropout(0.2)(dense)

    output = Dense(1)(drpt2)

    model = km.Model(inputs=inputs, outputs=output)

    adam = optmzrs.Adam(learning_rate=0.0001, amsgrad=True)
    model.compile(loss='mse', optimizer=adam, metrics=['mae'])

    model.summary()

    return model
Example #19
def build_model(allow_cudnn_kernel=True):
	# CuDNN is only available at the layer level, and not at the cell level.
	# This means `LSTM(units)` will use the CuDNN kernel,
	# while RNN(LSTMCell(units)) will run on non-CuDNN kernel.
	if allow_cudnn_kernel:
		# The LSTM layer with default options uses CuDNN.
		lstm_layer = LSTM(units, input_shape=(None, input_dim))
	else:
		# Wrapping a LSTMCell in a RNN layer will not use CuDNN.
		lstm_layer = RNN(
			LSTMCell(units), input_shape=(None, input_dim)
		)
	model = keras.models.Sequential([
		lstm_layer,
		BatchNormalization(),
		Dense(output_size),
	])
	return model
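Since LSTM(units) and RNN(LSTMCell(units)) have the same weight layout in tf.keras, a model trained with the CuDNN-backed variant can be rebuilt with the generic kernel (for example, for CPU inference). A hedged sketch, assuming the module-level units, input_dim and output_size used above and some training data x_train, y_train:

gpu_model = build_model(allow_cudnn_kernel=True)
gpu_model.compile(loss="sparse_categorical_crossentropy", optimizer="sgd", metrics=["accuracy"])
gpu_model.fit(x_train, y_train, epochs=1, batch_size=64)

cpu_model = build_model(allow_cudnn_kernel=False)
cpu_model.set_weights(gpu_model.get_weights())  # weights transfer between the two kernels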
Example #20
	def __init__(self, gpus=1, batch_size=50, segment_size=12, output_size=12, window_size=15,
		cnn_filters=[2,3,4], hidden_sizes=[10,10],
		learning_rate=0.0001, learning_rate_decay=0, create_tensorboard=False):

		self.segment_size = segment_size
		self.output_size = output_size
		self.gpus = gpus
		
		# Define an input sequence.
		# 1 refers to a single channel of the input
		inputs = Input(shape=(segment_size, window_size, window_size, 1), name="input")

		# cnns
		
		out = TimeDistributed(Conv2D(cnn_filters[0], kernel_size=5, activation='relu', padding='same'), 
			name="cnn_1")(inputs)
		out = TimeDistributed(MaxPooling2D(), name=f"max_pool")(out)
		out = TimeDistributed(Conv2D(cnn_filters[1], kernel_size=5, activation='relu', padding='same'), 
			name="cnn_2")(out)
		out = TimeDistributed(AveragePooling2D(), name=f"avg_pool_1")(out)
		out = TimeDistributed(Conv2D(cnn_filters[2], kernel_size=5, activation='relu', padding='same'), 
			name="cnn_3")(out)
		out = TimeDistributed(AveragePooling2D(), name=f"avg_pool_2")(out)

		out = TimeDistributed(Flatten(), name="flatten_before_lstm")(out)

		cells = [LSTMCell(hidden_sizes[0]), LSTMCell(hidden_sizes[1])]
		out = RNN(cells)(out)

		# out = Flatten(name="flatten_after_lstm")(out)

		out = Dense(100, activation='relu', name=f"mlp_relu")(out)		
		out = Dense(output_size, activation='linear', name=f"mlp_linear")(out)


		self.model = Model(inputs=inputs, outputs=out)
		self.model = model_device_adapter.get_device_specific_model(self.model, gpus)
		
		optimizer = Adam(lr=learning_rate, decay=learning_rate_decay)
		self.model.compile(loss='mse', optimizer=optimizer)

		print(self.model.summary())

		super(CnnLSTM, self).__init__(batch_size=batch_size, create_tensorboard=create_tensorboard)
Example #21
    def __init__(self,
                 rnn,
                 rnn_dim,
                 input_dim,
                 dropout_W=0.0,
                 dropout_U=0.0,
                 cnn_border_mode='same'):
        if rnn == 'lstm':
            from keras.layers import CuDNNLSTM as RNN
        elif rnn == 'sru':
            from nea.cell import SRU as RNN
        elif rnn == 'nlstm':
            from nea.cell import NestedLSTM as RNN
        elif rnn == 'gru':
            from keras.layers import CuDNNGRU as RNN
        elif rnn == 'simple':
            from keras.layers.recurrent import SimpleRNN as RNN
        elif rnn == 'indrnn':
            from nea.cell import IndRNN as RNN
        self.model = Sequential()
        self.model.add(
            Conv1D(filters=100,
                   kernel_size=3,
                   padding=cnn_border_mode,
                   strides=1,
                   input_shape=input_dim))
        for i in range(MC.DEPTH):
            self.model.add(
                Bidirectional(
                    RNN(
                        rnn_dim,
                        # dropout=dropout_W,
                        #recurrent_dropout=dropout_U,
                        return_sequences=True), ))

        if MC.HIGHWAY:
            self.model.add(TimeDistributed(Highway(activation='tanh')))
        #self.model.add(TimeDistributed(Dense(MC.DENSE_DIM,activation='relu')))
        self.model.add(Dropout(MC.DROPOUT))
        self.model.add(Attention())
Example #22
def cell_unit_rnn():
    import keras
    from keras.layers import RNN
    import keras.backend.tensorflow_backend as K
    
    class MinimalRNNCell(keras.layers.Layer):
        def __init__(self, units, **kwargs):
            self.units = units
            self.state_size = units
            super(MinimalRNNCell, self).__init__(**kwargs)
        
        def build(self, input_shape):
            self.kernel = self.add_weight(shape=(input_shape[-1], self.units),
                                          initializer='uniform',
                                          name='kernel')
            self.recurrent_kernel = self.add_weight(
                shape=(self.units, self.units),
                initializer='uniform',
                name='recurrent_kernel')
            self.built = True
        
        def call(self, inputs, states):
            prev_output = states[0]
            h = K.dot(inputs, self.kernel)
            print('hidden', h)
            output = h + K.dot(prev_output, self.recurrent_kernel)
            return output, [output]
    
    # Let's use this cell in a RNN layer:
    
    cell = MinimalRNNCell(100)
    x = keras.Input(batch_shape=(3000, 28, 28))
    # When return_state is True,
    # [<tf.Tensor 'rnn_1/TensorArrayReadV3:0' shape=(3000, 100) dtype=float64>,
    # <tf.Tensor 'rnn_1/while/Exit_2:0' shape=(3000, 100) dtype=float64>]
    
    # When return_state is False,
    # Tensor("rnn_1/TensorArrayReadV3:0", shape=(3000, 100), dtype=float64)
    layer = RNN(cell, return_state=True)
    y = layer(x)
Example #23
    def __build_prednet_layer(self, model_type):
        if model_type == ModelType.PREDNET or model_type == ModelType.SINGLE_PIXEL_ACTIVATION:
            prednet = PredNet.build_from_params(self.params)
        elif model_type == ModelType.CONV_PREDNET:
            prednet = ConvPredNet.build_from_params(self.params)
            if self.autoencoder is None and not self.trainable_autoencoder:
                raise ValueError(
                    "Pretrained autoencoder not set and autoencoder is not trainable!"
                )
        elif model_type == ModelType.AMPLIF_ERROR:
            prednet = AmplifiedErrorPredNet.build_from_params(self.params)
        elif model_type == ModelType.STATE_VECTOR:
            cell = StateVectorPredNetCell(self.balls * 4, self.nt,
                                          self.nb_layers,
                                          self.params.extrap_start_time,
                                          self.params.extrap_end_time,
                                          self.params.output_mode)
            prednet = RNN(cell, return_sequences=True)
        elif model_type == ModelType.CONCAT_PREDNET:
            prednet = ConcatPredNet.build_from_params(self.params)

        return prednet
Example #24
def make_model(vocabulary_size,
               hidden_size,
               num_steps,
               use_dropout=True,
               lstm=False):
    model = Sequential()
    model.add(Embedding(vocabulary_size, hidden_size, input_length=num_steps))

    if lstm:
        model.add(LSTM(hidden_size, return_sequences=True))
        model.add(LSTM(hidden_size, return_sequences=True))
    else:
        model.add(
            RNN(SimpleRNNCell(hidden_size),  # SimpleRNNCell takes a single `units` argument
                return_sequences=True))

    if use_dropout:
        model.add(Dropout(0.5))
    model.add(TimeDistributed(Dense(vocabulary_size)))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['categorical_accuracy'])
    return model
Example #25
def test_Bidirectional_with_constants_layer_passing_initial_state():
    class RNNCellWithConstants(Layer):
        def __init__(self, units, **kwargs):
            self.units = units
            self.state_size = units
            super(RNNCellWithConstants, self).__init__(**kwargs)

        def build(self, input_shape):
            if not isinstance(input_shape, list):
                raise TypeError('expects constants shape')
            [input_shape, constant_shape] = input_shape
            # will (and should) raise if more than one constant passed

            self.input_kernel = self.add_weight(shape=(input_shape[-1],
                                                       self.units),
                                                initializer='uniform',
                                                name='kernel')
            self.recurrent_kernel = self.add_weight(shape=(self.units,
                                                           self.units),
                                                    initializer='uniform',
                                                    name='recurrent_kernel')
            self.constant_kernel = self.add_weight(shape=(constant_shape[-1],
                                                          self.units),
                                                   initializer='uniform',
                                                   name='constant_kernel')
            self.built = True

        def call(self, inputs, states, constants):
            [prev_output] = states
            [constant] = constants
            h_input = K.dot(inputs, self.input_kernel)
            h_state = K.dot(prev_output, self.recurrent_kernel)
            h_const = K.dot(constant, self.constant_kernel)
            output = h_input + h_state + h_const
            return output, [output]

        def get_config(self):
            config = {'units': self.units}
            base_config = super(RNNCellWithConstants, self).get_config()
            return dict(list(base_config.items()) + list(config.items()))

    # Test basic case.
    x = Input((5, 5))
    c = Input((3, ))
    s_for = Input((32, ))
    s_bac = Input((32, ))
    cell = RNNCellWithConstants(32)
    custom_objects = {'RNNCellWithConstants': RNNCellWithConstants}
    with CustomObjectScope(custom_objects):
        layer = wrappers.Bidirectional(RNN(cell))
    y = layer(x, initial_state=[s_for, s_bac], constants=c)
    model = Model([x, s_for, s_bac, c], y)
    model.compile(optimizer='rmsprop', loss='mse')
    model.train_on_batch([
        np.zeros((6, 5, 5)),
        np.zeros((6, 32)),
        np.zeros((6, 32)),
        np.zeros((6, 3))
    ], np.zeros((6, 64)))

    # Test basic case serialization.
    x_np = np.random.random((6, 5, 5))
    s_fw_np = np.random.random((6, 32))
    s_bk_np = np.random.random((6, 32))
    c_np = np.random.random((6, 3))
    y_np = model.predict([x_np, s_fw_np, s_bk_np, c_np])
    weights = model.get_weights()
    config = layer.get_config()
    with CustomObjectScope(custom_objects):
        layer = wrappers.Bidirectional.from_config(copy.deepcopy(config))
    y = layer(x, initial_state=[s_for, s_bac], constants=c)
    model = Model([x, s_for, s_bac, c], y)
    model.set_weights(weights)
    y_np_2 = model.predict([x_np, s_fw_np, s_bk_np, c_np])
    assert_allclose(y_np, y_np_2, atol=1e-4)

    # verify that state is used
    y_np_2_different_s = model.predict(
        [x_np, s_fw_np + 10., s_bk_np + 10., c_np])
    with pytest.raises(AssertionError):
        assert_allclose(y_np, y_np_2_different_s, atol=1e-4)

    # test flat list inputs
    with CustomObjectScope(custom_objects):
        layer = wrappers.Bidirectional.from_config(copy.deepcopy(config))
    y = layer([x, s_for, s_bac, c])
    model = Model([x, s_for, s_bac, c], y)
    model.set_weights(weights)
    y_np_3 = model.predict([x_np, s_fw_np, s_bk_np, c_np])
    assert_allclose(y_np, y_np_3, atol=1e-4)
Example #26
data = np.array(newdata)
X = data[:, 0:101]
Y = data[:, 5]
train_x = X[:int(len(X) * 0.7)]
test_x = X[int(len(X) * 0.7) + 1:]
train_y = Y[:int(len(Y) * 0.7)]
test_y = Y[int(len(Y) * 0.7) + 1:]
train_x = train_x.reshape((train_x.shape[0], 1, train_x.shape[1]))
test_x = test_x.reshape((test_x.shape[0], 1, test_x.shape[1]))
print(train_x.shape)
print(test_y.shape)

# design network
model = Sequential()
model.add(RNN(200, return_sequences=True))  # first recurrent layer must return sequences for the next one
model.add(RNN(200))
model.add(Dense(1, activation='relu'))
model.compile(loss='mae', optimizer='adam', metrics=['accuracy'])
# fit network
history = model.fit(train_x,
                    train_y,
                    epochs=20,
                    batch_size=100,
                    validation_data=(test_x, test_y),
                    verbose=1,
                    shuffle=True)

# plot history
pyplot.plot(history.history['loss'], label='train')
pyplot.plot(history.history['val_loss'], label='test')
Example #27
    os.makedirs(config.log_dir)

tf.set_random_seed(config.seed)
np.random.seed(config.seed)

num_steps_ahead = 1
n_layers = 1

layers = [config.m] * n_layers

# Encoder

# driving series with shape (T, n)
encoder_inputs = Input(shape=(None, config.n))  # add endogenous series

encoder = RNN([LSTMCell(units) for units in layers], return_state=True)

encoder_out_and_states = encoder(encoder_inputs)

# Decoder
decoder_inputs = Input(shape=(config.T - num_steps_ahead, ))

z_i = Dense(64)(decoder_inputs)

encoder_out = concatenate(encoder_out_and_states + [z_i])

z = Dense(256)(encoder_out)
decoder_outs = Dense(len(config.target_cols), activation="linear")(z)

model = Model(inputs=[encoder_inputs, decoder_inputs], outputs=decoder_outs)
model.compile(optimizer="adam", loss="mse")
Example #28
    def __construct_rnn_model__(self):
        model = Sequential()
        n_rnn_layers = len(self.n_units)

        if n_rnn_layers > 1:
            is_return_sequences = True
        else:
            is_return_sequences = False

        if self.is_self_activation:
            is_return_sequences_last_recurrent_layer = True
        else:
            is_return_sequences_last_recurrent_layer = False
        print(is_return_sequences_last_recurrent_layer)

        assert self.method is not None

        if self.method == 'rnn':
            print(self.max_sent_len)
            model.add(
                RNN(
                    self.n_units[0],
                    input_shape=(self.max_sent_len, self.word_emb_dim),
                    return_sequences=is_return_sequences,
                    kernel_regularizer=self.input_reg,
                    recurrent_regularizer=self.recurr_reg,
                    dropout=self.dropout,
                    recurrent_dropout=self.recurrent_dropout,
                ))

            for l_idx, n_unit in enumerate(self.n_units[1:]):
                if l_idx < (n_rnn_layers - 2):
                    model.add(
                        RNN(
                            n_unit,
                            return_sequences=True,
                            kernel_regularizer=self.input_reg,
                            recurrent_regularizer=self.recurr_reg,
                            dropout=self.dropout,
                            recurrent_dropout=self.recurrent_dropout,
                        ))
                else:
                    model.add(
                        RNN(
                            n_unit,
                            return_sequences=
                            is_return_sequences_last_recurrent_layer,
                            kernel_regularizer=self.input_reg,
                            recurrent_regularizer=self.recurr_reg,
                            dropout=self.dropout,
                            recurrent_dropout=self.recurrent_dropout,
                        ))
        elif self.method == 'lstm':
            print(self.max_sent_len)
            model.add(
                LSTM(
                    self.n_units[0],
                    input_shape=(self.max_sent_len, self.word_emb_dim),
                    return_sequences=is_return_sequences,
                    kernel_regularizer=self.input_reg,
                    recurrent_regularizer=self.recurr_reg,
                    dropout=self.dropout,
                    recurrent_dropout=self.recurrent_dropout,
                ))
            for l_idx, n_unit in enumerate(self.n_units[1:]):
                if l_idx < n_rnn_layers - 2:
                    model.add(
                        LSTM(
                            n_unit,
                            return_sequences=True,
                            kernel_regularizer=self.input_reg,
                            recurrent_regularizer=self.recurr_reg,
                            dropout=self.dropout,
                            recurrent_dropout=self.recurrent_dropout,
                        ))
                else:
                    model.add(
                        LSTM(
                            n_unit,
                            return_sequences=
                            is_return_sequences_last_recurrent_layer,
                            kernel_regularizer=self.input_reg,
                            recurrent_regularizer=self.recurr_reg,
                            dropout=self.dropout,
                            recurrent_dropout=self.recurrent_dropout,
                        ))
        elif self.method == 'gru':
            model.add(
                GRU(self.n_units[0],
                    input_shape=(self.max_sent_len, self.word_emb_dim),
                    return_sequences=is_return_sequences,
                    kernel_regularizer=self.input_reg,
                    recurrent_regularizer=self.recurr_reg))
            for l_idx, n_unit in enumerate(self.n_units[1:]):
                if l_idx < n_rnn_layers - 2:
                    model.add(
                        GRU(n_unit,
                            return_sequences=True,
                            kernel_regularizer=self.input_reg,
                            recurrent_regularizer=self.recurr_reg))
                else:
                    model.add(
                        GRU(n_unit,
                            return_sequences=
                            is_return_sequences_last_recurrent_layer,
                            kernel_regularizer=self.input_reg,
                            recurrent_regularizer=self.recurr_reg))
        elif self.method == 'bi-lstm':
            print(self.max_sent_len)
            model.add(
                Bidirectional(
                    LSTM(
                        self.n_units[0],
                        return_sequences=(
                            is_return_sequences
                            or is_return_sequences_last_recurrent_layer),
                        kernel_regularizer=self.input_reg,
                        recurrent_regularizer=self.recurr_reg,
                        dropout=self.dropout,
                        recurrent_dropout=self.recurrent_dropout,
                    ),
                    input_shape=(self.max_sent_len, self.word_emb_dim),
                ))
            for l_idx, n_unit in enumerate(self.n_units[1:]):
                if l_idx < (n_rnn_layers - 2):
                    model.add(
                        Bidirectional(
                            LSTM(
                                n_unit,
                                return_sequences=True,
                                kernel_regularizer=self.input_reg,
                                recurrent_regularizer=self.recurr_reg,
                                dropout=self.dropout,
                                recurrent_dropout=self.recurrent_dropout,
                            )))
                else:
                    model.add(
                        Bidirectional(
                            LSTM(
                                n_unit,
                                return_sequences=
                                is_return_sequences_last_recurrent_layer,
                                kernel_regularizer=self.input_reg,
                                recurrent_regularizer=self.recurr_reg,
                                dropout=self.dropout,
                                recurrent_dropout=self.recurrent_dropout,
                            )))
        elif self.method == 'bi-gru':
            model.add(
                Bidirectional(
                    GRU(self.n_units[0],
                        return_sequences=is_return_sequences,
                        kernel_regularizer=self.input_reg,
                        recurrent_regularizer=self.recurr_reg),
                    input_shape=(self.max_sent_len, self.word_emb_dim),
                ))
            for l_idx, n_unit in enumerate(self.n_units[1:]):
                if l_idx < (n_rnn_layers - 2):
                    model.add(
                        Bidirectional(
                            GRU(n_unit,
                                return_sequences=True,
                                kernel_regularizer=self.input_reg,
                                recurrent_regularizer=self.recurr_reg)))
                else:
                    model.add(
                        Bidirectional(
                            GRU(n_unit,
                                return_sequences=
                                is_return_sequences_last_recurrent_layer,
                                kernel_regularizer=self.input_reg,
                                recurrent_regularizer=self.recurr_reg)))
        elif self.method == 'lstm-cnn':
            print(self.max_sent_len)
            model.add(
                Conv1D(self.filters,
                       self.kernel_size,
                       input_shape=(self.max_sent_len, self.word_emb_dim),
                       padding='valid',
                       activation='relu',
                       strides=1))
            model.add(MaxPooling1D(pool_size=self.pool_size))
            model.add(
                LSTM(
                    self.n_units[0],
                    return_sequences=is_return_sequences,
                    kernel_regularizer=self.input_reg,
                    recurrent_regularizer=self.recurr_reg,
                    dropout=self.dropout,
                    recurrent_dropout=self.recurrent_dropout,
                ))
            for l_idx, n_unit in enumerate(self.n_units[1:]):
                if l_idx < n_rnn_layers - 2:
                    model.add(
                        LSTM(
                            n_unit,
                            return_sequences=True,
                            kernel_regularizer=self.input_reg,
                            recurrent_regularizer=self.recurr_reg,
                            dropout=self.dropout,
                            recurrent_dropout=self.recurrent_dropout,
                        ))
                else:
                    model.add(
                        LSTM(
                            n_unit,
                            return_sequences=
                            is_return_sequences_last_recurrent_layer,
                            kernel_regularizer=self.input_reg,
                            recurrent_regularizer=self.recurr_reg,
                            dropout=self.dropout,
                            recurrent_dropout=self.recurrent_dropout,
                        ))
        elif self.method == 'gru-cnn':
            print(self.max_sent_len)
            model.add(
                Conv1D(self.filters,
                       self.kernel_size,
                       input_shape=(self.max_sent_len, self.word_emb_dim),
                       padding='valid',
                       activation='relu',
                       strides=1))
            model.add(MaxPooling1D(pool_size=self.pool_size))
            model.add(
                GRU(
                    self.n_units[0],
                    return_sequences=is_return_sequences,
                    kernel_regularizer=self.input_reg,
                    recurrent_regularizer=self.recurr_reg,
                    dropout=self.dropout,
                    recurrent_dropout=self.recurrent_dropout,
                ))
            for l_idx, n_unit in enumerate(self.n_units[1:]):
                if l_idx < n_rnn_layers - 2:
                    model.add(
                        GRU(
                            n_unit,
                            return_sequences=True,
                            kernel_regularizer=self.input_reg,
                            recurrent_regularizer=self.recurr_reg,
                            dropout=self.dropout,
                            recurrent_dropout=self.recurrent_dropout,
                        ))
                else:
                    model.add(
                        GRU(
                            n_unit,
                            return_sequences=
                            is_return_sequences_last_recurrent_layer,
                            kernel_regularizer=self.input_reg,
                            recurrent_regularizer=self.recurr_reg,
                            dropout=self.dropout,
                            recurrent_dropout=self.recurrent_dropout,
                        ))
        elif self.method == 'cnn':
            print(self.max_sent_len)
            model.add(
                Conv1D(self.filters,
                       self.kernel_size,
                       input_shape=(self.max_sent_len, self.word_emb_dim),
                       padding='valid',
                       activation='relu',
                       strides=1))
            model.add(GlobalMaxPooling1D())

        if self.is_self_activation:
            model.add(SeqSelfAttention(attention_activation='sigmoid'))
            model.add(Flatten())

        for dense_n_unit in self.dense_units:
            model.add(Dense(dense_n_unit, activation='relu'))

        model.add(Dense(self.label_dim, activation='sigmoid'))

        if self.learn_algo == 'sg':
            optimizer = SGD(lr=self.learning_rate,
                            momentum=0.0,
                            decay=0.0,
                            nesterov=False)
        elif self.learn_algo == 'adam':
            optimizer = Adam(
                lr=self.learning_rate,
                # beta_1=0.9,
                # beta_2=0.999,
                # epsilon=1e-08,
                amsgrad=True,
            )
        else:
            raise NotImplementedError

        if self.is_cross_entropy_loss:
            model.compile(
                optimizer=optimizer,
                loss='binary_crossentropy',
                metrics=['binary_accuracy'],
            )
        else:
            model.compile(
                loss='mean_squared_error',
                optimizer=optimizer,
            )

        return model
Example #29
import keras
from keras.layers import RNN
from keras import backend as K


# Minimal RNN cell: one input kernel plus one recurrent kernel, state size equal to `units`
# (same cell as in Example #22).
class MinimalRNNCell(keras.layers.Layer):

    def __init__(self, units, **kwargs):
        self.units = units
        self.state_size = units
        super(MinimalRNNCell, self).__init__(**kwargs)

    def build(self, input_shape):
        self.kernel = self.add_weight(shape=(input_shape[-1], self.units),
                                      initializer='uniform',
                                      name='kernel')
        self.recurrent_kernel = self.add_weight(shape=(self.units, self.units),
                                                initializer='uniform',
                                                name='recurrent_kernel')
        self.built = True

    def call(self, inputs, states):
        prev_output = states[0]
        h = K.dot(inputs, self.kernel)
        output = h + K.dot(prev_output, self.recurrent_kernel)
        return output, [output]


# Let's use this cell in a RNN layer:

cell = MinimalRNNCell(32)
x = keras.Input((None, 5))
layer = RNN(cell)
y = layer(x)

# Here's how to use the cell to build a stacked RNN:

cells = [MinimalRNNCell(32), MinimalRNNCell(64)]
x = keras.Input((None, 5))
layer = RNN(cells)
y = layer(x)

print(y)
Example #30
earlyStopping = EarlyStopping(monitor='val_loss', patience=1, verbose=0)
saveBestModel = ModelCheckpoint(save_best_model_file,
                                monitor='val_loss',
                                verbose=0,
                                save_best_only=True,
                                save_weights_only=True)

embeddings_layer = Embedding(len(word_index) + 1,
                             embedding_dim,
                             input_length=max_sequence_length,
                             trainable=True)

SENT_HIDDEN_SIZE = 100
inputs = Input(shape=(max_sequence_length, ), dtype='int32', name='input')
embeddings_sequences = embeddings_layer(inputs)
output = RNN(SENT_HIDDEN_SIZE)(embeddings_sequences)
output = Dense(64, activation='relu', name='dense1')(output)

print(output)

output = Dense(1, activation='sigmoid')(output)
model = Model(inputs=inputs, outputs=[output])
model.summary()
model.compile(loss='binary_crossentropy',
              optimizer=Adam(0.0001),
              metrics=['accuracy'])
checkpoint_filepath = 'E:/DeepLearning/bully_code/diyu/indrnn.h5'
checkpoint = ModelCheckpoint(checkpoint_filepath,
                             monitor='acc',
                             verbose=0,
                             save_best_only=True,