def test_fit(self):
    x = np.random.standard_normal((1024, 3, 5, 5))
    y = (x[:, 1, 2, 3] > 0).astype('int32')
    model = Sequential()
    model.add(KernelConv2D(input_shape=(3, 5, 5),
                           filters=4,
                           kernel_size=3,
                           kernel_function=LinearKernel(),
                           data_format='channels_first'))
    model.add(KernelConv2D(filters=3,
                           kernel_size=3,
                           kernel_function=LinearKernel(),
                           padding='same',
                           data_format='channels_last'))
    model.add(Flatten())
    model.add(Dense(units=2, activation='softmax'))
    model.compile(optimizer='adam', loss='sparse_categorical_crossentropy')
    model.summary()
    model.fit(x, y, epochs=30)
    model_path = os.path.join(tempfile.gettempdir(),
                              'test_knn_%f.h5' % np.random.random())
    model.save(model_path)
    model = load_model(model_path, custom_objects={
        'KernelConv2D': KernelConv2D,
        'LinearKernel': LinearKernel,
    })
    predicted = model.predict(x).argmax(axis=-1)
    self.assertLess(np.sum(np.abs(y - predicted)), 200)
def discriminator_model(img_shape):
    model = Sequential()
    model.add(Conv2D(32, kernel_size=3, strides=2,
                     input_shape=img_shape, padding="same"))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dropout(0.25))
    model.add(Conv2D(64, kernel_size=3, strides=2, padding="same"))
    model.add(ZeroPadding2D(padding=((0, 1), (0, 1))))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dropout(0.25))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Conv2D(128, kernel_size=3, strides=2, padding="same"))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dropout(0.25))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Conv2D(256, kernel_size=3, strides=1, padding="same"))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(1, activation='sigmoid'))
    model.summary()

    img = Input(shape=img_shape)
    validity = model(img)
    return Model(img, validity)
def create_generator():
    # Create the generator network structure
    generator = Sequential()
    generator.add(Dense(12544, input_dim=100))
    generator.add(BatchNormalization(momentum=0.9))
    generator.add(Activation('relu'))
    generator.add(Reshape((7, 7, 256)))
    generator.add(Dropout(0.4))
    generator.add(UpSampling2D())
    generator.add(Conv2DTranspose(128, 5, padding='same'))
    generator.add(BatchNormalization(momentum=0.9))
    generator.add(Activation('relu'))
    generator.add(UpSampling2D())
    generator.add(Conv2DTranspose(64, 5, padding='same'))
    generator.add(BatchNormalization(momentum=0.9))
    generator.add(Activation('relu'))
    generator.add(Conv2DTranspose(32, 5, padding='same'))
    generator.add(BatchNormalization(momentum=0.9))
    generator.add(Activation('relu'))
    generator.add(Conv2DTranspose(1, 5, padding='same'))
    generator.add(Activation('sigmoid'))
    generator.compile(optimizer=RMSprop(lr=0.0004, clipvalue=1.0, decay=3e-8),
                      loss='binary_crossentropy',
                      metrics=['accuracy'])
    generator.summary()
    return generator
def train_rnn(filename):
    (X_train_token, X_dev_token, X_test_token,
     y_train, y_dev, y_test, tokenizer) = my_data.generate_rnn(filename)
    paragram_embeddings = load_para(tokenizer.word_index)
    model = Sequential()
    optimizer = Adam(lr=1e-3)
    model.add(Embedding(weights=[paragram_embeddings],
                        trainable=False,
                        input_dim=num_words,
                        output_dim=embedding_size,
                        input_length=max_tokens))
    model.add(GRU(units=32, return_sequences=True))
    model.add(GRU(units=16, dropout=0.5, return_sequences=True))
    model.add(GRU(units=8, return_sequences=True))
    model.add(GRU(units=4))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(loss='binary_crossentropy',
                  optimizer=optimizer,
                  metrics=['AUC', 'accuracy'])
    model.summary()
    history = model.fit(np.array(X_train_token), y_train,
                        validation_data=(np.array(X_dev_token), y_dev),
                        epochs=4,
                        batch_size=500)
    save_model(model, path + 'rnn_model_ref.h5')
    logging.info('train complete')
    return model
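# `load_para` is called above but not defined in this section. A minimal
# sketch, assuming a paragram/GloVe-style text file of "word v1 v2 ..."
# lines; the file path and the `num_words`/`embedding_size` defaults are
# assumptions, not part of the original snippet.
import numpy as np

def load_para(word_index, embedding_path='paragram_300.txt',
              num_words=50000, embedding_size=300):
    # Row i of the matrix holds the vector for the word with index i.
    embedding_matrix = np.zeros((num_words, embedding_size))
    with open(embedding_path, encoding='utf8', errors='ignore') as handle:
        for line in handle:
            parts = line.rstrip().split(' ')
            word, vector = parts[0], parts[1:]
            index = word_index.get(word)
            if (index is not None and index < num_words
                    and len(vector) == embedding_size):
                embedding_matrix[index] = np.asarray(vector, dtype='float32')
    return embedding_matrix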
def init_model(self, input_shape, num_classes, max_layer_num=5, **kwargs):
    # FIXME: keras sequential model is better than keras functional api,
    # why???
    model = Sequential()
    min_size = min(input_shape[:2])
    for i in range(max_layer_num):
        if i == 0:
            model.add(Conv2D(64, 3, input_shape=input_shape, padding='same'))
        else:
            model.add(Conv2D(64, 3, padding='same'))
        model.add(Activation('relu'))
        model.add(BatchNormalization())
        model.add(MaxPooling2D(pool_size=(2, 2)))
        min_size //= 2
        if min_size < 2:
            break
    model.add(Flatten())
    model.add(Dense(64))
    model.add(Dropout(rate=0.5))
    model.add(Activation('relu'))
    model.add(Dense(num_classes))
    model.add(Activation('softmax'))

    # optimizer = tf.keras.optimizers.SGD(lr=0.01, decay=1e-6)
    optimizer = tf.keras.optimizers.Adam()
    # optimizer = optimizers.SGD(lr=1e-3, decay=2e-4, momentum=0.9, clipvalue=5)
    model.compile(loss='sparse_categorical_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])
    model.summary()
    self.is_init = True
    self._model = model
def build_generator(channels, num_classes, latent_dim):
    model = Sequential()
    model.add(Dense(128 * 7 * 7, activation="relu", input_dim=latent_dim))
    model.add(Reshape((7, 7, 128)))
    model.add(BatchNormalization(momentum=0.8))
    model.add(UpSampling2D())  # 7x7x128 -> 14x14x128
    model.add(Conv2D(128, kernel_size=3, padding="same"))
    model.add(Activation("relu"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(UpSampling2D())  # 14x14x128 -> 28x28x128
    model.add(Conv2D(64, kernel_size=3, padding="same"))
    model.add(Activation("relu"))
    model.add(BatchNormalization(momentum=0.8))  # 28x28x64
    model.add(Conv2D(channels, kernel_size=3, padding='same'))
    model.add(Activation("tanh"))  # 28x28xchannels
    model.summary()

    noise = Input(shape=(latent_dim,))
    label = Input(shape=(1,), dtype='int32')
    label_embedding = Flatten()(Embedding(num_classes, latent_dim)(label))
    model_input = multiply([noise, label_embedding])
    img = model(model_input)
    return Model([noise, label], img)
def Red_Neuronal(Neuronas, Capas, Color, Filtros, Fila_pooling,
                 Columna_pooling, Fila_filtros, Columna_filtros,
                 Valor_LeakyReLu):
    cnn = Sequential()  # Defines the network as a succession of layers
    Capas = Capas - 1
    cnn.add(Capa_Inicial(Color, Filtros, Fila_pooling, Columna_pooling,
                         Fila_filtros, Columna_filtros, Valor_LeakyReLu))
    for x in range(Capas):
        cnn.add(Capas_Ocultas(Fila_filtros, Columna_filtros, Filtros,
                              Fila_pooling, Columna_pooling,
                              Valor_LeakyReLu, x))
    # Flattens the final image, compressing all the depth it had
    cnn.add(Flatten())
    # Dense layer of `Neuronas` neurons
    cnn.add(Dense(Neuronas, activation='relu'))
    # During training this "switches off" half of the dense layer's
    # neurons to avoid overfitting
    cnn.add(Dropout(0.5))
    # softmax outputs the probability of belonging to each class
    cnn.add(Dense(2, activation='softmax'))
    cnn.compile(loss='categorical_crossentropy',
                optimizer=Adam(lr=0.0005),
                metrics=['accuracy'])
    cnn.summary()
    return cnn
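# Capa_Inicial and Capas_Ocultas are not defined in this snippet. A minimal
# sketch, assuming each returns a small Sequential block (Keras accepts a
# model wherever a layer is expected); the 256x256x`Color` input shape and
# the filter-doubling schedule are assumptions.
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, LeakyReLU

def Capa_Inicial(Color, Filtros, Fila_pooling, Columna_pooling,
                 Fila_filtros, Columna_filtros, Valor_LeakyReLu):
    # First conv block; only this one needs an input_shape.
    return Sequential([
        Conv2D(Filtros, (Fila_filtros, Columna_filtros), padding='same',
               input_shape=(256, 256, Color)),
        LeakyReLU(alpha=Valor_LeakyReLu),
        MaxPooling2D(pool_size=(Fila_pooling, Columna_pooling)),
    ])

def Capas_Ocultas(Fila_filtros, Columna_filtros, Filtros, Fila_pooling,
                  Columna_pooling, Valor_LeakyReLu, x):
    # Double the filter count at each depth `x` so deeper blocks are wider.
    return Sequential([
        Conv2D(Filtros * (2 ** (x + 1)), (Fila_filtros, Columna_filtros),
               padding='same'),
        LeakyReLU(alpha=Valor_LeakyReLu),
        MaxPooling2D(pool_size=(Fila_pooling, Columna_pooling)),
    ])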
def build_critic(self):
    model = Sequential()
    model.add(Conv2D(16, kernel_size=3, strides=2,
                     input_shape=self.img_shape, padding="same"))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dropout(0.25))
    model.add(Conv2D(32, kernel_size=3, strides=2, padding="same"))
    model.add(ZeroPadding2D(padding=((0, 1), (0, 1))))
    model.add(BatchNormalization(momentum=0.8))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dropout(0.25))
    model.add(Conv2D(64, kernel_size=3, strides=2, padding="same"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dropout(0.25))
    model.add(Conv2D(128, kernel_size=3, strides=1, padding="same"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(1))
    model.summary()

    img = Input(shape=self.img_shape)
    validity = model(img)
    return Model(img, validity)
def build_model(self):
    model = Sequential()
    model.add(Conv2D(32, (4, 4), strides=(2, 2),
                     input_shape=(24, 10, 1),  # batch_size=64
                     kernel_initializer=initializers.glorot_uniform(),
                     activation=activations.relu,
                     kernel_regularizer=regularizers.l2(0.01)))
    model.add(Conv2D(64, (3, 3), strides=(1, 1), activation=activations.relu))
    model.add(Conv2D(128, (2, 2), strides=(1, 1),
                     activation=activations.relu))
    # model.add(Dropout(0.5))
    model.add(Flatten())
    # model.add(Dense(512, input_dim=self.state_size,
    #                 activation=activations.linear))  # autograd, PReLU, RMSprop
    # model.add(LeakyReLU(alpha=0.3))
    # model.add(BatchNormalization(momentum=0.99, epsilon=0.001))
    # model.add(LeakyReLU(alpha=0.3))
    # model.add(Dense(256, activation=activations.linear))
    # model.add(BatchNormalization(momentum=0.99, epsilon=0.001))
    # model.add(Dense(128, activation=activations.linear))
    # model.add(BatchNormalization(momentum=0.99, epsilon=0.001))
    # model.add(Dense(64, activation=activations.linear))
    # model.add(BatchNormalization(momentum=0.99, epsilon=0.001))
    model.add(Dense(self.action_size, activation=activations.softmax))
    model.compile(loss=losses.categorical_crossentropy,  # alternative: 'mse'
                  optimizer=optimizers.RMSprop(lr=self.LEARNING_RATE))  # RMSprop, Adam, Nadam
    self.tensorBoard = TensorBoard('./logs/RLAgent',
                                   histogram_freq=0,
                                   write_graph=True,
                                   write_images=True)
    model.summary()
    return model
def createModel(self, inputs, outputs, hiddenLayers, activationType,
                learningRate):
    model = Sequential()
    if len(hiddenLayers) == 0:
        model.add(Dense(self.output_size, input_shape=(self.input_size,),
                        kernel_initializer='lecun_uniform'))
        model.add(Activation('linear'))
    else:
        model.add(Dense(hiddenLayers[0], input_shape=(self.input_size,),
                        kernel_initializer='lecun_uniform'))
        if activationType == 'LeakyReLU':
            model.add(LeakyReLU(alpha=0.01))
        else:
            model.add(Activation(activationType))

        for index in range(1, len(hiddenLayers)):
            layerSize = hiddenLayers[index]
            model.add(Dense(layerSize, kernel_initializer='lecun_uniform'))
            if activationType == 'LeakyReLU':
                model.add(LeakyReLU(alpha=0.01))
            else:
                model.add(Activation(activationType))
        model.add(Dense(self.output_size,
                        kernel_initializer='lecun_uniform'))
        model.add(Activation('linear'))
    optimizer = optimizers.RMSprop(lr=learningRate, rho=0.9, epsilon=1e-6)
    model.compile(loss="mse", optimizer=optimizer)
    model.summary()
    return model
def train(csvFile):
    data = pd.read_csv(csvFile)
    sol_predict = data["Solubility Score"]
    data.drop(["Solubility Score", "PDB File", "Sequence"],
              inplace=True, axis=1)

    model = Sequential([
        layers.Dense(128, activation='relu', input_shape=(117,)),
        layers.Dense(128, activation='relu'),
        layers.Dense(128, activation='relu'),
        layers.Dense(64, activation='relu'),
        layers.Dense(32, activation='relu'),
        layers.Dense(16, activation='relu'),
        layers.Dense(1)
    ])
    model.summary()
    optimizer = tf.compat.v1.keras.optimizers.Adam(learning_rate=0.001)
    model.compile(loss='mse', optimizer=optimizer, metrics=['mae', 'mse'])
    modelMLP = model.fit(data, sol_predict,
                         epochs=1000, batch_size=32, verbose=1)
    print(modelMLP)
    tf.keras.models.save_model(model, 'model1', save_format='h5')
def main():
    (x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()
    x_train = x_train.reshape(60000, 784)
    x_train = x_train / 255
    x_test = x_test.reshape(10000, 784)
    x_test = x_test / 255
    y_train = utils.to_categorical(y_train, 10)
    y_test = utils.to_categorical(y_test, 10)

    model = Sequential()
    model.add(Dense(784, input_dim=784, activation="relu"))
    model.add(Dense(10, activation="softmax"))
    model.compile(loss="categorical_crossentropy",
                  optimizer="SGD",
                  metrics=["accuracy"])
    model.summary()

    callback = [TensorBoard(log_dir='logs', histogram_freq=1,
                            write_images=True)]
    model.fit(x_train, y_train,
              batch_size=200,
              epochs=300,
              verbose=1,
              validation_split=0.2,
              callbacks=callback)
    model.save("fashion_model.h5")
    score = model.evaluate(x_test, y_test, verbose=1)
    print("Accuracy on test data is", score[1] * 100, "percent")
def build_discriminator(img_shape, num_classes, optimizer):
    model = Sequential()
    model.add(Dense(512, input_dim=np.prod(img_shape)))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dropout(0.4))
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dropout(0.4))
    model.add(Dense(1, activation='sigmoid'))
    model.summary()

    img = Input(shape=img_shape)
    label = Input(shape=(1,), dtype='int32')
    label_embedding = Flatten()(Embedding(num_classes,
                                          np.prod(img_shape))(label))
    flat_img = Flatten()(img)
    model_input = multiply([flat_img, label_embedding])
    validity = model(model_input)

    # Compile the discriminator
    discriminator = Model([img, label], validity)
    discriminator.compile(loss=['binary_crossentropy'],
                          optimizer=optimizer,
                          metrics=['accuracy'])
    return discriminator
def prepare_model(embeddings):
    shared_model = Sequential()
    # Set trainable=False so that gradient descent will not update the
    # embeddings.
    shared_model.add(Embedding(len(embeddings),
                               embedding_dim,
                               weights=[embeddings],
                               input_shape=(max_seq_length,),
                               trainable=False))
    shared_model.add(LSTM(n_hidden))

    q1_input = Input(shape=(max_seq_length,), dtype='int32')
    q2_input = Input(shape=(max_seq_length,), dtype='int32')
    malstm_distance = ManhattanDistance()(
        [shared_model(q1_input), shared_model(q2_input)])
    # Model comes from: from tensorflow.python.keras.models import Model
    model = Model(inputs=[q1_input, q2_input], outputs=[malstm_distance])
    # if gpus >= 2:
    #     model = tf.keras.utils.multi_gpu_model(model, gpus=gpus)
    # Adam comes from: from tensorflow.python.keras.optimizers import Adam
    model.compile(loss='mean_squared_error',
                  optimizer=Adam(),
                  metrics=['accuracy'])
    model.summary()
    shared_model.summary()
    return model
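# ManhattanDistance is a custom layer that this snippet imports from
# elsewhere. A minimal sketch of the usual MaLSTM similarity,
# exp(-||a - b||_1), which maps a pair of sentence encodings to a score
# in (0, 1]; treat the exact formula as an assumption about the original.
import tensorflow.keras.backend as K
from tensorflow.keras.layers import Layer

class ManhattanDistance(Layer):
    def call(self, inputs):
        left, right = inputs
        # L1 distance between the two encodings, squashed into (0, 1].
        return K.exp(-K.sum(K.abs(left - right), axis=1, keepdims=True))

    def compute_output_shape(self, input_shape):
        return (input_shape[0][0], 1)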
def build_generator(img_shape, num_classes, latent_dim):
    model = Sequential()
    model.add(Dense(256, input_dim=latent_dim))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(1024))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(np.prod(img_shape), activation='tanh'))
    model.add(Reshape(img_shape))
    model.summary()

    noise = Input(shape=(latent_dim,))
    label = Input(shape=(1,), dtype='int32')
    label_embedding = Flatten()(Embedding(num_classes, latent_dim)(label))
    model_input = multiply([noise, label_embedding])
    img = model(model_input)
    return Model([noise, label], img)
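# How a generator/discriminator pair like the two builders above is
# typically wired into the combined model that trains the generator. This
# wiring is a sketch of the standard cGAN pattern, not code from the
# original source; `build_combined` is a hypothetical helper name.
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model

def build_combined(generator, discriminator, latent_dim):
    noise = Input(shape=(latent_dim,))
    label = Input(shape=(1,), dtype='int32')
    img = generator([noise, label])
    # Freeze the discriminator while the generator is being trained.
    discriminator.trainable = False
    validity = discriminator([img, label])
    combined = Model([noise, label], validity)
    combined.compile(loss='binary_crossentropy', optimizer='adam')
    return combined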
def simple_regression(x=[], y=[], plot=False):
    y = np.reshape(y, (-1, 1))
    scaler_x = MinMaxScaler()
    scaler_y = MinMaxScaler()
    print(scaler_x.fit(x))
    xscale = scaler_x.transform(x)
    print(scaler_y.fit(y))
    yscale = scaler_y.transform(y)
    X_train, X_test, y_train, y_test = train_test_split(xscale, yscale)

    model = Sequential()
    model.add(Dense(12, input_dim=1557, kernel_initializer="normal",
                    activation="relu"))
    model.add(Dense(8, activation="relu"))
    model.add(Dense(1, activation="linear"))
    model.summary()
    model.compile(loss="mse", optimizer="adam", metrics=["mse", "mae"])
    history = model.fit(X_train, y_train,
                        epochs=150,
                        batch_size=50,
                        verbose=1,
                        validation_split=0.2)
    print(history.history.keys())

    # "Loss"
    if plot:
        plt.plot(history.history["loss"])
        plt.plot(history.history["val_loss"])
        plt.title("model loss")
        plt.ylabel("loss")
        plt.xlabel("epoch")
        plt.legend(["train", "validation"], loc="upper left")
        plt.show()

    # The scalers expect 2-D input, so reshape the single sample first.
    Xnew = np.reshape(x[-1], (1, -1))
    Xnew = scaler_x.transform(Xnew)
    ynew = model.predict(Xnew)
    ynew = scaler_y.inverse_transform(ynew)
    Xnew = scaler_x.inverse_transform(Xnew)
    print("X=%s, Predicted=%s" % (Xnew[0], ynew[0]))
def create_discriminator():
    model = Sequential()
    model.add(Conv2D(32, kernel_size=3, strides=2, padding="same",
                     input_shape=(256, 256, 3)))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dropout(0.25))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Conv2D(64, kernel_size=3, strides=2, padding="same"))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dropout(0.25))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Conv2D(128, kernel_size=3, strides=2, padding="same"))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dropout(0.25))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Flatten())
    model.add(Dense(1, activation='sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer=adam_optimizer())
    model.summary()
    return model
def classifyMammograms():
    X, y = readData()
    X = numpy.array(X)
    y = numpy.array(y)
    X_train, X_test, y_train, y_test = train_test_split(X, y,
                                                        test_size=0.20,
                                                        random_state=123,
                                                        shuffle=True)
    y_train = to_categorical(y_train)
    y_test = to_categorical(y_test)

    model = Sequential()
    model.add(Dense(128, activation='relu', input_dim=len(X[0]),
                    kernel_regularizer='l2',
                    kernel_initializer=he_normal(seed=None)))
    model.add(Dense(128))
    model.add(Dense(64, activation='relu'))
    model.add(Dense(64))
    model.add(Dense(32, activation='relu'))
    model.add(Dense(32))
    model.add(Dense(16, activation='relu'))
    model.add(Dense(16))
    model.add(Dense(8, activation='relu'))
    model.add(Dense(4))
    model.add(Dense(2, activation='sigmoid'))
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    filepath = "model.hdf5"
    checkpoint = ModelCheckpoint(filepath, monitor='loss', verbose=1,
                                 save_best_only=True, mode='min')
    earlystopping = EarlyStopping(monitor='loss', patience=20,
                                  restore_best_weights=True)
    desired_callbacks = [checkpoint, earlystopping]

    # history = model.fit(X_train, y_train, validation_split=0.2,
    #                     batch_size=50, epochs=1000, shuffle=True,
    #                     callbacks=desired_callbacks)
    # plt.plot(history.history['loss'])
    # plt.plot(history.history['val_loss'])
    # plt.title('model loss')
    # plt.ylabel('loss')
    # plt.xlabel('epoch')
    # plt.legend(['train', 'test'], loc='upper left')
    # plt.savefig("loss.jpg")

    model = load_model("model.hdf5")
    model.summary()
    scores = model.evaluate(X_train, y_train, verbose=0)
    print('Accuracy on training data: {}% \n Error on training data: {}'.format(
        scores[1] * 100, 1 - scores[1]))
    scores2 = model.evaluate(X_test, y_test, verbose=0)
    print('Accuracy on test data: {}% \n Error on test data: {}'.format(
        scores2[1] * 100, 1 - scores2[1]))
def train_nn_regressor_tf(self, x, y, architecture=(512, 256, 128), ndim=7):
    model = Sequential()
    model.add(Dense(architecture[0],
                    input_dim=ndim,
                    kernel_initializer='normal',
                    activation=tf.nn.leaky_relu))
    for i in architecture[1:]:
        model.add(Dense(i, activation=tf.nn.leaky_relu))
    model.add(Dense(1, activation='linear'))
    model.compile(loss='mse', optimizer='adam', metrics=['mse', 'mae'])
    model.summary()
    history = model.fit(np.asarray(x), np.asarray(y),
                        epochs=30,
                        batch_size=150,
                        verbose=1,
                        validation_split=0.2)
    # Track the validation loss, matching the list's name.
    self.val_loss_list.append(history.history['val_loss'])
    self.number_of_samples.append(len(self.x))
    return model
def build_model_CNN(self):
    '''CNN'''
    model = Sequential()
    embedding_layer = Embedding(self.vocab_length,
                                300,
                                weights=[self.embedding_matrix],
                                input_length=self.length_long_sentence,
                                trainable=False)
    model.add(embedding_layer)
    model.add(Conv1D(filters=150,
                     kernel_regularizer=l2(0.01),
                     kernel_size=5,
                     strides=1,
                     padding='valid'))
    model.add(MaxPooling1D(2, padding='valid'))
    model.add(Conv1D(filters=150,
                     kernel_regularizer=l2(0.01),
                     kernel_size=5,
                     strides=1,
                     padding='valid'))
    model.add(MaxPooling1D(2, padding='valid'))
    model.add(Flatten())
    model.add(Dense(80, kernel_regularizer=l2(0.01), activation='relu'))
    model.add(Dense(40, kernel_regularizer=l2(0.01), activation='relu'))
    model.add(Dense(20, kernel_regularizer=l2(0.01), activation='relu'))
    model.add(Dense(2, activation='sigmoid'))
    model.compile(loss='binary_crossentropy',
                  optimizer=RMSprop(lr=0.001),
                  metrics=['accuracy'])
    model.summary()
    return model  # without this the built model was silently discarded
def create_CNNmodel():
    # Model
    model = Sequential()
    # Add convolution 2D
    model.add(Conv2D(32, kernel_size=(3, 3),
                     activation='relu',
                     kernel_initializer='he_normal',
                     input_shape=(ROWS, COLS, 1)))
    model.add(MaxPooling2D((2, 2)))
    model.add(Dropout(0.25))
    model.add(Conv2D(64, kernel_size=(3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Conv2D(128, (3, 3), activation='relu'))
    model.add(Dropout(0.4))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dense(CLASSES, activation='softmax'))
    model.compile(loss=keras.losses.categorical_crossentropy,
                  optimizer='adam',
                  metrics=['accuracy'])
    model.summary()
    return model
def _cnn_lstm_model(input_length,
                    num_classes,
                    num_features,
                    embedding_matrix,
                    embedding_dim,
                    filters_num=512,
                    filter_sizes=None,
                    dropout_rate=0.5):
    if filter_sizes is None:
        filter_sizes = [5]
    op_units, op_activation = num_classes, 'softmax'

    model = Sequential()
    model.add(Embedding(input_dim=num_features,
                        output_dim=embedding_dim,
                        input_length=input_length,
                        weights=[embedding_matrix],
                        trainable=False))
    model.add(Bidirectional(LSTM(units=int(embedding_dim / 2),
                                 return_sequences=True),
                            input_shape=(-1, embedding_dim)))
    model.add(Flatten())
    model.add(Dropout(rate=dropout_rate))
    model.add(Dense(units=op_units, activation=op_activation))

    loss = 'sparse_categorical_crossentropy'
    optimizer = Adam()
    model.compile(optimizer=optimizer, loss=loss, metrics=['accuracy'])
    model.summary()
    return model
def prepare_model(embeddings):
    shared_model = Sequential()
    shared_model.add(Embedding(len(embeddings),
                               embedding_dim,
                               weights=[embeddings],
                               input_shape=(max_seq_length,),
                               trainable=False))
    shared_model.add(LSTM(n_hidden))

    q1_input = Input(shape=(max_seq_length,), dtype='int32')
    q2_input = Input(shape=(max_seq_length,), dtype='int32')
    malstm_distance = ManhattanDistance()(
        [shared_model(q1_input), shared_model(q2_input)])
    model = Model(inputs=[q1_input, q2_input], outputs=[malstm_distance])
    # if gpus >= 2:
    #     model = tf.keras.utils.multi_gpu_model(model, gpus=gpus)
    model.compile(loss='mean_squared_error',
                  optimizer=Adam(),
                  metrics=['accuracy'])
    model.summary()
    shared_model.summary()
    return model
def initialize_model():
    model = Sequential()
    model.add(Conv2D(40, 11, strides=1, padding='same',
                     input_shape=(1, 1024, 4)))
    model.add(BatchNormalization(axis=-1))
    model.add(Activation('relu'))
    model.add(Conv2D(40, 11, strides=1, padding='same'))
    model.add(BatchNormalization(axis=-1))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(1, 64)))
    model.add(Flatten())
    model.add(Dense(units=500))
    model.add(Dense(units=640))
    model.add(Reshape((1, 16, 40)))
    model.add(Conv2DTranspose(40, 11, strides=(1, 64), padding='same'))
    model.add(BatchNormalization(axis=-1))
    model.add(Activation('relu'))
    model.add(Conv2DTranspose(40, 11, strides=(1, 1), padding='same'))
    model.add(BatchNormalization(axis=-1))
    model.add(Activation('relu'))
    model.add(Conv2D(4, 11, strides=1, padding='same', activation='sigmoid'))
    model.summary()
    model.compile(optimizer='adam', loss='mse')
    return model
def build_encoder(input_shape, z_size, n_filters, n_layers):
    """Build the encoder.

    Arguments:
        input_shape (tuple): shape of the input image
        z_size (int): number of dimensions of the feature space
        n_filters (int): number of filters
        n_layers (int): number of layer blocks
    """
    model = Sequential()
    model.add(Conv2D(n_filters, 3, activation='elu',
                     input_shape=input_shape, padding='same'))
    model.add(Conv2D(n_filters, 3, padding='same'))
    for i in range(2, n_layers + 1):
        model.add(Conv2D(i * n_filters, 3, activation='elu', padding='same'))
        model.add(Conv2D(i * n_filters, 3, activation='elu', padding='same'))
    model.add(Conv2D(n_layers * n_filters, 3, padding='same'))
    model.add(Flatten())
    model.add(Dense(z_size))
    model.summary()
    return model
def initialize_model():
    one_filter_keras_model = Sequential()
    one_filter_keras_model.add(Conv2D(filters=40,
                                      kernel_size=(1, 11),
                                      padding="same",
                                      input_shape=(1, 1500, 5),
                                      kernel_constraint=NonNeg()))
    one_filter_keras_model.add(BatchNormalization(axis=-1))
    one_filter_keras_model.add(Activation('relu'))
    one_filter_keras_model.add(MaxPooling2D(pool_size=(1, 30)))
    one_filter_keras_model.add(Flatten())
    one_filter_keras_model.add(Dense(40))
    one_filter_keras_model.add(BatchNormalization(axis=-1))
    one_filter_keras_model.add(Activation('relu'))
    one_filter_keras_model.add(Dropout(0.5))
    one_filter_keras_model.add(Dense(1))
    one_filter_keras_model.add(Activation("sigmoid"))
    one_filter_keras_model.summary()
    one_filter_keras_model.compile(optimizer='adam',
                                   loss='binary_crossentropy',
                                   metrics=[precision, recall, specificity])
    return one_filter_keras_model
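# precision, recall and specificity above are custom metric functions that
# the snippet imports from elsewhere. Minimal backend-only sketches for
# binary labels, assuming y_pred is a sigmoid probability; the 0.5
# rounding threshold and epsilon handling are assumptions.
import tensorflow.keras.backend as K

def precision(y_true, y_pred):
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    return true_positives / (predicted_positives + K.epsilon())

def recall(y_true, y_pred):
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    return true_positives / (possible_positives + K.epsilon())

def specificity(y_true, y_pred):
    true_negatives = K.sum(K.round(K.clip((1 - y_true) * (1 - y_pred), 0, 1)))
    possible_negatives = K.sum(K.round(K.clip(1 - y_true, 0, 1)))
    return true_negatives / (possible_negatives + K.epsilon())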
def create_generator():
    model = Sequential()
    # Reshape input into a 32x32x64 tensor via a fully connected layer
    model.add(Dense(64 * 32 * 32, input_dim=z_dim))
    model.add(Reshape((32, 32, 64))))
    # Transposed convolution, from 32x32x64 into a 64x64x128 tensor
    model.add(Conv2DTranspose(128, kernel_size=3, strides=2, padding='same'))
    model.add(BatchNormalization())   # Batch normalization
    model.add(LeakyReLU(alpha=0.01))  # Leaky ReLU
    # Transposed convolution, from 64x64x128 to a 128x128x64 tensor
    model.add(Conv2DTranspose(64, kernel_size=3, strides=2, padding='same'))
    model.add(BatchNormalization())   # Batch normalization
    model.add(LeakyReLU(alpha=0.01))  # Leaky ReLU
    # Transposed convolution, from 128x128x64 to a 256x256x3 tensor
    model.add(Conv2DTranspose(3, kernel_size=3, strides=2, padding='same'))
    model.add(Activation('tanh'))     # Tanh activation
    model.compile(loss='binary_crossentropy', optimizer=adam_optimizer())
    model.summary()
    return model
def generate(self):
    train = pd.read_csv(self.train_path)
    test = pd.read_csv(self.test_path)
    X_train, y_train, _ = self.preprocess_data(train)
    X_test, y_test, _ = self.preprocess_data(test)

    model = Sequential()
    model.add(Conv2D(kernel_size=5,
                     strides=1,
                     filters=32,
                     activation='relu',
                     name='conv_layer1',
                     input_shape=self.img_shape_keras,
                     padding='same'))
    model.add(MaxPooling2D(pool_size=2, strides=2))
    model.add(Dropout(0.5))
    model.add(Conv2D(kernel_size=5,
                     strides=1,
                     filters=64,
                     activation='relu',
                     name='conv_layer2',
                     padding='same'))
    model.add(MaxPooling2D(pool_size=2, strides=2))
    model.add(Dropout(0.5))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(self.num_classes, activation='softmax'))

    optimizer = Adam(lr=0.0001)
    model.compile(optimizer=optimizer,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    early_stopping_monitor = EarlyStopping(patience=2, monitor='val_acc')
    checkpoint = ModelCheckpoint(filepath='model.h5',
                                 monitor='val_acc',
                                 verbose=1,
                                 save_best_only=True,
                                 mode='max')
    history = model.fit(X_train, y_train,
                        validation_split=0.3,
                        batch_size=200,
                        epochs=50,
                        callbacks=[early_stopping_monitor, checkpoint],
                        verbose=2)
    model.summary()

    score = model.evaluate(X_test, y_test, verbose=0)
    print('Test loss:', score[0])
    print('Test accuracy:', score[1])
    self.plot_accuracy(history)
    self.plot_loss(history)
class SarcasmCNN:
    """
    End-to-end convolutional neural network pipeline for sarcasm
    classification on a tweet corpus.
    """

    def __init__(self, data, vocab_processor):
        """Constructor for SarcasmCNN.

        Params:
            data: ((X_train, y_train), (X_test, y_test)) tuple
            vocab_processor: fitted vocabulary processor for the corpus
        """
        # instantiate model
        (self.X_train, self.y_train), (self.X_test, self.y_test) = data
        self.vocab_processor = vocab_processor
        self.epochs = EPOCHS
        self.batch_size = BATCH_SIZE
        self.history = None
        self.loss = None
        self.accuracy = None
        self.model = Sequential()
        self._add_model_layers()

    def _add_model_layers(self):
        self.model.add(layers.Embedding(
            input_dim=len(self.vocab_processor.vocabulary_),
            output_dim=self.X_train.shape[1]))
        self.model.add(layers.Conv1D(128, 5, activation='relu'))
        self.model.add(layers.GlobalMaxPooling1D())
        self.model.add(layers.Dense(10, activation='relu'))
        self.model.add(layers.Dense(1, activation='sigmoid'))

    def fit(self):
        self.model.compile(optimizer='adam',
                           loss='binary_crossentropy',
                           metrics=['accuracy'])
        self.model.summary()
        self.history = self.model.fit(self.X_train, self.y_train,
                                      epochs=self.epochs,
                                      verbose=True,
                                      validation_data=(self.X_test,
                                                       self.y_test),
                                      batch_size=self.batch_size)

    def evaluate(self, X, y):
        loss, accuracy = self.model.evaluate(X, y, verbose=False)
        print("Testing Accuracy: {:.4f}".format(accuracy))
        print(f"Testing Loss: {loss:.4f}")

    def run(self):
        # Same as fit(), followed by a final evaluation on the test split.
        self.fit()
        self.loss, self.accuracy = self.model.evaluate(self.X_test,
                                                       self.y_test,
                                                       verbose=False)
        print("Testing Accuracy: {:.4f}".format(self.accuracy))
        print(f"Testing Loss: {self.loss:.4f}")
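# A usage sketch for SarcasmCNN; `load_corpus` is a hypothetical helper
# standing in for whatever produces the splits and the fitted vocabulary
# processor in the original project.
(X_train, y_train), (X_test, y_test), vocab_processor = load_corpus()
cnn = SarcasmCNN(((X_train, y_train), (X_test, y_test)), vocab_processor)
cnn.run()                      # compile, summary, fit, final evaluation
cnn.evaluate(X_test, y_test)   # re-score any split on demand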
def createRegularizedModel(
        self,
        inputs,
        outputs,
        hiddenLayers,  # List of node counts for each hidden layer
        activationType,
        learningRate):
    bias = True
    dropout = 0
    regularizationFactor = 0.01
    model = Sequential()
    if len(hiddenLayers) == 0:
        model.add(Dense(self.output_size, input_shape=(self.input_size,),
                        kernel_initializer='lecun_uniform', use_bias=bias))
        model.add(Activation("linear"))
    else:
        if regularizationFactor > 0:
            model.add(Dense(hiddenLayers[0],
                            input_shape=(self.input_size,),
                            kernel_initializer='lecun_uniform',
                            kernel_regularizer=l2(regularizationFactor),
                            use_bias=bias))
        else:
            model.add(Dense(hiddenLayers[0],
                            input_shape=(self.input_size,),
                            kernel_initializer='lecun_uniform',
                            use_bias=bias))
        if activationType == 'LeakyReLU':
            model.add(LeakyReLU(alpha=0.01))
        else:
            model.add(Activation(activationType))

        for index in range(1, len(hiddenLayers)):
            layerSize = hiddenLayers[index]  # was hiddenSize[index], a NameError
            if regularizationFactor > 0.0:
                model.add(Dense(layerSize,
                                kernel_initializer='lecun_uniform',
                                kernel_regularizer=l2(regularizationFactor),
                                use_bias=bias))
            else:
                model.add(Dense(layerSize,
                                kernel_initializer='lecun_uniform',
                                use_bias=bias))
            if activationType == "LeakyReLU":
                model.add(LeakyReLU(alpha=0.01))
            else:
                model.add(Activation(activationType))
            if dropout > 0:
                model.add(Dropout(dropout))
        model.add(Dense(self.output_size,
                        kernel_initializer='lecun_uniform', use_bias=bias))
        model.add(Activation("linear"))
    optimizer = optimizers.RMSprop(lr=learningRate, rho=0.9, epsilon=1e-6)
    model.compile(loss="mse", optimizer=optimizer)
    model.summary()
    return model
def google_net(size=256, kernel=3):
    model = Sequential()
    model.add(Conv2D(32, (kernel, kernel), activation='relu',
                     input_shape=(size, size, 3), strides=2,
                     kernel_regularizer=regularizers.l2(0.01), name='cv1'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(64, (kernel, kernel), activation='relu', strides=2,
                     kernel_regularizer=regularizers.l2(0.01), name='cv2'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(128, (kernel, kernel), activation='relu', strides=2,
                     name='cv3.3'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(256, kernel_regularizer=regularizers.l2(0.01),
                    name='features'))
    model.add(Activation('relu'))
    model.add(Dense(3, activation='softmax', name='denseout'))
    print(model.summary())
    model.compile(loss='categorical_crossentropy',
                  optimizer=RMSprop(lr=1e-4, decay=0.1e-6),
                  metrics=['accuracy'])
    return model
def get_model_1(x, y, Vocab_size, maxlen):
    model = Sequential()
    model.add(Embedding(Vocab_size, 50, input_length=maxlen))
    model.add(LSTM(128, return_sequences=True))
    model.add(LSTM(128))
    model.add(Dense(128, activation='relu'))
    model.add(Dense(Vocab_size))
    model.add(Activation("softmax"))
    print(model.summary())
    model.compile(loss="sparse_categorical_crossentropy",
                  optimizer='adam',
                  metrics=['accuracy'])
    return model
def get_model_1(x, y, Vocab_size, maxlen):
    model = Sequential()
    model.add(Embedding(Vocab_size, 30))  # input_length=maxlen
    model.add(LSTM(60, return_sequences=True))
    model.add(Dense(60, activation='relu'))
    model.add(Dense(Vocab_size))
    model.add(Activation("softmax"))
    print(model.summary())
    # Pass the RMSprop instance; the original built it and then passed
    # 'adam', leaving the instance unused.
    optimizer = RMSprop(lr=0.01)
    model.compile(loss="sparse_categorical_crossentropy",
                  optimizer=optimizer)
    return model
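# Neither get_model_1 variant shows how the trained language model is
# used. A minimal greedy-generation sketch, assuming the Keras Tokenizer
# and `maxlen` from training; the helper name is hypothetical.
import numpy as np
from tensorflow.keras.preprocessing.sequence import pad_sequences

def generate_text(model, tokenizer, seed_text, maxlen, n_words=20):
    text = seed_text
    for _ in range(n_words):
        encoded = tokenizer.texts_to_sequences([text])[0]
        encoded = pad_sequences([encoded], maxlen=maxlen, padding='pre')
        probs = model.predict(encoded, verbose=0)
        # The first variant returns (batch, vocab); the second, with
        # return_sequences=True, returns (batch, time, vocab).
        dist = probs[0, -1] if probs.ndim == 3 else probs[0]
        next_word = tokenizer.index_word.get(int(np.argmax(dist)), '')
        text += ' ' + next_word
    return text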
class RNNGRU(object):
    """Process data for ingestion."""

    def __init__(self, data, sequence_length=20, warmup_steps=50, dropout=0,
                 layers=1, patience=10, units=512, display=False):
        """Instantiate the class.

        Args:
            data: Tuple of (x_data, y_data, target_names)
            sequence_length: Length of vectors for each target
            warmup_steps: Number of warmup time-steps ignored by the loss

        Returns:
            None

        """
        # Initialize key variables
        self._warmup_steps = warmup_steps
        self._data = data
        self.display = display
        path_checkpoint = '/tmp/checkpoint.keras'
        _layers = int(abs(layers))

        # Delete any stale checkpoint file
        if os.path.exists(path_checkpoint) is True:
            os.remove(path_checkpoint)

        ###################################
        # TensorFlow wizardry
        config = tf.ConfigProto()

        # Don't pre-allocate memory; allocate as-needed
        config.gpu_options.allow_growth = True

        # Only allow 95% of the GPU memory to be allocated
        config.gpu_options.per_process_gpu_memory_fraction = 0.95

        # Crash with DeadlineExceeded instead of hanging forever when your
        # queues get full/empty
        config.operation_timeout_in_ms = 60000

        # Create a session with the above options specified.
        backend.tensorflow_backend.set_session(tf.Session(config=config))
        ###################################

        # Get data
        self._y_current = self._data.close()

        # Create training arrays
        x_train = self._data.vectors_train()
        self._y_train = self._data.classes_train()

        # Create test arrays for VALIDATION and EVALUATION
        xv_test = self._data.vectors_test()
        self._yv_test = self._data.classes_test()

        (self.training_rows, self._training_vector_count) = x_train.shape
        (self.test_rows, _) = xv_test.shape
        (_, self._training_class_count) = self._y_train.shape

        # Print stuff
        print('\n> Numpy Data Type: {}'.format(type(x_train)))
        print("> Numpy Data Shape: {}".format(x_train.shape))
        print("> Numpy Data Row[0]: {}".format(x_train[0]))
        print("> Numpy Data Row[Last]: {}".format(x_train[-1]))
        print('> Numpy Targets Type: {}'.format(type(self._y_train)))
        print("> Numpy Targets Shape: {}".format(self._y_train.shape))
        print('> Number of Samples: {}'.format(self._y_current.shape[0]))
        print('> Number of Training Samples: {}'.format(x_train.shape[0]))
        print('> Number of Training Classes: {}'.format(
            self._training_class_count))
        print('> Number of Test Samples: {}'.format(self.test_rows))
        print("> Training Minimum Value:", np.min(x_train))
        print("> Training Maximum Value:", np.max(x_train))
        print('> Number X signals: {}'.format(self._training_vector_count))
        print('> Number Y signals: {}'.format(self._training_class_count))

        # Print epoch related data
        print('> Epochs:', self._data.epochs())
        print('> Batch Size:', self._data.batch_size())
        print('> Steps:', self._data.epoch_steps())

        # Display estimated memory footprint of training data.
        print("> Data size: {:.2f} Bytes".format(x_train.nbytes))

        '''
        The neural network works best on values roughly between -1 and 1,
        so we need to scale the data before it is being input to the
        neural network. We can use scikit-learn for this.

        We first create a scaler-object for the input-signals. Then we
        detect the range of values from the training-data and scale the
        training-data.
        '''
        self._x_scaler = MinMaxScaler()
        self._x_train_scaled = self._x_scaler.fit_transform(x_train)

        print('> Scaled Training Minimum Value: {}'.format(
            np.min(self._x_train_scaled)))
        print('> Scaled Training Maximum Value: {}'.format(
            np.max(self._x_train_scaled)))

        self._xv_test_scaled = self._x_scaler.transform(xv_test)

        '''
        The target-data comes from the same data-set as the input-signals,
        because it is the weather-data for one of the cities that is merely
        time-shifted. But the target-data could be from a different source
        with different value-ranges, so we create a separate scaler-object
        for the target-data.
        '''
        self._y_scaler = MinMaxScaler()
        self._y_train_scaled = self._y_scaler.fit_transform(self._y_train)
        yv_test_scaled = self._y_scaler.transform(self._yv_test)

        # Data Generator
        '''
        The data-set has now been prepared as 2-dimensional numpy arrays.
        The training-data has almost 300k observations, consisting of 20
        input-signals and 3 output-signals.

        These are the array-shapes of the input and output data:
        '''
        print('> Scaled Training Data Shape: {}'.format(
            self._x_train_scaled.shape))
        print('> Scaled Training Targets Shape: {}'.format(
            self._y_train_scaled.shape))

        # We then create the batch-generator.
        generator = self._batch_generator(
            self._data.batch_size(), sequence_length)

        # Validation Set
        '''
        The neural network trains quickly so we can easily run many
        training epochs. But then there is a risk of overfitting the model
        to the training-set so it does not generalize well to unseen data.
        We will therefore monitor the model's performance on the test-set
        after each epoch and only save the model's weights if the
        performance is improved on the test-set.

        The batch-generator randomly selects a batch of short sequences
        from the training-data and uses that during training. But for the
        validation-data we will instead run through the entire sequence
        from the test-set and measure the prediction accuracy on that
        entire sequence.
        '''
        validation_data = (np.expand_dims(self._xv_test_scaled, axis=0),
                           np.expand_dims(yv_test_scaled, axis=0))

        # Create the Recurrent Neural Network
        self._model = Sequential()

        '''
        We can now add a Gated Recurrent Unit (GRU) to the network. This
        will have 512 outputs for each time-step in the sequence.

        Note that because this is the first layer in the model, Keras
        needs to know the shape of its input, which is a batch of
        sequences of arbitrary length (indicated by None), where each
        observation has a number of input-signals (num_x_signals).
        '''
        self._model.add(GRU(
            units=units,
            return_sequences=True,
            recurrent_dropout=dropout,
            input_shape=(None, self._training_vector_count,)))

        for _ in range(0, _layers):
            self._model.add(GRU(
                units=units,
                recurrent_dropout=dropout,
                return_sequences=True))

        '''
        The GRU outputs a batch of sequences of 512 values. We want to
        predict 3 output-signals, so we add a fully-connected (or dense)
        layer which maps 512 values down to only 3 values.

        The output-signals in the data-set have been limited to be between
        0 and 1 using a scaler-object. So we also limit the output of the
        neural network using the Sigmoid activation function, which
        squashes the output to be between 0 and 1.
        '''
        self._model.add(
            Dense(self._training_class_count, activation='sigmoid'))

        '''
        A problem with using the Sigmoid activation function, is that we
        can now only output values in the same range as the training-data.

        For example, if the training-data only has temperatures between
        -20 and +30 degrees, then the scaler-object will map -20 to 0 and
        +30 to 1.
        So if we limit the output of the neural network to be between 0
        and 1 using the Sigmoid function, this can only be mapped back to
        temperature values between -20 and +30.

        We can use a linear activation function on the output instead.
        This allows for the output to take on arbitrary values. It might
        work with the standard initialization for a simple network
        architecture, but for more complicated network architectures e.g.
        with more layers, it might be necessary to initialize the weights
        with smaller values to avoid NaN values during training. You may
        need to experiment with this to get it working.
        '''
        if False:
            # Maybe use lower init-ranges.
            init = RandomUniform(minval=-0.05, maxval=0.05)
            self._model.add(Dense(
                self._training_class_count,
                activation='linear',
                kernel_initializer=init))

        # Compile Model
        '''
        This is the optimizer and the beginning learning-rate that we will
        use. We then compile the Keras model so it is ready for training.
        '''
        optimizer = RMSprop(lr=1e-3)
        self._model.compile(
            loss=self._loss_mse_warmup,
            optimizer=optimizer,
            metrics=['accuracy'])

        '''
        This is a very small model with only two layers. The output shape
        of (None, None, 3) means that the model will output a batch with
        an arbitrary number of sequences, each of which has an arbitrary
        number of observations, and each observation has 3 signals. This
        corresponds to the 3 target signals we want to predict.
        '''
        print('> Model Summary:\n')
        print(self._model.summary())

        # Callback Functions
        '''
        During training we want to save checkpoints and log the progress
        to TensorBoard so we create the appropriate callbacks for Keras.

        This is the callback for writing checkpoints during training.
        '''
        callback_checkpoint = ModelCheckpoint(filepath=path_checkpoint,
                                              monitor='val_loss',
                                              verbose=1,
                                              save_weights_only=True,
                                              save_best_only=True)

        '''
        This is the callback for stopping the optimization when
        performance worsens on the validation-set.
        '''
        callback_early_stopping = EarlyStopping(monitor='val_loss',
                                                patience=patience,
                                                verbose=1)

        '''
        This is the callback for writing the TensorBoard log during
        training.
        '''
        callback_tensorboard = TensorBoard(log_dir='/tmp/23_logs/',
                                           histogram_freq=0,
                                           write_graph=False)

        '''
        This callback reduces the learning-rate for the optimizer if the
        validation-loss has not improved since the last epoch (as
        indicated by patience=0). The learning-rate will be reduced by
        multiplying it with the given factor. We set a start learning-rate
        of 1e-3 above, so multiplying it by 0.1 gives a learning-rate of
        1e-4. We don't want the learning-rate to go any lower than this.
        '''
        callback_reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                                               factor=0.1,
                                               min_lr=1e-4,
                                               patience=0,
                                               verbose=1)

        callbacks = [callback_early_stopping,
                     callback_checkpoint,
                     callback_tensorboard,
                     callback_reduce_lr]

        # Train the Recurrent Neural Network
        '''
        We can now train the neural network.

        Note that a single "epoch" does not correspond to a single
        processing of the training-set, because of how the batch-generator
        randomly selects sub-sequences from the training-set. Instead we
        have selected steps_per_epoch so that one "epoch" is processed in
        a few minutes.

        With these settings, each "epoch" took about 2.5 minutes to
        process on a GTX 1070. After 14 "epochs" the optimization was
        stopped because the validation-loss had not decreased for 5
        "epochs". This optimization took about 35 minutes to finish.

        Also note that the loss sometimes becomes NaN (not-a-number).
        This is often resolved by restarting and running the Notebook
        again. But it may also be caused by your neural network
        architecture, learning-rate, batch-size, sequence-length, etc. in
        which case you may have to modify those settings.
        '''
        print('\n> Starting data training\n')

        self._history = self._model.fit_generator(
            generator=generator,
            epochs=self._data.epochs(),
            steps_per_epoch=self._data.epoch_steps(),
            validation_data=validation_data,
            callbacks=callbacks)

        # Load Checkpoint
        '''
        Because we use early-stopping when training the model, it is
        possible that the model's performance has worsened on the test-set
        for several epochs before training was stopped. We therefore
        reload the last saved checkpoint, which should have the best
        performance on the test-set.
        '''
        print('> Loading model weights')
        if os.path.exists(path_checkpoint):
            self._model.load_weights(path_checkpoint)

        # Performance on Test-Set
        '''
        We can now evaluate the model's performance on the test-set. This
        function expects a batch of data, but we will just use one long
        time-series for the test-set, so we just expand the
        array-dimensionality to create a batch with that one sequence.
        '''
        result = self._model.evaluate(
            x=np.expand_dims(self._xv_test_scaled, axis=0),
            y=np.expand_dims(yv_test_scaled, axis=0))

        print('> Loss (test-set): {}'.format(result))

        # If you have several metrics you can use this instead.
        if False:
            for res, metric in zip(result, self._model.metrics_names):
                print('{0}: {1:.3e}'.format(metric, res))

    def _batch_generator(self, batch_size, sequence_length):
        """Create generator function to create random batches of training-data.

        Args:
            batch_size: Size of batch
            sequence_length: Length of sequence

        Yields:
            (x_batch, y_batch)

        """
        # Infinite loop.
        while True:
            # Allocate a new array for the batch of input-signals.
            x_shape = (
                batch_size, sequence_length, self._training_vector_count)
            x_batch = np.zeros(shape=x_shape, dtype=np.float16)

            # Allocate a new array for the batch of output-signals.
            y_shape = (
                batch_size, sequence_length, self._training_class_count)
            y_batch = np.zeros(shape=y_shape, dtype=np.float16)

            # Fill the batch with random sequences of data.
            for i in range(batch_size):
                # Get a random start-index.
                # This points somewhere into the training-data.
                idx = np.random.randint(
                    self.training_rows - sequence_length)

                # Copy the sequences of data starting at this index.
                x_batch[i] = self._x_train_scaled[idx:idx + sequence_length]
                y_batch[i] = self._y_train_scaled[idx:idx + sequence_length]

            yield (x_batch, y_batch)

    def _loss_mse_warmup(self, y_true, y_pred):
        """Calculate the Mean Squared Error.

        Calculate the Mean Squared Error between y_true and y_pred, but
        ignore the beginning "warmup" part of the sequences.

        We will use Mean Squared Error (MSE) as the loss-function that
        will be minimized. This measures how closely the model's output
        matches the true output signals.

        However, at the beginning of a sequence, the model has only seen
        input-signals for a few time-steps, so its generated output may be
        very inaccurate. Using the loss-value for the early time-steps may
        cause the model to distort its later output. We therefore give the
        model a "warmup-period" of 50 time-steps where we don't use its
        accuracy in the loss-function, in the hope of improving the
        accuracy of later time-steps.

        Args:
            y_true: Desired output.
            y_pred: Model's output.

        Returns:
            loss_mean: Mean Squared Error

        """
        warmup_steps = self._warmup_steps

        # The shape of both input tensors is:
        # [batch_size, sequence_length, num_y_signals].
        # Ignore the "warmup" parts of the sequences
        # by taking slices of the tensors.
        y_true_slice = y_true[:, warmup_steps:, :]
        y_pred_slice = y_pred[:, warmup_steps:, :]

        # These sliced tensors both have this shape:
        # [batch_size, sequence_length - warmup_steps, num_y_signals]

        # Calculate the MSE loss for each value in these tensors.
        # This outputs a 3-rank tensor of the same shape.
        loss = tf.losses.mean_squared_error(labels=y_true_slice,
                                            predictions=y_pred_slice)

        # Keras may reduce this across the first axis (the batch) but the
        # semantics are unclear, so to be sure we use the loss across the
        # entire tensor, we reduce it to a single scalar with the mean
        # function.
        loss_mean = tf.reduce_mean(loss)

        return loss_mean

    def plot_train(self, start_idx, length=100):
        """Plot the predicted and true output-signals.

        Args:
            start_idx: Start-index for the time-series.
            length: Sequence-length to process and plot.

        Returns:
            None

        """
        # Plot
        self._plot_comparison(start_idx, length=length, train=True)

    def plot_test(self, start_idx, length=100):
        """Plot the predicted and true output-signals.

        Args:
            start_idx: Start-index for the time-series.
            length: Sequence-length to process and plot.

        Returns:
            None

        """
        # Plot
        self._plot_comparison(start_idx, length=length, train=False)

    def _plot_comparison(self, start_idx, length=100, train=True):
        """Plot the predicted and true output-signals.

        Args:
            start_idx: Start-index for the time-series.
            length: Sequence-length to process and plot.
            train: Boolean whether to use training- or test-set.

        Returns:
            None

        """
        # Initialize key variables
        datetimes = {}
        num_train = self.training_rows

        # End-index for the sequences.
        end_idx = start_idx + length

        # Variables for date formatting
        days = mdates.DayLocator()      # Every day
        months = mdates.MonthLocator()  # Every month
        months_format = mdates.DateFormatter('%b %Y')
        days_format = mdates.DateFormatter('%d')

        # Assign other variables dependent on the type of data we
        # are plotting
        if train is True:
            # Use training-data.
            x_values = self._x_train_scaled[start_idx:end_idx]
            y_true = self._y_train[start_idx:end_idx]
            shim = 'Train'

            # Datetimes to use for training
            datetimes[shim] = self._data.datetime()[
                :num_train][start_idx:end_idx]
        else:
            # Scale the data
            x_test_scaled = self._x_scaler.transform(
                self._data.vectors_test_all())

            # Use test-data.
            x_values = x_test_scaled[start_idx:end_idx]
            y_true = self._yv_test[start_idx:end_idx]
            shim = 'Test'

            # Datetimes to use for testing
            datetimes[shim] = self._data.datetime()[
                num_train:][start_idx:end_idx]

        # Input-signals for the model.
        x_values = np.expand_dims(x_values, axis=0)

        # Use the model to predict the output-signals.
        y_pred = self._model.predict(x_values)

        # The output of the model is between 0 and 1. Do an inverse map to
        # get it back to the scale of the original data-set.
        y_pred_rescaled = self._y_scaler.inverse_transform(y_pred[0])

        # For each output-signal.
        for signal in range(len(self._data.labels())):
            # Assign other variables dependent on the type of data plot
            if train is True:
                # Only get current values that are a part of the
                # training data
                current = self._y_current[:num_train][start_idx:end_idx]

                # The number of datetimes for the 'actual' plot must match
                # that of current values
                datetimes['actual'] = self._data.datetime()[
                    :num_train][start_idx:end_idx]
            else:
                # Only get current values that are a part of the test data.
                current = self._y_current[
                    num_train:][start_idx:]

                # The number of datetimes for the 'actual' plot must match
                # that of current values
                datetimes['actual'] = self._data.datetime()[
                    num_train:][start_idx:]

            # Create a filename
            filename = (
                '/tmp/batch_{}_epochs_{}_training_{}_{}_{}_{}.png').format(
                    self._data.batch_size(), self._data.epochs(),
                    num_train, signal, int(time.time()), shim)

            # Get the output-signal predicted by the model.
            signal_pred = y_pred_rescaled[:, signal]

            # Get the true output-signal from the data-set.
            signal_true = y_true[:, signal]

            # Create a new chart
            (fig, axis) = plt.subplots(figsize=(15, 5))

            # Plot and compare the two signals.
            axis.plot(
                datetimes[shim][:len(signal_true)],
                signal_true,
                label='Current +{}'.format(self._data.labels()[signal]))
            axis.plot(
                datetimes[shim][:len(signal_pred)],
                signal_pred,
                label='Prediction')
            axis.plot(datetimes['actual'], current, label='Current')

            # Set plot labels and titles
            axis.set_title('{1}ing Forecast ({0} Future Intervals)'.format(
                self._data.labels()[signal], shim))
            axis.set_ylabel('Values')
            axis.legend(
                bbox_to_anchor=(1.04, 0.5),
                loc='center left', borderaxespad=0)

            # Add gridlines and ticks
            ax = plt.gca()
            ax.grid(True)

            # Add major gridlines
            ax.xaxis.grid(which='major', color='black', alpha=0.2)
            ax.yaxis.grid(which='major', color='black', alpha=0.2)

            # Add minor ticks (they must be turned on first)
            ax.minorticks_on()
            ax.xaxis.grid(which='minor', color='black', alpha=0.1)
            ax.yaxis.grid(which='minor', color='black', alpha=0.1)

            # Format the tick labels
            ax.xaxis.set_major_locator(months)
            ax.xaxis.set_major_formatter(months_format)
            ax.xaxis.set_minor_locator(days)

            # Remove tick marks
            ax.tick_params(axis='both', which='both', length=0)

            # Print day numbers on xaxis for Test data only
            if train is False:
                ax.xaxis.set_minor_formatter(days_format)
                plt.setp(ax.xaxis.get_minorticklabels(), rotation=90)

            # Rotate and right-align the x labels, and move the bottom of
            # the axes up to make room for them
            fig.autofmt_xdate()

            # Plot grey box for warmup-period if we are working with
            # training data and the start is within the warmup-period
            if (0 < start_idx < self._warmup_steps):
                if train is True:
                    plt.axvspan(
                        datetimes[shim][start_idx],
                        datetimes[shim][self._warmup_steps],
                        facecolor='black', alpha=0.15)

            # Show and save the image
            if self.display is True:
                fig.savefig(filename, bbox_inches='tight')
                plt.show()
            else:
                fig.savefig(filename, bbox_inches='tight')
                print('> Saving file: {}'.format(filename))

            # Close figure
            plt.close(fig=fig)

    def plot_accuracy(self):
        """Plot the training and validation accuracy and loss history.

        Args:
            None

        Returns:
            None

        """
        # Summarize history for accuracy
        plt.figure(figsize=(15, 5))
        plt.plot(self._history.history['acc'])
        plt.plot(self._history.history['val_acc'])
        plt.title('Model Accuracy')
        plt.ylabel('Accuracy')
        plt.xlabel('Epoch')
        plt.legend(['train', 'test'], loc='upper left')
        plt.show()

        # Summarize history for loss
        plt.figure(figsize=(15, 5))
        plt.plot(self._history.history['loss'])
        plt.plot(self._history.history['val_loss'])
        plt.title('Model loss')
        plt.ylabel('Loss')
        plt.xlabel('Epoch')
        plt.legend(['Train', 'Test'], loc='upper left')
        plt.show()
class RNNGRU(object):
    """Process data for ingestion."""

    def __init__(self, data, periods=288, batch_size=64, sequence_length=20,
                 warmup_steps=50, epochs=20, display=False):
        """Instantiate the class.

        Args:
            data: Dict of values keyed by timestamp
            periods: Number of timestamp data points per vector
            batch_size: Size of batch
            sequence_length: Length of vectors for each target
            warmup_steps: Number of warmup time-steps ignored by the loss

        Returns:
            None

        """
        # Initialize key variables
        self.periods = periods
        self.target_names = ['value']
        self.warmup_steps = warmup_steps
        self.epochs = epochs
        self.batch_size = batch_size
        self.display = display

        ###################################
        # TensorFlow wizardry
        config = tf.ConfigProto()

        # Don't pre-allocate memory; allocate as-needed
        config.gpu_options.allow_growth = True

        # Only allow 95% of the GPU memory to be allocated
        config.gpu_options.per_process_gpu_memory_fraction = 0.95

        # Crash with DeadlineExceeded instead of hanging forever when your
        # queues get full/empty
        config.operation_timeout_in_ms = 60000

        # Create a session with the above options specified.
        backend.tensorflow_backend.set_session(tf.Session(config=config))
        ###################################

        # Get data
        (x_data, y_data) = convert_data(data, periods, self.target_names)

        print('\n> Numpy Data Type: {}'.format(type(x_data)))
        print("> Numpy Data Shape: {}".format(x_data.shape))
        print("> Numpy Data Row[0]: {}".format(x_data[0]))
        print('> Numpy Targets Type: {}'.format(type(y_data)))
        print("> Numpy Targets Shape: {}".format(y_data.shape))

        '''
        This is the number of observations (aka. data-points or samples)
        in the data-set:
        '''
        num_data = len(x_data)

        '''
        This is the fraction of the data-set that will be used for the
        training-set:
        '''
        train_split = 0.9

        '''
        This is the number of observations in the training-set:
        '''
        self.num_train = int(train_split * num_data)

        '''
        This is the number of observations in the test-set:
        '''
        num_test = num_data - self.num_train

        print('> Number of Samples: {}'.format(num_data))
        print("> Number of Training Samples: {}".format(self.num_train))
        print("> Number of Test Samples: {}".format(num_test))

        # Create test and training data
        x_train = x_data[0:self.num_train]
        x_test = x_data[self.num_train:]
        self.y_train = y_data[0:self.num_train]
        self.y_test = y_data[self.num_train:]
        self.num_x_signals = x_data.shape[1]
        self.num_y_signals = y_data.shape[1]

        print("> Training Minimum Value:", np.min(x_train))
        print("> Training Maximum Value:", np.max(x_train))

        '''
        steps_per_epoch is the number of batch iterations before a
        training epoch is considered finished.
        '''
        self.steps_per_epoch = int(self.num_train / batch_size) + 1
        print("> Epochs:", epochs)
        print("> Batch Size:", batch_size)
        print("> Steps:", self.steps_per_epoch)

        '''
        Calculate the estimated memory footprint.
        '''
        print("> Data size: {:.2f} Bytes".format(x_data.nbytes))

        '''
        if memory_footprint > 7:
            print('\n\n{}\n\n'.format(
                '> Estimated GPU memory usage too large. Use new '
                'parameters to reduce the footprint.'))
            sys.exit(0)
        '''

        '''
        The neural network works best on values roughly between -1 and 1,
        so we need to scale the data before it is being input to the
        neural network. We can use scikit-learn for this.

        We first create a scaler-object for the input-signals. Then we
        detect the range of values from the training-data and scale the
        training-data.
        '''
        x_scaler = MinMaxScaler()
        self.x_train_scaled = x_scaler.fit_transform(x_train)

        print('> Scaled Training Minimum Value: {}'.format(
            np.min(self.x_train_scaled)))
        print('> Scaled Training Maximum Value: {}'.format(
            np.max(self.x_train_scaled)))

        self.x_test_scaled = x_scaler.transform(x_test)

        '''
        The target-data comes from the same data-set as the input-signals,
        because it is the weather-data for one of the cities that is merely
        time-shifted. But the target-data could be from a different source
        with different value-ranges, so we create a separate scaler-object
        for the target-data.
        '''
        self.y_scaler = MinMaxScaler()
        self.y_train_scaled = self.y_scaler.fit_transform(self.y_train)
        y_test_scaled = self.y_scaler.transform(self.y_test)

        # Data Generator
        '''
        The data-set has now been prepared as 2-dimensional numpy arrays.
        The training-data has almost 300k observations, consisting of 20
        input-signals and 3 output-signals.

        These are the array-shapes of the input and output data:
        '''
        print('> Scaled Training Data Shape: {}'.format(
            self.x_train_scaled.shape))
        print('> Scaled Training Targets Shape: {}'.format(
            self.y_train_scaled.shape))

        # We then create the batch-generator.
        generator = self.batch_generator(batch_size, sequence_length)

        # Validation Set
        '''
        The neural network trains quickly so we can easily run many
        training epochs. But then there is a risk of overfitting the model
        to the training-set so it does not generalize well to unseen data.
        We will therefore monitor the model's performance on the test-set
        after each epoch and only save the model's weights if the
        performance is improved on the test-set.

        The batch-generator randomly selects a batch of short sequences
        from the training-data and uses that during training. But for the
        validation-data we will instead run through the entire sequence
        from the test-set and measure the prediction accuracy on that
        entire sequence.
        '''
        validation_data = (np.expand_dims(self.x_test_scaled, axis=0),
                           np.expand_dims(y_test_scaled, axis=0))

        # Create the Recurrent Neural Network
        self.model = Sequential()

        '''
        We can now add a Gated Recurrent Unit (GRU) to the network. This
        will have 512 outputs for each time-step in the sequence.

        Note that because this is the first layer in the model, Keras
        needs to know the shape of its input, which is a batch of
        sequences of arbitrary length (indicated by None), where each
        observation has a number of input-signals (num_x_signals).
        '''
        self.model.add(GRU(
            units=512,
            return_sequences=True,
            input_shape=(None, self.num_x_signals,)))

        '''
        The GRU outputs a batch of sequences of 512 values. We want to
        predict 3 output-signals, so we add a fully-connected (or dense)
        layer which maps 512 values down to only 3 values.

        The output-signals in the data-set have been limited to be between
        0 and 1 using a scaler-object. So we also limit the output of the
        neural network using the Sigmoid activation function, which
        squashes the output to be between 0 and 1.
        '''
        self.model.add(Dense(self.num_y_signals, activation='sigmoid'))

        '''
        A problem with using the Sigmoid activation function, is that we
        can now only output values in the same range as the training-data.

        For example, if the training-data only has temperatures between
        -20 and +30 degrees, then the scaler-object will map -20 to 0 and
        +30 to 1.

        So if we limit the output of the neural network to be between 0
        and 1 using the Sigmoid function, this can only be mapped back to
        temperature values between -20 and +30.

        We can use a linear activation function on the output instead.
This allows for the output to take on arbitrary values. It might work with the standard initialization for a simple network architecture, but for more complicated network architectures e.g. with more layers, it might be necessary to initialize the weights with smaller values to avoid NaN values during training. You may need to experiment with this to get it working. ''' if False: # Maybe use lower init-ranges. # init = RandomUniform(minval=-0.05, maxval=0.05) init = RandomUniform(minval=-0.05, maxval=0.05) self.model.add(Dense( self.num_y_signals, activation='linear', kernel_initializer=init)) # Compile Model ''' This is the optimizer and the beginning learning-rate that we will use. We then compile the Keras model so it is ready for training. ''' optimizer = RMSprop(lr=1e-3) self.model.compile(loss=self.loss_mse_warmup, optimizer=optimizer) ''' This is a very small model with only two layers. The output shape of (None, None, 3) means that the model will output a batch with an arbitrary number of sequences, each of which has an arbitrary number of observations, and each observation has 3 signals. This corresponds to the 3 target signals we want to predict. ''' print('> Model Summary:\n') print(self.model.summary()) # Callback Functions ''' During training we want to save checkpoints and log the progress to TensorBoard so we create the appropriate callbacks for Keras. This is the callback for writing checkpoints during training. ''' path_checkpoint = '/tmp/23_checkpoint.keras' callback_checkpoint = ModelCheckpoint(filepath=path_checkpoint, monitor='val_loss', verbose=1, save_weights_only=True, save_best_only=True) ''' This is the callback for stopping the optimization when performance worsens on the validation-set. ''' callback_early_stopping = EarlyStopping(monitor='val_loss', patience=5, verbose=1) ''' This is the callback for writing the TensorBoard log during training. ''' callback_tensorboard = TensorBoard(log_dir='/tmp/23_logs/', histogram_freq=0, write_graph=False) ''' This callback reduces the learning-rate for the optimizer if the validation-loss has not improved since the last epoch (as indicated by patience=0). The learning-rate will be reduced by multiplying it with the given factor. We set a start learning-rate of 1e-3 above, so multiplying it by 0.1 gives a learning-rate of 1e-4. We don't want the learning-rate to go any lower than this. ''' callback_reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, min_lr=1e-4, patience=0, verbose=1) callbacks = [callback_early_stopping, callback_checkpoint, callback_tensorboard, callback_reduce_lr] # Train the Recurrent Neural Network '''We can now train the neural network. Note that a single "epoch" does not correspond to a single processing of the training-set, because of how the batch-generator randomly selects sub-sequences from the training-set. Instead we have selected steps_per_epoch so that one "epoch" is processed in a few minutes. With these settings, each "epoch" took about 2.5 minutes to process on a GTX 1070. After 14 "epochs" the optimization was stopped because the validation-loss had not decreased for 5 "epochs". This optimization took about 35 minutes to finish. Also note that the loss sometimes becomes NaN (not-a-number). This is often resolved by restarting and running the Notebook again. But it may also be caused by your neural network architecture, learning-rate, batch-size, sequence-length, etc. in which case you may have to modify those settings. 
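        One low-effort safeguard, assuming the installed Keras version
        provides it, is the TerminateOnNaN callback, which stops training
        as soon as a NaN loss appears instead of letting it continue
        silently:

            from keras.callbacks import TerminateOnNaN
            callbacks.append(TerminateOnNaN())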
        '''
        print('\n> Starting data training\n')

        try:
            self.model.fit_generator(
                generator=generator,
                epochs=self.epochs,
                steps_per_epoch=self.steps_per_epoch,
                validation_data=validation_data,
                callbacks=callbacks)
        except Exception as error:
            print('\n>{}\n'.format(error))
            traceback.print_exc()
            sys.exit(1)

        # Load Checkpoint
        '''
        Because we use early-stopping when training the model, it is
        possible that the model's performance has worsened on the test-set
        for several epochs before training was stopped. We therefore reload
        the last saved checkpoint, which should have the best performance
        on the test-set.
        '''
        print('> Loading model weights')
        try:
            self.model.load_weights(path_checkpoint)
        except Exception as error:
            print('\n> Error trying to load checkpoint.\n\n{}'.format(error))
            traceback.print_exc()
            sys.exit(1)

        # Performance on Test-Set
        '''
        We can now evaluate the model's performance on the test-set. This
        function expects a batch of data, but we will just use one long
        time-series for the test-set, so we just expand the
        array-dimensionality to create a batch with that one sequence.
        '''
        result = self.model.evaluate(
            x=np.expand_dims(self.x_test_scaled, axis=0),
            y=np.expand_dims(y_test_scaled, axis=0))

        print('> Loss (test-set): {}'.format(result))

        # If you have several metrics you can use this instead.
        if False:
            for res, metric in zip(result, self.model.metrics_names):
                print('{0}: {1:.3e}'.format(metric, res))

    def batch_generator(self, batch_size, sequence_length):
        """Create random batches of training-data.

        Args:
            batch_size: Size of batch
            sequence_length: Length of sequence

        Yields:
            (x_batch, y_batch): Tuple of batch arrays

        """
        # Infinite loop.
        while True:
            # Allocate a new array for the batch of input-signals.
            x_shape = (batch_size, sequence_length, self.num_x_signals)
            x_batch = np.zeros(shape=x_shape, dtype=np.float16)

            # Allocate a new array for the batch of output-signals.
            y_shape = (batch_size, sequence_length, self.num_y_signals)
            y_batch = np.zeros(shape=y_shape, dtype=np.float16)

            # Fill the batch with random sequences of data.
            for i in range(batch_size):
                # Get a random start-index.
                # This points somewhere into the training-data.
                idx = np.random.randint(self.num_train - sequence_length)

                # Copy the sequences of data starting at this index.
                x_batch[i] = self.x_train_scaled[idx:idx+sequence_length]
                y_batch[i] = self.y_train_scaled[idx:idx+sequence_length]

            yield (x_batch, y_batch)

    def loss_mse_warmup(self, y_true, y_pred):
        """Calculate the Mean Squared Error.

        Calculate the Mean Squared Error between y_true and y_pred, but
        ignore the beginning "warmup" part of the sequences.

        We will use Mean Squared Error (MSE) as the loss-function that will
        be minimized. This measures how closely the model's output matches
        the true output signals.

        However, at the beginning of a sequence, the model has only seen
        input-signals for a few time-steps, so its generated output may be
        very inaccurate. Using the loss-value for the early time-steps may
        cause the model to distort its later output. We therefore give the
        model a "warmup-period" of 50 time-steps where we don't use its
        accuracy in the loss-function, in the hope of improving the
        accuracy of later time-steps.

        Args:
            y_true: Desired output.
            y_pred: Model's output.

        Returns:
            loss_mean: Mean Squared Error

        """
        warmup_steps = self.warmup_steps

        # The shape of both input tensors is:
        # [batch_size, sequence_length, num_y_signals].

        # Ignore the "warmup" parts of the sequences
        # by taking slices of the tensors.
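        # As a concrete example (the shapes here are assumed, for
        # illustration only): with warmup_steps=50 and y_true of shape
        # (64, 100, 3), the slice y_true[:, 50:, :] has shape (64, 50, 3),
        # so the first 50 time-steps of every sequence contribute nothing
        # to the loss.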
        y_true_slice = y_true[:, warmup_steps:, :]
        y_pred_slice = y_pred[:, warmup_steps:, :]

        # These sliced tensors both have this shape:
        # [batch_size, sequence_length - warmup_steps, num_y_signals]

        # Calculate the MSE between the sliced tensors. Note that
        # tf.losses.mean_squared_error already reduces the result to a
        # scalar with its default reduction.
        loss = tf.losses.mean_squared_error(labels=y_true_slice,
                                            predictions=y_pred_slice)

        # The extra reduce_mean is a harmless safeguard: it guarantees a
        # single scalar loss even if a different reduction setting ever
        # returns a higher-rank tensor.
        loss_mean = tf.reduce_mean(loss)

        return loss_mean

    def plot_comparison(self, start_idx, length=100, train=True):
        """Plot the predicted and true output-signals.

        Args:
            start_idx: Start-index for the time-series.
            length: Sequence-length to process and plot.
            train: Boolean whether to use training- or test-set.

        Returns:
            None

        """
        if train:
            # Use training-data.
            x_values = self.x_train_scaled
            y_true = self.y_train
            shim = 'Train'
        else:
            # Use test-data.
            x_values = self.x_test_scaled
            y_true = self.y_test
            shim = 'Test'

        # End-index for the sequences.
        end_idx = start_idx + length

        # Select the sequences from the given start-index and
        # of the given length.
        x_values = x_values[start_idx:end_idx]
        y_true = y_true[start_idx:end_idx]

        # Input-signals for the model.
        x_values = np.expand_dims(x_values, axis=0)

        # Use the model to predict the output-signals.
        y_pred = self.model.predict(x_values)

        # The output of the model is between 0 and 1.
        # Do an inverse map to get it back to the scale
        # of the original data-set.
        y_pred_rescaled = self.y_scaler.inverse_transform(y_pred[0])

        # For each output-signal.
        for signal in range(len(self.target_names)):
            # Create a filename
            filename = (
                '/tmp/batch_{}_epochs_{}_training_{}_{}_{}_{}.png').format(
                    self.batch_size, self.epochs, self.num_train, signal,
                    int(time.time()), shim)

            # Get the output-signal predicted by the model.
            signal_pred = y_pred_rescaled[:, signal]

            # Get the true output-signal from the data-set.
            signal_true = y_true[:, signal]

            # Make the plotting-canvas bigger.
            plt.figure(figsize=(15, 5))

            # Plot and compare the two signals.
            plt.plot(signal_true, label='true')
            plt.plot(signal_pred, label='pred')

            # Plot grey box for warmup-period.
            _ = plt.axvspan(
                0, self.warmup_steps, facecolor='black', alpha=0.15)

            # Plot labels etc.
            plt.ylabel(self.target_names[signal])
            plt.legend()

            # Save the image, and also display it when requested.
            plt.savefig(filename, bbox_inches='tight')
            if self.display:
                plt.show()
            else:
                print('> Saving file: {}'.format(filename))
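    '''
    Example of how plot_comparison might be called once training has
    finished (the variable names here are assumptions, not part of the
    class):

        rnn = RNNGRU(data)
        rnn.plot_comparison(start_idx=0, length=200, train=False)
        # Writes a PNG per target-signal under /tmp/, or displays it when
        # the class was created with display=True.
    '''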
def model(self, params=None): """Create the Recurrent Neural Network. Args: None Returns: _model: RNN model """ # Initialize key variables if params is None: _hyperparameters = self.hyperparameters else: _hyperparameters = params # Calculate the steps per epoch epoch_steps = int( self.training_rows / _hyperparameters['batch_size']) + 1 # Create the model object _model = Sequential() ''' We can now add a Gated Recurrent Unit (GRU) to the network. This will have 512 outputs for each time-step in the sequence. Note that because this is the first layer in the model, Keras needs to know the shape of its input, which is a batch of sequences of arbitrary length (indicated by None), where each observation has a number of input-signals (num_x_signals). ''' _model.add(GRU( units=_hyperparameters['units'], return_sequences=True, recurrent_dropout=_hyperparameters['dropout'], input_shape=(None, self._training_vector_count,))) for _ in range(1, _hyperparameters['layers']): _model.add(GRU( units=_hyperparameters['units'], recurrent_dropout=_hyperparameters['dropout'], return_sequences=True)) ''' The GRU outputs a batch of sequences of 512 values. We want to predict 3 output-signals, so we add a fully-connected (or dense) layer which maps 512 values down to only 3 values. The output-signals in the data-set have been limited to be between 0 and 1 using a scaler-object. So we also limit the output of the neural network using the Sigmoid activation function, which squashes the output to be between 0 and 1. ''' _model.add( Dense(self._training_class_count, activation='sigmoid')) ''' A problem with using the Sigmoid activation function, is that we can now only output values in the same range as the training-data. For example, if the training-data only has values between -20 and +30, then the scaler-object will map -20 to 0 and +30 to 1. So if we limit the output of the neural network to be between 0 and 1 using the Sigmoid function, this can only be mapped back to values between -20 and +30. We can use a linear activation function on the output instead. This allows for the output to take on arbitrary values. It might work with the standard initialization for a simple network architecture, but for more complicated network architectures e.g. with more layers, it might be necessary to initialize the weights with smaller values to avoid NaN values during training. You may need to experiment with this to get it working. ''' if False: # Maybe use lower init-ranges. init = RandomUniform(minval=-0.05, maxval=0.05) _model.add(Dense( self._training_class_count, activation='linear', kernel_initializer=init)) # Compile Model ''' This is the optimizer and the beginning learning-rate that we will use. We then compile the Keras model so it is ready for training. ''' optimizer = RMSprop(lr=1e-3) _model.compile( loss=self._loss_mse_warmup, optimizer=optimizer, metrics=['accuracy']) ''' This is a very small model with only two layers. The output shape of (None, None, 3) means that the model will output a batch with an arbitrary number of sequences, each of which has an arbitrary number of observations, and each observation has 3 signals. This corresponds to the 3 target signals we want to predict. ''' print('\n> Model Summary:\n') print(_model.summary()) # Create the batch-generator. generator = self._batch_generator( _hyperparameters['batch_size'], _hyperparameters['sequence_length']) # Validation Set ''' The neural network trains quickly so we can easily run many training epochs. 
But then there is a risk of overfitting the model to the training-set so it does not generalize well to unseen data. We will therefore monitor the model's performance on the test-set after each epoch and only save the model's weights if the performance is improved on the test-set. The batch-generator randomly selects a batch of short sequences from the training-data and uses that during training. But for the validation-data we will instead run through the entire sequence from the test-set and measure the prediction accuracy on that entire sequence. ''' validation_data = (np.expand_dims(self._x_validation_scaled, axis=0), np.expand_dims(self._y_validation_scaled, axis=0)) # Callback Functions ''' During training we want to save checkpoints and log the progress to TensorBoard so we create the appropriate callbacks for Keras. This is the callback for writing checkpoints during training. ''' callback_checkpoint = ModelCheckpoint(filepath=self._path_checkpoint, monitor='val_loss', verbose=1, save_weights_only=True, save_best_only=True) ''' This is the callback for stopping the optimization when performance worsens on the validation-set. ''' callback_early_stopping = EarlyStopping( monitor='val_loss', patience=_hyperparameters['patience'], verbose=1) ''' This is the callback for writing the TensorBoard log during training. ''' callback_tensorboard = TensorBoard(log_dir='/tmp/23_logs/', histogram_freq=0, write_graph=False) ''' This callback reduces the learning-rate for the optimizer if the validation-loss has not improved since the last epoch (as indicated by patience=0). The learning-rate will be reduced by multiplying it with the given factor. We set a start learning-rate of 1e-3 above, so multiplying it by 0.1 gives a learning-rate of 1e-4. We don't want the learning-rate to go any lower than this. ''' callback_reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, min_lr=1e-4, patience=0, verbose=1) callbacks = [callback_early_stopping, callback_checkpoint, callback_tensorboard, callback_reduce_lr] # Train the Recurrent Neural Network '''We can now train the neural network. Note that a single "epoch" does not correspond to a single processing of the training-set, because of how the batch-generator randomly selects sub-sequences from the training-set. Instead we have selected steps_per_epoch so that one "epoch" is processed in a few minutes. With these settings, each "epoch" took about 2.5 minutes to process on a GTX 1070. After 14 "epochs" the optimization was stopped because the validation-loss had not decreased for 5 "epochs". This optimization took about 35 minutes to finish. Also note that the loss sometimes becomes NaN (not-a-number). This is often resolved by restarting and running the Notebook again. But it may also be caused by your neural network architecture, learning-rate, batch-size, sequence-length, etc. in which case you may have to modify those settings. ''' print('\n> Parameters for training\n') pprint(_hyperparameters) print('\n> Starting data training\n') _model.fit_generator( generator=generator, epochs=_hyperparameters['epochs'], steps_per_epoch=epoch_steps, validation_data=validation_data, callbacks=callbacks) # Return return _model
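    '''
    The params argument of model() above is expected to be a dict carrying
    the keys used throughout that method. A sketch of one such dict (the
    values are illustrative, not tuned):

        hyperparameters = {
            'units': 512,
            'dropout': 0.1,
            'layers': 2,
            'batch_size': 64,
            'sequence_length': 20,
            'patience': 5,
            'epochs': 20,
        }
        model = obj.model(params=hyperparameters)  # `obj` is an assumed
                                                   # instance of this class
    '''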
def _model(self): """Create the Recurrent Neural Network. Args: None Returns: _model: RNN model """ # Create the model object _model = Sequential() ''' We can now add a Gated Recurrent Unit (GRU) to the network. This will have 512 outputs for each time-step in the sequence. Note that because this is the first layer in the model, Keras needs to know the shape of its input, which is a batch of sequences of arbitrary length (indicated by None), where each observation has a number of input-signals (num_x_signals). ''' _model.add(GRU( units=self._units, return_sequences=True, recurrent_dropout=self._dropout, input_shape=(None, self._training_vector_count,))) for _ in range(0, self._layers): _model.add(GRU( units=self._units, recurrent_dropout=self._dropout, return_sequences=True)) ''' The GRU outputs a batch of sequences of 512 values. We want to predict 3 output-signals, so we add a fully-connected (or dense) layer which maps 512 values down to only 3 values. The output-signals in the data-set have been limited to be between 0 and 1 using a scaler-object. So we also limit the output of the neural network using the Sigmoid activation function, which squashes the output to be between 0 and 1.''' _model.add( Dense(self._training_class_count, activation='sigmoid')) ''' A problem with using the Sigmoid activation function, is that we can now only output values in the same range as the training-data. For example, if the training-data only has temperatures between -20 and +30 degrees, then the scaler-object will map -20 to 0 and +30 to 1. So if we limit the output of the neural network to be between 0 and 1 using the Sigmoid function, this can only be mapped back to temperature values between -20 and +30. We can use a linear activation function on the output instead. This allows for the output to take on arbitrary values. It might work with the standard initialization for a simple network architecture, but for more complicated network architectures e.g. with more layers, it might be necessary to initialize the weights with smaller values to avoid NaN values during training. You may need to experiment with this to get it working. ''' if False: # Maybe use lower init-ranges. # init = RandomUniform(minval=-0.05, maxval=0.05) init = RandomUniform(minval=-0.05, maxval=0.05) _model.add(Dense( self._training_class_count, activation='linear', kernel_initializer=init)) # Compile Model ''' This is the optimizer and the beginning learning-rate that we will use. We then compile the Keras model so it is ready for training. ''' optimizer = RMSprop(lr=1e-3) _model.compile( loss=self._loss_mse_warmup, optimizer=optimizer, metrics=['accuracy']) ''' This is a very small model with only two layers. The output shape of (None, None, 3) means that the model will output a batch with an arbitrary number of sequences, each of which has an arbitrary number of observations, and each observation has 3 signals. This corresponds to the 3 target signals we want to predict. ''' print('> Model Summary:\n') print(_model.summary()) # Return return _model
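    '''
    Note that the loop above runs range(0, self._layers), so the finished
    model contains self._layers GRU layers in addition to the first one. A
    quick way to verify the count on a built model (assuming an instance
    named `net`; the name is illustrative):

        model = net._model()
        print(sum(isinstance(layer, GRU) for layer in model.layers))
        # -> self._layers + 1
    '''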
x_test /= 255 print(x_train.shape[0], 'train samples') print(x_test.shape[0], 'test samples') # convert class vectors to binary class matrices y_train = tf.keras.utils.to_categorical(y_train, num_classes) y_test = tf.keras.utils.to_categorical(y_test, num_classes) model = Sequential() model.add(Dense(512, activation='relu', input_shape=(784,))) model.add(Dropout(0.2)) model.add(Dense(512, activation='relu')) model.add(Dropout(0.2)) model.add(Dense(num_classes, activation='softmax')) model.summary() model.compile(loss='categorical_crossentropy', optimizer=RMSprop(), metrics=['accuracy']) history = model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(x_test, y_test)) score = model.evaluate(x_test, y_test, verbose=0) print('Test loss:', score[0]) print('Test accuracy:', score[1]) assert score[1] > 0.95
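'''
For reference, the to_categorical conversion used above turns integer class
labels into one-hot rows, e.g.:

    tf.keras.utils.to_categorical([0, 2], num_classes=3)
    # -> [[1., 0., 0.],
    #     [0., 0., 1.]]
'''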
class RNNGRU(object): """Support vector machine class.""" def __init__(self, batch_size=64, sequence_length=20, warmup_steps=50, epochs=20, display=False): """Instantiate the class. Args: batch_size: Size of batch sequence_length: Length of vectors for for each target save: Save charts if True Returns: None """ # Initialize key variables self.target_names = ['Temp', 'WindSpeed', 'Pressure'] self.warmup_steps = warmup_steps self.epochs = epochs self.batch_size = batch_size self.display = display # Get data x_data, y_data = self.data() print('\n> Numpy Data Type: {}'.format(type(x_data))) print("> Numpy Data Shape: {}".format(x_data.shape)) print("> Numpy Data Row[0]: {}".format(x_data[0])) print('> Numpy Targets Type: {}'.format(type(y_data))) print("> Numpy Targets Shape: {}".format(y_data.shape)) ''' This is the number of observations (aka. data-points or samples) in the data-set: ''' num_data = len(x_data) ''' This is the fraction of the data-set that will be used for the training-set: ''' train_split = 0.9 ''' This is the number of observations in the training-set: ''' self.num_train = int(train_split * num_data) ''' This is the number of observations in the test-set: ''' num_test = num_data - self.num_train print('> Number of Samples: {}'.format(num_data)) print("> Number of Training Samples: {}".format(self.num_train)) print("> Number of Test Samples: {}".format(num_test)) print("> Batch Size: {}".format(batch_size)) steps_per_epoch = int(self.num_train/batch_size) print("> Recommended Epoch Steps: {:.2f}".format(steps_per_epoch)) # Create test and training data x_train = x_data[0:self.num_train] x_test = x_data[self.num_train:] self.y_train = y_data[0:self.num_train] self.y_test = y_data[self.num_train:] self.num_x_signals = x_data.shape[1] self.num_y_signals = y_data.shape[1] print("> Training Minimum Value:", np.min(x_train)) print("> Training Maximum Value:", np.max(x_train)) ''' The neural network works best on values roughly between -1 and 1, so we need to scale the data before it is being input to the neural network. We can use scikit-learn for this. We first create a scaler-object for the input-signals. Then we detect the range of values from the training-data and scale the training-data. ''' x_scaler = MinMaxScaler() self.x_train_scaled = x_scaler.fit_transform(x_train) print('> Scaled Training Minimum Value: {}'.format( np.min(self.x_train_scaled))) print('> Scaled Training Maximum Value: {}'.format( np.max(self.x_train_scaled))) self.x_test_scaled = x_scaler.transform(x_test) ''' The target-data comes from the same data-set as the input-signals, because it is the weather-data for one of the cities that is merely time-shifted. But the target-data could be from a different source with different value-ranges, so we create a separate scaler-object for the target-data. ''' self.y_scaler = MinMaxScaler() self.y_train_scaled = self.y_scaler.fit_transform(self.y_train) y_test_scaled = self.y_scaler.transform(self.y_test) # Data Generator ''' The data-set has now been prepared as 2-dimensional numpy arrays. The training-data has almost 300k observations, consisting of 20 input-signals and 3 output-signals. These are the array-shapes of the input and output data: ''' print('> Scaled Training Data Shape: {}'.format( self.x_train_scaled.shape)) print('> Scaled Training Targets Shape: {}'.format( self.y_train_scaled.shape)) # We then create the batch-generator. 
generator = self.batch_generator(batch_size, sequence_length) # Validation Set ''' The neural network trains quickly so we can easily run many training epochs. But then there is a risk of overfitting the model to the training-set so it does not generalize well to unseen data. We will therefore monitor the model's performance on the test-set after each epoch and only save the model's weights if the performance is improved on the test-set. The batch-generator randomly selects a batch of short sequences from the training-data and uses that during training. But for the validation-data we will instead run through the entire sequence from the test-set and measure the prediction accuracy on that entire sequence. ''' validation_data = (np.expand_dims(self.x_test_scaled, axis=0), np.expand_dims(y_test_scaled, axis=0)) # Create the Recurrent Neural Network self.model = Sequential() ''' We can now add a Gated Recurrent Unit (GRU) to the network. This will have 512 outputs for each time-step in the sequence. Note that because this is the first layer in the model, Keras needs to know the shape of its input, which is a batch of sequences of arbitrary length (indicated by None), where each observation has a number of input-signals (num_x_signals). ''' self.model.add(GRU( units=512, return_sequences=True, input_shape=(None, self.num_x_signals,))) ''' The GRU outputs a batch of sequences of 512 values. We want to predict 3 output-signals, so we add a fully-connected (or dense) layer which maps 512 values down to only 3 values. The output-signals in the data-set have been limited to be between 0 and 1 using a scaler-object. So we also limit the output of the neural network using the Sigmoid activation function, which squashes the output to be between 0 and 1.''' self.model.add(Dense(self.num_y_signals, activation='sigmoid')) ''' A problem with using the Sigmoid activation function, is that we can now only output values in the same range as the training-data. For example, if the training-data only has temperatures between -20 and +30 degrees, then the scaler-object will map -20 to 0 and +30 to 1. So if we limit the output of the neural network to be between 0 and 1 using the Sigmoid function, this can only be mapped back to temperature values between -20 and +30. We can use a linear activation function on the output instead. This allows for the output to take on arbitrary values. It might work with the standard initialization for a simple network architecture, but for more complicated network architectures e.g. with more layers, it might be necessary to initialize the weights with smaller values to avoid NaN values during training. You may need to experiment with this to get it working. ''' if False: # Maybe use lower init-ranges. init = RandomUniform(minval=-0.05, maxval=0.05) self.model.add(Dense( self.num_y_signals, activation='linear', kernel_initializer=init)) # Compile Model ''' This is the optimizer and the beginning learning-rate that we will use. We then compile the Keras model so it is ready for training. ''' optimizer = RMSprop(lr=1e-3) self.model.compile(loss=self.loss_mse_warmup, optimizer=optimizer) ''' This is a very small model with only two layers. The output shape of (None, None, 3) means that the model will output a batch with an arbitrary number of sequences, each of which has an arbitrary number of observations, and each observation has 3 signals. This corresponds to the 3 target signals we want to predict. 
''' print('> Model Summary:\n') print(self.model.summary()) # Callback Functions ''' During training we want to save checkpoints and log the progress to TensorBoard so we create the appropriate callbacks for Keras. This is the callback for writing checkpoints during training. ''' path_checkpoint = '/tmp/23_checkpoint.keras' callback_checkpoint = ModelCheckpoint(filepath=path_checkpoint, monitor='val_loss', verbose=1, save_weights_only=True, save_best_only=True) ''' This is the callback for stopping the optimization when performance worsens on the validation-set. ''' callback_early_stopping = EarlyStopping(monitor='val_loss', patience=5, verbose=1) ''' This is the callback for writing the TensorBoard log during training. ''' callback_tensorboard = TensorBoard(log_dir='/tmp/23_logs/', histogram_freq=0, write_graph=False) ''' This callback reduces the learning-rate for the optimizer if the validation-loss has not improved since the last epoch (as indicated by patience=0). The learning-rate will be reduced by multiplying it with the given factor. We set a start learning-rate of 1e-3 above, so multiplying it by 0.1 gives a learning-rate of 1e-4. We don't want the learning-rate to go any lower than this. ''' callback_reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, min_lr=1e-4, patience=0, verbose=1) callbacks = [callback_early_stopping, callback_checkpoint, callback_tensorboard, callback_reduce_lr] # Train the Recurrent Neural Network '''We can now train the neural network. Note that a single "epoch" does not correspond to a single processing of the training-set, because of how the batch-generator randomly selects sub-sequences from the training-set. Instead we have selected steps_per_epoch so that one "epoch" is processed in a few minutes. With these settings, each "epoch" took about 2.5 minutes to process on a GTX 1070. After 14 "epochs" the optimization was stopped because the validation-loss had not decreased for 5 "epochs". This optimization took about 35 minutes to finish. Also note that the loss sometimes becomes NaN (not-a-number). This is often resolved by restarting and running the Notebook again. But it may also be caused by your neural network architecture, learning-rate, batch-size, sequence-length, etc. in which case you may have to modify those settings. ''' self.model.fit_generator( generator=generator, epochs=self.epochs, steps_per_epoch=steps_per_epoch, validation_data=validation_data, callbacks=callbacks) # Load Checkpoint ''' Because we use early-stopping when training the model, it is possible that the model's performance has worsened on the test-set for several epochs before training was stopped. We therefore reload the last saved checkpoint, which should have the best performance on the test-set. ''' try: self.model.load_weights(path_checkpoint) except Exception as error: print('\n> Error trying to load checkpoint.\n\n{}'.format(error)) sys.exit(0) # Performance on Test-Set ''' We can now evaluate the model's performance on the test-set. This function expects a batch of data, but we will just use one long time-series for the test-set, so we just expand the array-dimensionality to create a batch with that one sequence. ''' result = self.model.evaluate( x=np.expand_dims(self.x_test_scaled, axis=0), y=np.expand_dims(y_test_scaled, axis=0)) print('> Loss (test-set): {}'.format(result)) # If you have several metrics you can use this instead. 
if False: for res, metric in zip(result, self.model.metrics_names): print('{0}: {1:.3e}'.format(metric, res)) def batch_generator(self, batch_size, sequence_length): """Generator function for creating random batches of training-data. Args: batch_size: Size of batch sequence_length: Length of sequence Returns: (x_batch, y_batch) """ # Infinite loop. while True: # Allocate a new array for the batch of input-signals. x_shape = (batch_size, sequence_length, self.num_x_signals) x_batch = np.zeros(shape=x_shape, dtype=np.float16) # Allocate a new array for the batch of output-signals. y_shape = (batch_size, sequence_length, self.num_y_signals) y_batch = np.zeros(shape=y_shape, dtype=np.float16) # Fill the batch with random sequences of data. for i in range(batch_size): # Get a random start-index. # This points somewhere into the training-data. idx = np.random.randint(self.num_train - sequence_length) # Copy the sequences of data starting at this index. x_batch[i] = self.x_train_scaled[idx:idx+sequence_length] y_batch[i] = self.y_train_scaled[idx:idx+sequence_length] yield (x_batch, y_batch) def plot_comparison(self, start_idx, length=100, train=True): """Plot the predicted and true output-signals. Args: start_idx: Start-index for the time-series. length: Sequence-length to process and plot. train: Boolean whether to use training- or test-set. Returns: None """ if train: # Use training-data. x_values = self.x_train_scaled y_true = self.y_train shim = 'Train' else: # Use test-data. x_values = self.x_test_scaled y_true = self.y_test shim = 'Test' # End-index for the sequences. end_idx = start_idx + length # Select the sequences from the given start-index and # of the given length. x_values = x_values[start_idx:end_idx] y_true = y_true[start_idx:end_idx] # Input-signals for the model. x_values = np.expand_dims(x_values, axis=0) # Use the model to predict the output-signals. y_pred = self.model.predict(x_values) # The output of the model is between 0 and 1. # Do an inverse map to get it back to the scale # of the original data-set. y_pred_rescaled = self.y_scaler.inverse_transform(y_pred[0]) # For each output-signal. for signal in range(len(self.target_names)): # Create a filename filename = ( '/tmp/batch_{}_epochs_{}_training_{}_{}_{}_{}.png').format( self.batch_size, self.epochs, self.num_train, signal, int(time.time()), shim) # Get the output-signal predicted by the model. signal_pred = y_pred_rescaled[:, signal] # Get the true output-signal from the data-set. signal_true = y_true[:, signal] # Make the plotting-canvas bigger. plt.figure(figsize=(15, 5)) # Plot and compare the two signals. plt.plot(signal_true, label='true') plt.plot(signal_pred, label='pred') # Plot grey box for warmup-period. _ = plt.axvspan( 0, self.warmup_steps, facecolor='black', alpha=0.15) # Plot labels etc. plt.ylabel(self.target_names[signal]) plt.legend() # Show and save the image if self.display is True: plt.savefig(filename, bbox_inches='tight') plt.show() else: plt.savefig(filename, bbox_inches='tight') print('> Saving file: {}'.format(filename)) def data(self): """Get data to analyze. 
        Args:
            None

        Returns:
            (x_data, y_data): X and Y values as numpy arrays

        """
        # Download data
        weather.maybe_download_and_extract()

        # Import data into Pandas dataframe
        pandas_df = weather.load_resampled_data()
        print('\n> First Rows of Data:\n\n{}'.format(pandas_df.head(3)))

        # Print the cities
        cities = weather.cities
        print('\n> Cities: {}'.format(cities))

        # Print dataframe shape
        print(
            '> Dataframe shape (Original): {}'.format(
                pandas_df.values.shape))

        # Drop the two signals that have missing data. (Columns with NaNs)
        pandas_df.drop(('Esbjerg', 'Pressure'), axis=1, inplace=True)
        pandas_df.drop(('Roskilde', 'Pressure'), axis=1, inplace=True)

        # Print dataframe shape
        print('> Dataframe shape (New): {}'.format(pandas_df.values.shape))

        # Verify that the columns have been dropped
        print(
            '\n> First Rows of Updated Data:\n\n{}'.format(
                pandas_df.head(1)))

        # Add Data
        '''
        We can add some input-signals to the data that may help our model
        in making predictions.

        For example, given just a temperature of 10 degrees Celsius the
        model wouldn't know whether that temperature was measured during
        the day or the night, or during summer or winter. The model would
        have to infer this from the surrounding data-points, which might
        not be very accurate for determining whether it's an abnormally
        warm winter, or an abnormally cold summer, or whether it's day or
        night. So having this information could make a big difference in
        how accurately the model can predict the next output.

        Although the data-set does contain the date and time information
        for each observation, it is only used in the index so as to order
        the data. We will therefore add separate input-signals to the
        data-set for the day-of-year (between 1 and 366) and the
        hour-of-day (between 0 and 23).
        '''
        pandas_df['Various', 'Day'] = pandas_df.index.dayofyear
        pandas_df['Various', 'Hour'] = pandas_df.index.hour

        # Target Data for Prediction
        '''
        We will try to predict the future weather-data for this city.
        '''
        target_city = 'Odense'

        '''
        We will try to predict these signals.
        '''
        self.target_names = ['Temp', 'WindSpeed', 'Pressure']

        '''
        The following is the number of time-steps that we will shift the
        target-data. Our data-set is resampled to have an observation for
        each hour, so there are 24 observations for 24 hours.

        If we want to predict the weather 24 hours into the future, we
        shift the data 24 time-steps. If we want to predict the weather 7
        days into the future, we shift the data 7 * 24 time-steps.
        '''
        shift_days = 1
        shift_steps = shift_days * 24  # Number of hours.

        # Create a new data-frame with the time-shifted data.
        '''
        Note the negative time-shift! We want the future state targets to
        line up with the timestamp of the last value of each sample set.
        '''
        df_targets = pandas_df[
            target_city][self.target_names].shift(-shift_steps)

        '''
        WARNING! You should double-check that you have shifted the data in
        the right direction! We want to predict the future, not the past!

        The shifted data-frame is confusing because Pandas keeps the
        original time-stamps.

        This is the first shift_steps + 5 rows of the original data-frame:
        '''
        explanatory_hours = shift_steps + 5
        print('\n> First Rows of Updated Data ({} hours):\n\n{}'.format(
            explanatory_hours,
            pandas_df[target_city][self.target_names].head(
                explanatory_hours)))

        '''
        The following is the first 5 rows of the time-shifted data-frame.
        This should be identical to the last 5 rows shown above from the
        original data, except for the time-stamp.
        '''
        print('\n> First Rows of Shifted Data - Target Labels '
              '(Notice 1980 Dates):\n\n{}'.format(df_targets.head(5)))

        '''
        The time-shifted data-frame has the same length as the original
        data-frame, but the last observations are NaN (not a number)
        because the data has been shifted backwards, so we are trying to
        shift data that does not exist in the original data-frame.
        '''
        print('\n> Last Rows of Shifted Data - Target Labels '
              '(Notice 2018 Dates):\n\n{}'.format(df_targets.tail()))

        # NumPy Arrays
        '''
        We now convert the Pandas data-frames to NumPy arrays that can be
        input to the neural network. We also remove the last part of the
        numpy arrays, because the target-data has NaN for the shifted
        period; we only want valid data, and the input- and output-arrays
        must have the same shape.

        These are the input-signals:
        '''
        x_data = pandas_df.values[0:-shift_steps]
        y_data = df_targets.values[:-shift_steps]

        # Return
        return (x_data, y_data)

    def loss_mse_warmup(self, y_true, y_pred):
        """Calculate the Mean Squared Error.

        Calculate the Mean Squared Error between y_true and y_pred, but
        ignore the beginning "warmup" part of the sequences.

        We will use Mean Squared Error (MSE) as the loss-function that will
        be minimized. This measures how closely the model's output matches
        the true output signals.

        However, at the beginning of a sequence, the model has only seen
        input-signals for a few time-steps, so its generated output may be
        very inaccurate. Using the loss-value for the early time-steps may
        cause the model to distort its later output. We therefore give the
        model a "warmup-period" of 50 time-steps where we don't use its
        accuracy in the loss-function, in the hope of improving the
        accuracy of later time-steps.

        Args:
            y_true: Desired output.
            y_pred: Model's output.

        Returns:
            loss_mean: Mean Squared Error

        """
        warmup_steps = self.warmup_steps

        # The shape of both input tensors is:
        # [batch_size, sequence_length, num_y_signals].

        # Ignore the "warmup" parts of the sequences
        # by taking slices of the tensors.
        y_true_slice = y_true[:, warmup_steps:, :]
        y_pred_slice = y_pred[:, warmup_steps:, :]

        # These sliced tensors both have this shape:
        # [batch_size, sequence_length - warmup_steps, num_y_signals]

        # Calculate the MSE between the sliced tensors. Note that
        # tf.losses.mean_squared_error already reduces the result to a
        # scalar with its default reduction.
        loss = tf.losses.mean_squared_error(labels=y_true_slice,
                                            predictions=y_pred_slice)

        # The extra reduce_mean is a harmless safeguard: it guarantees a
        # single scalar loss even if a different reduction setting ever
        # returns a higher-rank tensor.
        loss_mean = tf.reduce_mean(loss)

        return loss_mean
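'''
A quick sanity-check of the shift direction used in data() above, on a toy
Series (pandas assumed imported as pd; the values are illustrative):

    s = pd.Series([10, 20, 30, 40])
    print(s.shift(-2).tolist())
    # -> [30.0, 40.0, nan, nan]

A negative shift pulls future values backwards in time, so each row is
paired with the value that occurs shift_steps later, and the tail becomes
NaN exactly as described above.
'''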
class KerasCNN(object):
    """Convolutional neural network class for MNIST classification."""

    # Convolutional Layer 1.
    filter_size1 = 5    # Convolution filters are 5 x 5 pixels.
    num_filters1 = 16   # There are 16 of these filters.

    # Convolutional Layer 2.
    filter_size2 = 5    # Convolution filters are 5 x 5 pixels.
    num_filters2 = 36   # There are 36 of these filters.

    # Fully-connected layer.
    fc_size = 128       # Number of neurons in fully-connected layer.

    # Get data from files
    data = MNIST(data_dir='/tmp/data/MNIST/')

    # The number of pixels in each dimension of an image.
    img_size = data.img_size

    # The images are stored in one-dimensional arrays of this length.
    img_size_flat = data.img_size_flat

    # Tuple with height and width of images used to reshape arrays.
    img_shape = data.img_shape

    # Tuple with height, width and depth used to reshape arrays.
    # This is used for reshaping in Keras.
    img_shape_full = data.img_shape_full

    # Number of classes, one class for each of 10 digits.
    num_classes = data.num_classes

    # Number of colour channels for the images: 1 channel for gray-scale.
    num_channels = data.num_channels

    def __init__(self):
        """Instantiate the class.

        Args:
            None

        Returns:
            None

        """
        # Initialize variables
        epochs = 2

        # Start construction of the Keras Sequential model.
        self.model = Sequential()

        # Add an input layer which is similar to a feed_dict in TensorFlow.
        # Note that the input-shape must be a tuple containing the
        # image-size.
        self.model.add(InputLayer(input_shape=(self.img_size_flat,)))

        # The input is a flattened array with 784 elements,
        # but the convolutional layers expect images with shape (28, 28, 1)
        self.model.add(Reshape(self.img_shape_full))

        # First convolutional layer with ReLU-activation and max-pooling.
        self.model.add(
            Conv2D(kernel_size=self.filter_size1, strides=1,
                   filters=self.num_filters1, padding='same',
                   activation='relu', name='layer_conv1'))
        self.model.add(MaxPooling2D(pool_size=2, strides=2))

        # Second convolutional layer with ReLU-activation and max-pooling.
        self.model.add(
            Conv2D(kernel_size=self.filter_size2, strides=1,
                   filters=self.num_filters2, padding='same',
                   activation='relu', name='layer_conv2'))
        self.model.add(MaxPooling2D(pool_size=2, strides=2))

        # Flatten the 4-rank output of the convolutional layers
        # to 2-rank that can be input to a fully-connected / dense layer.
        self.model.add(Flatten())

        # First fully-connected / dense layer with ReLU-activation.
        self.model.add(Dense(self.fc_size, activation='relu'))

        # Last fully-connected / dense layer with softmax-activation
        # for use in classification.
        self.model.add(Dense(self.num_classes, activation='softmax'))

        # Model Compilation
        '''
        The Neural Network has now been defined and must be finalized by
        adding a loss-function, optimizer and performance metrics. This is
        called model "compilation" in Keras.

        We can either define the optimizer using a string, or if we want
        more control of its parameters then we need to instantiate an
        object. For example, we can set the learning-rate.
        '''
        optimizer = Adam(lr=1e-3)

        '''
        For a classification-problem such as MNIST which has 10 possible
        classes, we need to use the loss-function called
        categorical_crossentropy. The performance metric we are interested
        in is the classification accuracy.
        '''
        self.model.compile(
            optimizer=optimizer,
            loss='categorical_crossentropy',
            metrics=['accuracy'])

        # Training
        '''
        Now that the model has been fully defined with loss-function and
        optimizer, we can train it. This function takes numpy-arrays and
        performs the given number of training epochs using the given
        batch-size.
        An epoch is one full use of the entire training-set. So for 10
        epochs we would iterate randomly over the entire training-set 10
        times.
        '''
        self.model.fit(x=self.data.x_train,
                       y=self.data.y_train,
                       epochs=epochs, batch_size=128)

        # Evaluation
        '''
        Now that the model has been trained we can test its performance on
        the test-set. This also uses numpy-arrays as input.
        '''
        result = self.model.evaluate(x=self.data.x_test, y=self.data.y_test)

        '''
        Print actual versus predicted values
        '''
        print('\nActual vs Predicted values')
        start = 0
        stop = 300
        predictions = self.model.predict(self.data.x_test[start:stop])
        for pointer in range(start, stop):
            predicted = np.argmax(predictions[pointer])
            actual = np.argmax(self.data.y_test[pointer])
            print(
                '{}: Actual: {}\tPredicted: {}\tMatch: {}'.format(
                    str(pointer).zfill(3), actual, predicted,
                    predicted == actual))

        '''
        We can print all the performance metrics for the test-set.
        '''
        print('\nPerformance metrics')
        for name, value in zip(self.model.metrics_names, result):
            print('{} {}'.format(name, value))

        '''
        Print the model summary
        '''
        print('\n\nModel Summary\n\n{}'.format(self.model.summary()))

    def plot_example_errors(self, cls_pred):
        """Plot 9 images in a 3x3 grid.

        Function used to plot 9 images in a 3x3 grid, writing the true and
        predicted classes below each image.

        Args:
            cls_pred: Array of the predicted class-number for all images in
                the test-set.

        Returns:
            None

        """
        # Boolean array whether the predicted class is incorrect.
        incorrect = (cls_pred != self.data.y_test_cls)

        # Get the images from the test-set that have been
        # incorrectly classified.
        images = self.data.x_test[incorrect]

        # Get the predicted classes for those images.
        cls_pred = cls_pred[incorrect]

        # Get the true classes for those images.
        cls_true = self.data.y_test_cls[incorrect]

        # Plot the first 9 images.
        plot_images(
            images[0:9],
            self.img_shape,
            cls_true[0:9],
            cls_pred=cls_pred[0:9])
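    '''
    plot_example_errors expects an array of predicted class-numbers for the
    whole test-set. One way to produce it, assuming a trained KerasCNN
    instance named `cnn` (the instance name is illustrative):

        y_pred = cnn.model.predict(x=cnn.data.x_test)
        cls_pred = np.argmax(y_pred, axis=1)
        cnn.plot_example_errors(cls_pred)
    '''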