def compile_and_fit(model: tf.keras.Sequential,
                    window: window_generator.WindowGenerator,
                    patience: int = 2,
                    max_epochs: int = MAX_EPOCHS,
                    log: bool = False) -> tf.keras.callbacks.History:
    """
    Acts as a training procedure for models and returns the History object.
    Logging requires TensorBoard.
    - patience > 0
    - max_epochs > 0
    """
    early_stopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss',
                                                      patience=patience,
                                                      mode='min')
    model.compile(loss=tf.losses.MeanSquaredError(),
                  optimizer=tf.optimizers.Adam(),
                  metrics=[tf.metrics.MeanAbsoluteError()])

    callbacks = [early_stopping]
    if log:
        log_dir = "logs/fit/" + datetime.datetime.now().strftime(
            "%Y%m%d-%H%M%S")
        callbacks.append(
            tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1))

    history = model.fit(window.train,
                        epochs=max_epochs,
                        validation_data=window.val,
                        callbacks=callbacks)
    return history
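# A minimal usage sketch for compile_and_fit. The model architecture and the
# WindowGenerator arguments below are illustrative assumptions, not values
# taken from this codebase:
#
#   window = window_generator.WindowGenerator(input_width=24, label_width=1,
#                                             shift=1)
#   lstm_model = tf.keras.Sequential([
#       tf.keras.layers.LSTM(32, return_sequences=False),
#       tf.keras.layers.Dense(units=1),
#   ])
#   history = compile_and_fit(lstm_model, window, patience=3, log=True)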
def final_model_evaluation(model: tf.keras.Sequential,
                           window: window_generator.WindowGenerator) -> None:
    """
    Plots final model loss and mean absolute error for the validation and
    test datasets.
    """
    val = model.evaluate(window.val)
    test = model.evaluate(window.test, verbose=0)

    fig = go.Figure([
        go.Bar(name='Loss', x=['val', 'test'], y=[val[0], test[0]]),
        go.Bar(name='Mean Absolute Error',
               x=['val', 'test'],
               y=[val[1], test[1]])
    ])
    fig.update_layout(font=dict(family="Courier New, monospace", size=12),
                      legend=dict(yanchor="top",
                                  y=0.99,
                                  xanchor="left",
                                  x=0.01),
                      margin=go.layout.Margin(r=0,  # right margin
                                              t=0))  # top margin
    fig.show()
def save_model(model: tf.keras.Sequential,
               history: dict,
               filename: str,
               location: str = MODEL_LOCATION) -> None:
    """
    Saves the model, along with its pickled training history, to a specified
    location under a specified name.
    """
    print(f"SAVING model to {location} as {filename}")
    model.save(location + filename)
    with open(location + filename + "/history.p", 'wb') as file_pi:
        pickle.dump(history, file_pi)
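# Hedged sketch of restoring what save_model writes; "my_model" is a
# hypothetical filename:
#
#   model = tf.keras.models.load_model(MODEL_LOCATION + "my_model")
#   with open(MODEL_LOCATION + "my_model/history.p", 'rb') as file_pi:
#       history = pickle.load(file_pi)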
def accuracy_metric(est: tf.keras.Sequential, X: np.ndarray,
                    y: np.ndarray) -> float:
    """TensorFlow estimator accuracy."""
    pred_y = est.predict(X)
    acc = np.sum(y.astype(int)[0] == pred_y.round().astype(int)) / y.shape[0]
    return acc
def make_prediction(model: tf.keras.Sequential, image: np.ndarray,
                    shape) -> list:
    """Based on one image, the model makes a prediction of what it is.

    Args:
        model (tf.keras.Sequential): The model to use
        image (np.ndarray): The image to predict on
        shape: The (height, width, channels) shape of the image

    Returns:
        list: The probability of the given image being each class.
              Softmax not yet applied
    """
    shape = (1, shape[0], shape[1], shape[2])
    img_reshaped = tf.reshape(image, shape)
    return model.predict_step(img_reshaped)
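# A minimal sketch of predicting on one image; the random image and its
# (28, 28, 3) shape are assumptions for illustration:
#
#   img = np.random.rand(28, 28, 3).astype(np.float32)
#   logits = make_prediction(model, img, shape=(28, 28, 3))
#   probabilities = tf.nn.softmax(logits)  # softmax not applied internally
#   predicted_class = int(tf.argmax(probabilities, axis=-1)[0])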
def predict_plot(model: tf.keras.Sequential,
                 window: window_generator.WindowGenerator,
                 label: str,
                 matplot: bool = False) -> None:
    """
    Plots a graph of a feature's predictions on top of the real feature
    values. Primarily uses plotly but has matplotlib as a backup.
    - label in window.label_columns or window.label_columns == None
    """
    plot_col_index = window.column_indices[label]
    prediction = model.predict(window.ground)
    ground = prediction[:, -1, plot_col_index]

    val = np.empty_like(window.dfs.ground_df[label])
    val.fill(np.nan)
    val[window.ws.total_window_size - 1:] = ground

    plot(val, window, label, matplot=matplot)
def evaluate_model(model: tf.keras.Sequential,
                   window: window_generator.WindowGenerator,
                   history: dict = None,
                   verbose: int = 0) -> None:
    """
    Displays information about the model and its training history.
    Verbose = 0 for only basic information about the final model.
    Verbose = 1 for information about the final model and training history.
    Verbose = 2 for information about the final model, training history, and
    shape.
    - verbose in [0, 1, 2]
    """
    final_model_evaluation(model, window)
    if verbose >= 1:
        model_history_evaluation(history)
    if verbose == 2:
        model.summary()  # summary() prints itself and returns None
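# Typical call tying the evaluation helpers together; the History object is
# assumed to come from compile_and_fit above:
#
#   evaluate_model(model, window, history=history.history, verbose=2)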
def train_model(model: tf.keras.Sequential, train_images: np.ndarray,
                train_labels: np.ndarray, val_images: np.ndarray,
                val_labels: np.ndarray, epochs: int, noise_tuple) -> tuple:
    """Trains an input model on the given dataset.

    Args:
        model (tf.keras.Sequential): A previously created model, which is
            then trained using the input dataset
        train_images (np.ndarray): training images
        train_labels (np.ndarray): labels for the training images
        val_images (np.ndarray): validation images
        val_labels (np.ndarray): labels for the validation images
        epochs (int): maximum number of training epochs
        noise_tuple: (noise_method, noise_names, augmentation); currently
            unused while the noise-injection fit path is disabled

    Returns:
        tuple: per-epoch validation loss and validation accuracy
    """
    if len(train_images) == 0 or len(train_labels) == 0 or len(
            val_images) == 0 or len(val_labels) == 0:
        print("ERROR: When training, either the train or validation set "
              "contains an empty list.")
        print(f" - train images     : {len(train_images)}")
        print(f" - train labels     : {len(train_labels)}")
        print(f" - validation images: {len(val_images)}")
        print(f" - validation labels: {len(val_labels)}")
        sys.exit()
    if len(train_labels) != len(train_images) or len(val_images) != len(
            val_labels):
        print("ERROR: the image and label lists are not the same size:")
        print(f" - train images      : {len(train_images)} - "
              f"{len(train_labels)} : train labels")
        print(f" - validation images : {len(val_images)} - "
              f"{len(val_labels)} : validation labels")
        sys.exit()

    initial_learning_rate = 0.001  # TODO should be default (0.0001, maybe)
    opt = tf.keras.optimizers.Adam(learning_rate=initial_learning_rate,
                                   beta_1=0.9,
                                   beta_2=0.999,
                                   epsilon=1e-07,
                                   amsgrad=False,
                                   name='Adam')
    earlystop = tf.keras.callbacks.EarlyStopping(monitor='val_loss',
                                                 min_delta=0.01,
                                                 patience=20,
                                                 verbose=0,
                                                 mode='auto',
                                                 baseline=None,
                                                 restore_best_weights=False)
    model.compile(
        optimizer=opt,
        loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
        metrics=['sparse_categorical_accuracy'])

    def lr_exp_decay(epoch, lr):
        # Exponential decay: lr = lr0 * e^(-k * epoch)
        k = 0.1
        return initial_learning_rate * math.exp(-k * epoch)

    history = model.fit(train_images,
                        train_labels,
                        epochs=epochs,
                        validation_data=(val_images, val_labels),
                        callbacks=[
                            tf.keras.callbacks.LearningRateScheduler(
                                lr_exp_decay, verbose=1), earlystop
                        ])

    validation_loss = history.history['val_loss']
    validation_accuracy = history.history['val_sparse_categorical_accuracy']
    learning_rate = K.eval(model.optimizer.lr)
    print(f"The initial learning rate is: {initial_learning_rate}, "
          f"and the final learning rate is: {learning_rate}")
    return validation_loss, validation_accuracy
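# Illustrative call to train_model; the epoch count and the placeholder
# noise_tuple are assumptions:
#
#   val_loss, val_acc = train_model(model, train_images, train_labels,
#                                   val_images, val_labels, epochs=100,
#                                   noise_tuple=(None, [], False))
#   print(f"best validation accuracy: {max(val_acc):.3f}")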
def flatten_and_dense(model: tf.keras.Sequential,
                      output_layer_size: int = 62) -> tf.keras.Sequential:
    """Returns the model with flatten and dense layers appended, ending in
    output_layer_size categories of prediction."""
    model.add(layers.Flatten())
    model.add(layers.Dense(64, activation='relu'))
    model.add(layers.Dense(output_layer_size))
    return model
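# Sketch of composing flatten_and_dense onto a small convolutional base; the
# Conv2D stack and (28, 28, 3) input shape are illustrative assumptions:
#
#   model = tf.keras.Sequential([
#       layers.Conv2D(32, (3, 3), activation='relu',
#                     input_shape=(28, 28, 3)),
#       layers.MaxPooling2D((2, 2)),
#   ])
#   model = flatten_and_dense(model, output_layer_size=62)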