import time

import numpy as np
import scipy.stats
import tensorflow as tf

# Project-local helpers referenced below (import paths depend on the repo
# layout): utils, delta_timer, group_scores, general_keys.


def fit(self, x, y, validation_data=None, batch_size=128, epochs=1,
        iterations_to_validate=None, patience=0, verbose=1, **kwargs):
    print('\nTraining Model')
    self.verbose = verbose
    # Delegate to the validated training loop when validation data is given.
    if validation_data is not None:
        return self.fit_tf_val(x, y, validation_data, batch_size, epochs,
                               iterations_to_validate, patience, verbose)
    train_ds = tf.data.Dataset.from_tensor_slices(
        (x, y)).shuffle(10000).batch(batch_size)
    for epoch in range(epochs):
        start_time = time.time()
        for images, labels in train_ds:
            self.train_step(images, labels)
        if self.verbose:
            # Cheap progress report: metrics from the last batch of the epoch.
            self.eval_step(images, labels)
            template = 'Epoch {}, Loss: {}, Acc: {}, Time: {}'
            print(template.format(
                epoch + 1,
                self.eval_loss.result(),
                self.eval_accuracy.result() * 100,
                delta_timer(time.time() - start_time)))
        self.eval_loss.reset_states()
        self.eval_accuracy.reset_states()
    # Final evaluation over the full training set.
    self.eval_tf(x, y, verbose=verbose)
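# The loop above assumes the model class defines `train_step`/`eval_step` and
# tf.keras metric attributes (`train_loss`, `eval_loss`, ...), none of which
# appear in this file. A minimal, self-contained sketch of those pieces,
# assuming a sparse-label classifier; the architecture, loss, and optimizer
# choices here are illustrative assumptions, not taken from this repo.

class _SketchModel(tf.keras.Model):
    def __init__(self, num_classes=10):
        super().__init__()
        self.flatten = tf.keras.layers.Flatten()
        self.dense = tf.keras.layers.Dense(num_classes)
        self.loss_object = tf.keras.losses.SparseCategoricalCrossentropy(
            from_logits=True)
        self.optimizer = tf.keras.optimizers.Adam()
        self.train_loss = tf.keras.metrics.Mean()
        self.train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy()
        self.eval_loss = tf.keras.metrics.Mean()
        self.eval_accuracy = tf.keras.metrics.SparseCategoricalAccuracy()

    def call(self, x, training=False):
        return self.dense(self.flatten(x))

    @tf.function
    def train_step(self, images, labels):
        # One optimizer step; accumulates the running train metrics.
        with tf.GradientTape() as tape:
            predictions = self(images, training=True)
            loss = self.loss_object(labels, predictions)
        gradients = tape.gradient(loss, self.trainable_variables)
        self.optimizer.apply_gradients(
            zip(gradients, self.trainable_variables))
        self.train_loss(loss)
        self.train_accuracy(labels, predictions)

    @tf.function
    def eval_step(self, images, labels):
        # Forward pass only; accumulates the running eval metrics.
        predictions = self(images, training=False)
        loss = self.loss_object(labels, predictions)
        self.eval_loss(loss)
        self.eval_accuracy(labels, predictions)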
def print_scores_times_to_file(file_path, header, scores_list, times_list):
    # Append mean +/- standard error for each score group, plus mean runtime.
    with open(file_path, "a+") as text_file:
        text_file.write(header + '\n')
        scores_grouped = group_scores(scores_list)
        # Assumes group_scores returns one group per metric, in this order.
        metric_names = ['diri', 'simple', 'entropy', 'xH']
        for i in range(len(scores_grouped)):
            text_file.write('%s: %.5f+/-%.5f\n' % (
                metric_names[i],
                np.mean(scores_grouped[i]),
                scipy.stats.sem(scores_grouped[i])))
        text_file.write(
            "Time: %s\n\n" % utils.delta_timer(np.mean(times_list)))
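# Hypothetical usage of print_scores_times_to_file. The file path, header, and
# values below are made up, and the per-run score layout assumes group_scores
# regroups run tuples into one array per metric in the order
# ['diri', 'simple', 'entropy', 'xH'] (an assumption; group_scores is defined
# elsewhere in the repo).

example_scores = [
    (0.91, 0.88, 0.90, 0.89),  # run 1: (diri, simple, entropy, xH)
    (0.92, 0.87, 0.91, 0.90),  # run 2
    (0.90, 0.89, 0.89, 0.88),  # run 3
]
example_times = [312.4, 305.9, 318.1]  # seconds per run
print_scores_times_to_file('results.txt', 'example experiment',
                           example_scores, example_times)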
def fit_tf_val(self, x, y, validation_data=None, batch_size=128, epochs=1,
               iterations_to_validate=None, patience=0, verbose=1):
    self.verbose = verbose
    n_iterations_in_epoch = len(y) // batch_size
    # Default: validate once at the end of each epoch.
    if iterations_to_validate is None:
        iterations_to_validate = n_iterations_in_epoch
    self.best_model_so_far = {
        general_keys.ITERATION: 0,
        general_keys.LOSS: 1e100,
        general_keys.NOT_IMPROVED_COUNTER: 0,
    }
    train_ds = tf.data.Dataset.from_tensor_slices(
        (x, y)).shuffle(10000).batch(batch_size)
    val_ds = tf.data.Dataset.from_tensor_slices(
        (validation_data[0], validation_data[1])).batch(1024)
    # Checkpoint the initial weights so there is always something to restore.
    self.check_best_model_save(iteration=0)
    for epoch in range(epochs):
        start_time = time.time()
        for it_i, (images, labels) in enumerate(train_ds):
            self.train_step(images, labels)
            if it_i % iterations_to_validate == 0 and it_i != 0:
                if self.check_early_stopping(patience):
                    return
                for test_images, test_labels in val_ds:
                    self.eval_step(test_images, test_labels)
                # TODO: check train printer
                if self.verbose:
                    template = ('Epoch {}, Loss: {}, Acc: {}, Val loss: {}, '
                                'Val acc: {}, Time: {}')
                    print(template.format(
                        epoch + 1,
                        self.train_loss.result(),
                        self.train_accuracy.result() * 100,
                        self.eval_loss.result(),
                        self.eval_accuracy.result() * 100,
                        delta_timer(time.time() - start_time)))
                # Global iteration index: epoch * n_iterations_in_epoch + it_i.
                self.check_best_model_save(
                    it_i + epoch * n_iterations_in_epoch)
                self.eval_loss.reset_states()
                self.eval_accuracy.reset_states()
                self.train_loss.reset_states()
                self.train_accuracy.reset_states()
    # Restore the best checkpoint seen during training.
    self.load_weights(self.best_model_weights_path).expect_partial()
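# check_best_model_save and check_early_stopping are defined elsewhere on the
# same class. A plausible sketch of their contract, consistent with how they
# are called above; everything here (the iteration-0 special case, the use of
# save_weights, the strict-improvement rule) is an assumption, not taken from
# this file. They would live on the same class as fit_tf_val.

def check_best_model_save(self, iteration):
    if iteration == 0:
        # First call happens before any eval pass: just checkpoint the
        # initial weights so load_weights() always has a target.
        self.save_weights(self.best_model_weights_path)
        return
    current_loss = self.eval_loss.result().numpy()
    if current_loss < self.best_model_so_far[general_keys.LOSS]:
        # New best validation loss: checkpoint and reset the patience counter.
        self.best_model_so_far[general_keys.LOSS] = current_loss
        self.best_model_so_far[general_keys.ITERATION] = iteration
        self.best_model_so_far[general_keys.NOT_IMPROVED_COUNTER] = 0
        self.save_weights(self.best_model_weights_path)
    else:
        self.best_model_so_far[general_keys.NOT_IMPROVED_COUNTER] += 1

def check_early_stopping(self, patience):
    # True once validation loss has failed to improve `patience` validations
    # in a row.
    return self.best_model_so_far[general_keys.NOT_IMPROVED_COUNTER] > patience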
def eval_tf(self, x, y, batch_size=1024, verbose=1):
    self.verbose = verbose
    self.eval_loss.reset_states()
    self.eval_accuracy.reset_states()
    dataset = tf.data.Dataset.from_tensor_slices((x, y)).batch(batch_size)
    start_time = time.time()
    for images, labels in dataset:
        self.eval_step(images, labels)
    if self.verbose:
        template = 'Loss: {}, Acc: {}, Time: {}'
        print(template.format(
            self.eval_loss.result(),
            self.eval_accuracy.result() * 100,
            delta_timer(time.time() - start_time)))
    # Snapshot metric values before clearing state for the next caller.
    results_dict = {
        general_keys.LOSS: self.eval_loss.result(),
        general_keys.ACCURACY: self.eval_accuracy.result()
    }
    self.eval_loss.reset_states()
    self.eval_accuracy.reset_states()
    return results_dict
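# Hypothetical end-to-end usage, assuming the fit/fit_tf_val/eval_tf methods
# above live on _SketchModel (they are shown unattached here) and that
# best_model_weights_path is set on the instance. Dataset and hyperparameters
# are illustrative, not taken from this repo.

if __name__ == '__main__':
    (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
    x_train = x_train[..., None].astype('float32') / 255.0
    x_test = x_test[..., None].astype('float32') / 255.0
    # Hold out the last 5000 training samples for validation.
    x_val, y_val = x_train[-5000:], y_train[-5000:]
    x_train, y_train = x_train[:-5000], y_train[:-5000]

    model = _SketchModel(num_classes=10)
    model.best_model_weights_path = 'checkpoints/best_model'  # made-up path
    # With validation_data given, fit() delegates to fit_tf_val(), which
    # early-stops and restores the best checkpoint before returning.
    model.fit(x_train, y_train, validation_data=(x_val, y_val),
              batch_size=128, epochs=5, patience=2)
    print(model.eval_tf(x_test, y_test))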