def on_epoch_end(self, epoch, logs=None):
    """Redraw the embedding figure at the end of each epoch.

    Clears the axes, re-plots the fixed target embedding, then projects
    the network's current anchor-layer output onto the first two PCA
    components of the target embedding and overlays it, so the two
    point clouds can be compared visually as training progresses.

    Args:
        epoch: Integer index of the epoch that just finished (passed
            through to the base ``Callback``).
        logs: Optional dict of metrics from Keras; forwarded unchanged.

    NOTE(review): assumes ``self.targetEmbedding`` is 2-D with at least
    two columns and ``self.pca`` was fit on the anchor-layer output
    space — confirm against the class initializer.
    """
    # Fix: the original used a mutable default (`logs={}`), which is
    # shared across every call and instance. Use None as the sentinel.
    logs = {} if logs is None else logs
    Callback.on_epoch_end(self, epoch, logs=logs)
    # clear plot
    self.axFig.cla()
    # plot target embedding
    targetEmbeddingHandle = self.axFig.scatter(
        self.targetEmbedding[:, 0], self.targetEmbedding[:, 1],
        marker="s", alpha=0.25, s=10, c=self.yTarget, cmap="rainbow",
        label="MMD target embedding")
    # plot network output projected on target embedding
    plotPredictions = self.netAnchorLayerPredict(self.xInput)
    # Project predictions onto the first two principal components.
    projection = np.dot(plotPredictions,
                        self.pca.components_[[0, 1]].transpose())
    NetOuputHandle = self.axFig.scatter(
        projection[:, 0], projection[:, 1], marker=">", c=self.yInput,
        cmap="rainbow", alpha=0.25, s=10,
        label='Net output projected on target embedding')
    self.axFig.legend(handles=(targetEmbeddingHandle, NetOuputHandle))
    # Force an interactive redraw; pause lets the GUI event loop run.
    plt.draw()
    plt.pause(0.01)
def on_epoch_end(self, epoch, logs=None):
    """Record this epoch's losses and refresh the live loss curves.

    Appends ``loss`` and ``val_loss`` from *logs* to the running
    histories and re-plots both curves on ``self.ax``.

    Args:
        epoch: Integer index of the epoch that just finished (passed
            through to the base ``Callback``).
        logs: Optional dict of metrics from Keras; ``loss`` and
            ``val_loss`` are read with ``.get`` so missing keys append
            ``None`` rather than raising.
    """
    # Fix: the original used a mutable default (`logs={}`), which is
    # shared across every call and instance. Use None as the sentinel.
    logs = {} if logs is None else logs
    Callback.on_epoch_end(self, epoch, logs=logs)
    self.losses.append(logs.get('loss'))
    self.val_losses.append(logs.get('val_loss'))
    # plot the cost for training and testing so far
    lossHandle, = self.ax.plot(self.losses, color='blue', label='loss')
    val_lossHandle, = self.ax.plot(self.val_losses, color='red',
                                   label='validation loss')
    self.ax.legend(handles=[lossHandle, val_lossHandle])
    # Force an interactive redraw; pause lets the GUI event loop run.
    plt.draw()
    plt.pause(0.01)
def train(self):
    """Build the model and fit it, resuming from the last saved epoch.

    Reads the persisted epoch counter, wires up a callback that stores
    training progress (epoch, losses, validation accuracy) after every
    epoch, and runs ``model.fit`` starting from ``initial_epoch`` with
    checkpointing to ``self.model_weight``.
    """
    model = self.prepare_model()
    # Resume counting from the epoch recorded by a previous run.
    epoch_count = self.get_training_info()['epoch']

    def on_epoch_end(epoch, log):
        # Persist progress after each epoch so training can resume later.
        nonlocal epoch_count
        epoch_count += 1
        self.store_training_info({
            'epoch': epoch_count,
            'val_acc': log['val_acc'],
            'train_loss': log['loss'],
            'val_loss': log['val_loss'],
        })

    # A bare Callback whose on_epoch_end hook is replaced by our closure.
    progress_callback = Callback()
    progress_callback.on_epoch_end = on_epoch_end

    model.fit(
        self.train_x,
        self.train_y,
        batch_size=2,
        epochs=100,
        validation_data=(self.test_x, self.test_y),
        initial_epoch=epoch_count,
        callbacks=[ModelCheckpoint(self.model_weight), progress_callback],
    )