def fit(self, db: Dataset, epochs: int = 1000001, batch_size: int = 128,
        patience: int = 20, callbacks: list = []) -> History:
    """Train the triplet model batch-by-batch with window-based early stopping."""
    history = {'loss': [], 'val_loss': []}
    epoch = []
    for i in range(epochs):
        # Sample a training batch of (anchor, positive, negative) triplets.
        anchor, positive, negative = self._get_batch(
            db.X_train, db.y_train, db.info['n_cls'], batch_size)
        loss = self.model.train_on_batch(
            [anchor, positive, negative], anchor)
        history['loss'].append(loss)

        # Evaluate on a triplet batch drawn from the test split.
        anchor, positive, negative = self._get_batch(
            db.X_test, db.y_test, db.info['n_cls'], batch_size)
        val_loss = self.model.test_on_batch(
            [anchor, positive, negative], anchor)
        history['val_loss'].append(val_loss)

        epoch.append(i)
        if not i % 100:
            print("Batch %d --> loss: %.5f - val_loss: %.5f"
                  % (i, loss, val_loss))

        # Early stopping: lose one unit of patience whenever the validation
        # loss has not improved on the value recorded 100 batches earlier
        # (the i >= 100 guard avoids indexing before the window is full).
        if i >= 100 and history['val_loss'][-101] <= val_loss:
            patience -= 1
            if not patience:
                break

    return History(epoch=epoch, history=history)
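# Illustrative sketch: the fit() loop above expects self._get_batch to return a
# batch of (anchor, positive, negative) arrays. A minimal sampler of that shape
# might look like the function below; the name, the uniform class sampling, and
# the assumption that `y` holds integer class labels with at least two samples
# per class are all assumptions for illustration, not the repo's actual
# _get_batch implementation.
import numpy as np

def sample_triplet_batch(X, y, n_cls, batch_size):
    anchors, positives, negatives = [], [], []
    for _ in range(batch_size):
        pos_cls = np.random.randint(n_cls)                          # anchor/positive class
        neg_cls = (pos_cls + np.random.randint(1, n_cls)) % n_cls   # any other class
        pos_idx = np.where(y == pos_cls)[0]
        neg_idx = np.where(y == neg_cls)[0]
        a, p = np.random.choice(pos_idx, 2, replace=False)          # distinct anchor/positive
        n = np.random.choice(neg_idx)
        anchors.append(X[a])
        positives.append(X[p])
        negatives.append(X[n])
    return np.array(anchors), np.array(positives), np.array(negatives)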
def fit_on_batch(self, db: Dataset, gen: BaseGenerator, epochs: int = 1000000,
                 batch_size: int = 128, patience: int = 100, verbose: int = 2,
                 callbacks: list = []) -> History:
    """Train on batches from a generator, validating on the full test split."""
    history = {}
    for item in self.model.metrics_names:
        history.update({item: []})
        history.update({'val_' + item: []})

    def _print_report(ltype, metrics_value):
        for i, item in enumerate(self.model.metrics_names):
            if ltype == 'train':
                print("%s: %.5f - " % (item, metrics_value[i]), end='')
            elif ltype == 'test':
                print("%s: %.5f - " % ('val_' + item, metrics_value[i]), end='')

    def _update_history(ltype, metrics_value):
        for i, item in enumerate(self.model.metrics_names):
            if ltype == 'train':
                history[item].append(metrics_value[i])
            elif ltype == 'test':
                history['val_' + item].append(metrics_value[i])

    epoch = []
    for i in range(epochs):
        # One optimisation step on a generated batch (labels are one-hot encoded).
        X_data, y_data = gen.get_batch()
        metrics_value = self.model.train_on_batch(
            X_data, to_categorical(y_data, num_classes=db.info['n_cls']))
        _update_history('train', metrics_value)

        # Validate on the whole test set.
        val_metrics_value = self.model.test_on_batch(db.X_test, db.Y_test())
        _update_history('test', val_metrics_value)

        epoch.append(i)
        if not i % 100:
            print("Batch %d --> " % i, end='')
            _print_report('train', metrics_value)
            _print_report('test', val_metrics_value)
            print('')

        # Early stopping: compare the current validation loss with the value
        # recorded 100 batches earlier (the i >= 100 guard avoids indexing
        # before the window is full).
        if i >= 100 and history['val_loss'][-101] <= val_metrics_value[0]:
            patience -= 1
            if not patience:
                break

    return History(epoch=epoch, history=history)
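# Illustrative sketch: fit_on_batch() above only needs an object exposing a
# get_batch() method that returns (X_batch, y_batch) with integer class labels.
# The class below is a hypothetical example of such a generator (uniform random
# sampling); the actual BaseGenerator subclasses in this repo may differ.
import numpy as np

class RandomBatchGenerator:
    def __init__(self, X, y, batch_size=128):
        self.X, self.y, self.batch_size = X, y, batch_size

    def get_batch(self):
        # Draw a uniformly random mini-batch; labels stay as integer class ids
        # because fit_on_batch() one-hot encodes them with to_categorical().
        idx = np.random.randint(0, len(self.X), self.batch_size)
        return self.X[idx], self.y[idx]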
def fit(self, db: Dataset, epochs: int = 1000001, batch_size: int = 128,
        patience: int = 20) -> History:
    """Train the two-input (pairwise) model batch-by-batch with early stopping."""
    history = {}
    for item in self.model.metrics_names:
        history.update({item: []})
        history.update({'val_' + item: []})

    def _print_report(ltype, metrics_value):
        for i, item in enumerate(self.model.metrics_names):
            if ltype == 'train':
                print("%s: %.5f - " % (item, metrics_value[i]), end='')
            elif ltype == 'test':
                print("%s: %.5f - " % ('val_' + item, metrics_value[i]), end='')

    def _update_history(ltype, metrics_value):
        for i, item in enumerate(self.model.metrics_names):
            if ltype == 'train':
                history[item].append(metrics_value[i])
            elif ltype == 'test':
                history['val_' + item].append(metrics_value[i])

    epoch = []
    for i in range(epochs):
        # Sample a training batch of input pairs and their target.
        in_1, in_2, out = self._get_batch(
            db.X_train, db.y_train, db.info['n_cls'], batch_size)
        metrics_value = self.model.train_on_batch([in_1, in_2], out)
        _update_history('train', metrics_value)

        # Evaluate on a batch of pairs drawn from the test split.
        in_1, in_2, out = self._get_batch(
            db.X_test, db.y_test, db.info['n_cls'], batch_size)
        val_metrics_value = self.model.test_on_batch([in_1, in_2], out)
        _update_history('test', val_metrics_value)

        epoch.append(i)
        if not i % 100:
            print("Batch %d --> " % i, end='')
            _print_report('train', metrics_value)
            _print_report('test', val_metrics_value)
            print('')

        # Early stopping: compare the current validation loss with the value
        # recorded 100 batches earlier (the i >= 100 guard avoids indexing
        # before the window is full).
        if i >= 100 and history['val_loss'][-101] <= val_metrics_value[0]:
            patience -= 1
            if not patience:
                break

    return History(epoch=epoch, history=history)
def fit(self, db: Dataset, epochs: int = 1000, batch_size: int = 128,
        verbose: int = 2, callbacks: list = []) -> History:
    history = self.model.fit(db.X_train, db.Y_train(),
                             validation_data=(db.X_test, db.Y_test()),
                             epochs=epochs, batch_size=batch_size,
                             verbose=verbose, callbacks=callbacks)
    return History(history.epoch, history.params, history.history)
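# Usage sketch: how this fit() wrapper might be driven. "CNNModel" and the
# Dataset constructor below are placeholder names assumed for illustration;
# only EarlyStopping is standard Keras API.
from keras.callbacks import EarlyStopping

db = Dataset('mnist')        # hypothetical dataset loader
net = CNNModel()             # hypothetical model wrapper exposing fit()
hist = net.fit(db, epochs=200, batch_size=128, verbose=2,
               callbacks=[EarlyStopping(monitor='val_loss', patience=10)])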