def evaluate(self, x, y):
    """Return a sklearn classification report for the model on (x, y).

    For the Keras predict counterpart, see line 56 in
    https://github.com/gradescope/fsdl-text-recognizer-project/blob/master/lab6_sln/text_recognizer/models/base.py
    """
    eval_sequence = DatasetSequence(x, y, batch_size=12)
    predictions = self.algorithm.predict(eval_sequence.x)
    return metrics.classification_report(eval_sequence.y, predictions)
def fit(self, dataset, batch_size=32, epochs=10, callbacks=None):
    """Compile the Keras model and train it on the dataset's train split.

    Args:
        dataset: object exposing x_train/y_train and x_test/y_test arrays.
        batch_size: samples per gradient step (default 32).
        epochs: number of passes over the training data (default 10).
        callbacks: optional list of Keras callbacks; defaults to no callbacks.
            (Was a mutable default ``[]`` — fixed to the ``None`` sentinel so
            the list is not shared across calls.)

    The test split is used as validation data during training.
    """
    if callbacks is None:
        callbacks = []
    self.algorithm.compile(loss=self.loss(), optimizer=self.optimizer(), metrics=self.metrics())
    train_sequence = DatasetSequence(dataset.x_train, dataset.y_train, batch_size,
                                     augment_fn=self.batch_augment_fn,
                                     format_fn=self.batch_format_fn)
    test_sequence = DatasetSequence(dataset.x_test, dataset.y_test, batch_size,
                                    augment_fn=self.batch_augment_fn,
                                    format_fn=self.batch_format_fn)
    # NOTE(review): use_multiprocessing=True with workers=1 gives no parallelism
    # benefit — confirm whether more workers (or plain threading) was intended.
    self.algorithm.fit_generator(train_sequence,
                                 epochs=epochs,
                                 callbacks=callbacks,
                                 validation_data=test_sequence,
                                 use_multiprocessing=True,
                                 workers=1,
                                 shuffle=True)
def fit(self, dataset, batch_size=32, epochs=10, callbacks=None,
        lr: Optional[float] = 0.001, beta_1: Optional[float] = 0.9,
        beta_2: Optional[float] = 0.999, epsilon=None, decay=0.0):
    """Compile the Keras model with an Adam optimizer and train it.

    Args:
        dataset: object exposing x_train/y_train and x_test/y_test arrays.
        batch_size: samples per gradient step (default 32).
        epochs: number of passes over the training data (default 10).
        callbacks: optional list of Keras callbacks; defaults to no callbacks.
            (Was a mutable default ``[]`` — fixed to the ``None`` sentinel so
            the list is not shared across calls.)
        lr, beta_1, beta_2, epsilon, decay: Adam hyperparameters forwarded to
            ``self.adam_optimizer`` (defaults match Keras' Adam defaults).

    The test split is used as validation data during training.
    """
    if callbacks is None:
        callbacks = []
    self.algorithm.compile(loss=self.loss(),
                           optimizer=self.adam_optimizer(lr=lr, beta_1=beta_1, beta_2=beta_2,
                                                         epsilon=epsilon, decay=decay),
                           metrics=self.metrics())
    train_sequence = DatasetSequence(dataset.x_train, dataset.y_train, batch_size,
                                     augment_fn=self.batch_augment_fn,
                                     format_fn=self.batch_format_fn)
    test_sequence = DatasetSequence(dataset.x_test, dataset.y_test, batch_size,
                                    augment_fn=self.batch_augment_fn,
                                    format_fn=self.batch_format_fn)
    # NOTE(review): use_multiprocessing=True with workers=1 gives no parallelism
    # benefit — confirm whether more workers was intended.
    self.algorithm.fit_generator(train_sequence,
                                 epochs=epochs,
                                 callbacks=callbacks,
                                 validation_data=test_sequence,
                                 use_multiprocessing=True,
                                 workers=1,
                                 shuffle=True)
def fit(self, dataset, batch_size=None, epochs=None, callbacks=None):
    """Fit the (sklearn-style) model on the dataset's train split.

    Adapted from the Keras fit_generator version — see line 44 in
    https://github.com/gradescope/fsdl-text-recognizer-project/blob/master/lab6_sln/text_recognizer/models/base.py
    (original fit_generator arguments: train_sequence, epochs, callbacks,
    validation_data=test_sequence, use_multiprocessing=False, workers=1,
    shuffle=True).

    Args:
        dataset: object exposing x_train/y_train arrays.
        batch_size: forwarded to DatasetSequence; default None.
        epochs: accepted for interface compatibility but unused by sklearn fit.
        callbacks: accepted for interface compatibility but unused.
            (Was a mutable default ``[]`` — fixed to the ``None`` sentinel so
            a shared list is never created.)
    """
    if callbacks is None:
        callbacks = []
    train_sequence = DatasetSequence(dataset.x_train, dataset.y_train, batch_size)
    self.algorithm.fit(train_sequence.x, train_sequence.y)
def evaluate(self, x, y):
    """Return the model's accuracy on (x, y) as a float in [0, 1]."""
    # Small batch size keeps peak memory low during prediction.
    data_sequence = DatasetSequence(x, y, batch_size=16)
    raw_preds = self.algorithm.predict_generator(data_sequence)
    predicted_labels = np.argmax(raw_preds, -1)
    true_labels = np.argmax(y, -1)
    return np.mean(predicted_labels == true_labels)
def evaluate(self, x, y):
    """Build a DatasetSequence over (x, y) and return a classification report."""
    seq = DatasetSequence(x, y, batch_size=12)
    y_pred = self.algorithm.predict(seq.x)
    return metrics.classification_report(seq.y, y_pred)