def test_split_with_larger_number(self):
    # Asking for more elements than the list holds returns the whole list and an empty remainder.
    sample_list = list(range(self.CHUNK_SIZE))
    chunk, rest = utils.split(sample_list, self.CHUNK_SIZE + 1)
    self.assertEqual(len(chunk), self.CHUNK_SIZE)
    self.assertEqual(len(rest), 0)

def test_split(self):
    # A split size smaller than the list yields a chunk of that size plus the remaining elements.
    sample_list = list(range(self.CHUNK_SIZE))
    split_size = 10
    chunk, rest = utils.split(sample_list, split_size)
    self.assertEqual(len(chunk), split_size)
    self.assertEqual(len(rest), self.CHUNK_SIZE - split_size)
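# The tests above only pin down the behaviour of utils.split; its implementation is not shown
# in this file. A minimal sketch that would satisfy both tests, assuming the (list, size)
# signature exercised above, is:
def split(items, size):
    """Return the first `size` elements and the remainder; `size` may exceed len(items)."""
    return items[:size], items[size:]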
def fit(self, X, n_epochs=50, batch_size=256):
    indices_fracs = split(fracs=[0.9, 0.1], N=len(X), seed=0)
    X_train, X_valid = X[indices_fracs[0]], X[indices_fracs[1]]
    self.autoencoder.fit(X_train, X_train,
                         epochs=n_epochs,
                         batch_size=batch_size,
                         shuffle=True,
                         validation_data=(X_valid, X_valid))
def fit_generator(self, X, n_epochs, batch_size=256):
    # Hold out 10% of the data for validation and train on augmented batches.
    indices_fracs = split(fracs=[0.9, 0.1], N=len(X), seed=0)
    X_train, X_valid = X[indices_fracs[0]], X[indices_fracs[1]]
    data_augmentation = ImageDataGenerator(fill_mode="nearest")
    self.autoencoder.fit_generator(data_augmentation.flow(X_train, X_train, batch_size=batch_size),
                                   validation_data=(X_valid, X_valid),
                                   steps_per_epoch=len(X_train) // batch_size,
                                   epochs=n_epochs)
def fit(self, X, n_epochs=50, batch_size=256):
    # Stop training once the validation loss has not improved for 10 consecutive epochs.
    es = EarlyStopping(monitor='val_loss', verbose=1, patience=10)
    indices_fracs = split(fracs=[0.9, 0.1], N=len(X), seed=0)
    X_train, X_valid = X[indices_fracs[0]], X[indices_fracs[1]]
    self.autoencoder.fit(X_train, X_train,
                         epochs=n_epochs,
                         batch_size=batch_size,
                         shuffle=True,
                         validation_data=(X_valid, X_valid),
                         callbacks=[es])
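# The fit methods above call a fraction-based split(fracs=..., N=..., seed=...) helper, distinct
# from utils.split in the tests, which returns one index array per fraction for fancy indexing
# into X. That helper is not shown here; a plausible sketch, assuming a seeded shuffle of the
# first N indices cut at the cumulative fraction boundaries, is:
import numpy as np

def split(fracs, N, seed=0):
    """Shuffle range(N) with the given seed and cut it into len(fracs) index arrays."""
    rng = np.random.RandomState(seed)
    indices = rng.permutation(N)
    bounds = np.cumsum([int(round(f * N)) for f in fracs])[:-1]
    return np.split(indices, bounds)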