class CustomHistory(Callback):
    """Callback that owns a History object and forwards epoch-end logs to it.

    NOTE(review): assumes ``Callback`` and ``History`` come from Keras —
    confirm against this file's imports (not visible in this chunk).
    """

    def __init__(self):
        # Create the wrapped History and initialise its internal storage
        # immediately, so logs can be collected without a fit() call.
        hist = History()
        hist.on_train_begin()
        self.history = hist

    def on_epoch_end(self, epoch, logs=None):
        """Delegate the epoch's metrics straight to the wrapped History."""
        self.history.on_epoch_end(epoch=epoch, logs=logs)
# Esempio n. 2 (Example 2)
# 0
class ModelTrainer(object):
    """Keras-style training wrapper around a PyTorch model.

    Usage mirrors Keras: construct with a model, ``compile`` with an
    optimizer/loss/device, then ``fit`` with data loaders.
    """

    def __init__(self, model=None):
        self.model = model

    def compile(self, optimizer, loss, device='cpu'):
        """Record the optimizer, loss criterion and target device."""
        self.optimizer = optimizer
        self.criterion = loss
        self.device = device

    def fit(self,
            train_loader,
            val_loader,
            model_checker=None,
            reduceLROnPlateau=True,
            epochs=100,
            sgdr=True):
        """Run the epoch loop: train, evaluate, log, plot, checkpoint, schedule.

        NOTE(review): ``train``/``evaluate``/``plot_history`` and the two
        scheduler classes are project helpers not visible in this chunk.
        """
        self.history = History()
        self.history.on_train_begin()

        plateau_sched = ReduceLROnPlateau(self.optimizer, 'min', patience=3)
        warm_restart_sched = CosineAnnealingWithRestartsLR(
            self.optimizer, T_max=1, T_mult=2)

        for epoch in range(epochs):
            if sgdr:
                # Stepped at the start of each epoch, before training —
                # ordering preserved from the original implementation.
                warm_restart_sched.step()

            train_logs = train(self.model, train_loader, self.criterion,
                               self.optimizer, self.device)
            val_logs = evaluate(self.model, val_loader, self.criterion,
                                self.device)

            self.history.on_epoch_end(epoch, dict(train_logs, **val_logs))
            plot_history(self.history, metrics=['loss', 'f1'])

            if model_checker is not None:
                model_checker.set_model(self.model)
                model_checker.on_epoch_end(epoch, dict(train_logs, **val_logs))
            if reduceLROnPlateau:
                plateau_sched.step(val_logs['val_loss'])
# Esempio n. 3 (Example 3)
# 0
 def on_epoch_end(self, epoch, logs=None):
     """Record the epoch's results into History, then persist and plot them.

     Fix: the original signature used a mutable default (``logs={}``),
     which is shared across every call; the ``None`` sentinel avoids that
     while keeping the call-site interface identical.
     """
     logs = logs if logs is not None else {}
     History.on_epoch_end(self, epoch, logs)
     # NOTE(review): record_results/make_plots are project helpers not
     # visible here — presumably they write artifacts to self.location.
     record_results(self, self.model, self.location)
     make_plots(self, self.model, self.location, self.name)
# Esempio n. 4 (Example 4)
# 0
def train_model(model, data, config, include_tensorboard):
    """Train ``model`` over every dataset in ``data.datasets``, epoch by epoch,
    with checkpointing, early stopping, CSV logging and optional TensorBoard.

    Fixes over the original:
    - misspelled local ``tensorborad`` renamed to ``tensorboard``
    - ``while(... and stop == False)`` rewritten idiomatically
    - broken space+tab indentation in the ``else`` branch normalised

    NOTE(review): ``full_path``, ``last_logs`` and ``average_logs`` are
    project helpers not visible in this chunk; the callback classes look
    like old (pre-2.x) Keras — ``fit_generator`` uses ``nb_epoch`` /
    ``nb_val_samples`` — confirm against the file's imports.
    """
    model_history = History()
    model_history.on_train_begin()

    # Save the best model seen so far after every epoch.
    saver = ModelCheckpoint(full_path(config.model_file()), verbose=1,
                            save_best_only=True, period=1)
    saver.set_model(model)

    early_stopping = EarlyStopping(min_delta=config.min_delta,
                                   patience=config.patience, verbose=1)
    early_stopping.set_model(model)
    early_stopping.on_train_begin()

    csv_logger = CSVLogger(full_path(config.csv_log_file()))
    csv_logger.on_train_begin()

    if include_tensorboard:
        tensorboard = TensorBoard(histogram_freq=10, write_images=True)
        tensorboard.set_model(model)
    else:
        # No-op stand-in so the loop can call the callback unconditionally.
        tensorboard = Callback()

    epoch = 0
    stop = False
    while epoch <= config.max_epochs and not stop:
        epoch_history = History()
        epoch_history.on_train_begin()
        valid_sizes = []
        train_sizes = []
        print("Epoch:", epoch)
        for dataset in data.datasets:
            print("dataset:", dataset.name)
            # Stateful model: clear state between datasets.
            model.reset_states()
            dataset.reset_generators()

            # First pass: train on generator[0] with validation.
            valid_sizes.append(dataset.valid_generators[0].size())
            train_sizes.append(dataset.train_generators[0].size())
            fit_history = model.fit_generator(
                dataset.train_generators[0],
                dataset.train_generators[0].size(),
                nb_epoch=1,
                verbose=0,
                validation_data=dataset.valid_generators[0],
                nb_val_samples=dataset.valid_generators[0].size())
            epoch_history.on_epoch_end(epoch, last_logs(fit_history))

            # Second pass: train on generator[1], no validation — so
            # train_sizes gets two entries per dataset, valid_sizes one.
            train_sizes.append(dataset.train_generators[1].size())
            fit_history = model.fit_generator(
                dataset.train_generators[1],
                dataset.train_generators[1].size(),
                nb_epoch=1,
                verbose=0)
            epoch_history.on_epoch_end(epoch, last_logs(fit_history))

        # Collapse the per-dataset logs into one set of epoch metrics and
        # fan them out to every callback.
        epoch_logs = average_logs(epoch_history, train_sizes, valid_sizes)
        model_history.on_epoch_end(epoch, logs=epoch_logs)
        saver.on_epoch_end(epoch, logs=epoch_logs)
        early_stopping.on_epoch_end(epoch, epoch_logs)
        csv_logger.on_epoch_end(epoch, epoch_logs)
        tensorboard.on_epoch_end(epoch, epoch_logs)
        epoch += 1

        if early_stopping.stopped_epoch > 0:
            stop = True

    early_stopping.on_train_end()
    csv_logger.on_train_end()
    tensorboard.on_train_end({})