Example #1
0
    def fit(self,
            dataloader,
            nb_iter=None,
            nb_epoch=None,
            iter_per_epoch=None,
            callbacks=None,
            verbose=0):
        """Trains the underlying Keras model.

        Args:
            dataloader (StandardDataLoader): Manages the loading of data to
                model.
            nb_iter (int): The number of iterations to train the model.
            nb_epoch (int): The number of epochs to train the model.
            iter_per_epoch (int): Defines the number of iterations per epoch.
            callbacks (list): List of Keras callbacks to run during training.
            verbose (int): If > 0, print an in-place progress bar per epoch.
        """
        # Local import, hoisted out of the training loop (the original
        # re-imported time on every verbose iteration).
        import time

        nb_iter, iter_per_epoch = self._get_iterations(nb_iter, nb_epoch,
                                                       iter_per_epoch)
        # None default avoids the shared mutable-default-argument pitfall.
        callbacks = CallbackList(callbacks if callbacks is not None else [])
        callbacks._set_model(self)
        callbacks.on_train_begin()

        try:
            epoch = 0
            self.stop_training = False
            for i in range(nb_iter):
                # Begin epoch
                if i % iter_per_epoch == 0:
                    callbacks.on_epoch_begin(epoch)

                # Execution
                callbacks.on_batch_begin(i)

                if verbose > 0:
                    # Small delay keeps the carriage-return bar readable.
                    time.sleep(0.001)
                    j = i % iter_per_epoch
                    perc = int(100 * (j + 1) / iter_per_epoch)
                    # Floor division: keeps the bar width an int on both
                    # Python 2 and 3 (true `/` would yield a float in Py3).
                    prog = '=' * (perc // 2)
                    string = "[{:50s}] {:3d}%\r".format(prog, perc)
                    sys.stdout.write(string)
                    sys.stdout.flush()

                losses = self.keras_model.train_on_batch(
                    *dataloader.get_training_batch())

                callbacks.on_batch_end(i)

                # End epoch
                if (i + 1) % iter_per_epoch == 0:
                    callbacks.on_epoch_end(epoch, logs={'losses': losses})
                    epoch += 1
                if self.stop_training:
                    break
        except KeyboardInterrupt:
            # Report the abort, then re-raise so the caller sees the interrupt.
            print("\n[BayesNet] Abort: KeyboardInterrupt")
            raise

        callbacks.on_train_end()
Example #2
0
    def fit(self, dataloader, nb_iter=None, nb_epoch=None, iter_per_epoch=None,
            callbacks=None, verbose=0):
        """Trains the underlying Keras model.

        Args:
            dataloader (StandardDataLoader): Manages the loading of data to
                model.
            nb_iter (int): The number of iterations to train the model.
            nb_epoch (int): The number of epochs to train the model.
            iter_per_epoch (int): Defines the number of iterations per epoch.
            callbacks (list): List of Keras callbacks to run during training.
            verbose (int): If > 0, print an in-place progress bar per epoch.
        """
        # Hoisted out of the loop: the original ran `import time` on every
        # verbose iteration.
        import time

        nb_iter, iter_per_epoch = self._get_iterations(
            nb_iter, nb_epoch, iter_per_epoch)
        # None default replaces the mutable default argument `callbacks=[]`.
        callbacks = CallbackList(callbacks if callbacks is not None else [])
        callbacks._set_model(self)
        callbacks.on_train_begin()

        try:
            epoch = 0
            self.stop_training = False
            for i in range(nb_iter):
                # Begin epoch
                if i % iter_per_epoch == 0:
                    callbacks.on_epoch_begin(epoch)

                # Execution
                callbacks.on_batch_begin(i)

                if verbose > 0:
                    # Brief pause so the in-place progress bar stays readable.
                    time.sleep(0.001)
                    j = i % iter_per_epoch
                    perc = int(100 * (j + 1) / iter_per_epoch)
                    # `//` keeps the repeat count an int under Python 3;
                    # plain `/` would make `'=' * float` raise.
                    prog = '=' * (perc // 2)
                    string = "[{:50s}] {:3d}%\r".format(prog, perc)
                    sys.stdout.write(string)
                    sys.stdout.flush()

                losses = self.keras_model.train_on_batch(
                    *dataloader.get_training_batch())

                callbacks.on_batch_end(i)

                # End epoch
                if (i + 1) % iter_per_epoch == 0:
                    callbacks.on_epoch_end(epoch, logs={'losses': losses})
                    epoch += 1
                if self.stop_training:
                    break
        except KeyboardInterrupt:
            # Announce the abort and propagate the interrupt to the caller.
            print("\n[BayesNet] Abort: KeyboardInterrupt")
            raise

        callbacks.on_train_end()
Example #3
0
    # Register the learning-rate scheduler built above (out of view).
    callbacks += [lrs]

    #mcp = ModelCheckpoint('results/experiment' + experiment_name + '_epoch{epoch}_weights.hdf5', save_best_only=True)
    #callbacks += [mcp]

    #lrr = INILearningRateReducer(monitor='val_acc', improve='increase', decrease_factor=0.1, patience=3, stop=3, verbose=1)
    #callbacks += [lrr]

    # Wrap the plain list so events can be broadcast to every callback.
    callbacks = CallbackList(callbacks)

    shuffle_on_epoch_start = True
    # Metric names the callbacks report at epoch end.
    metrics = ['loss', 'acc', 'val_loss', 'val_acc',
               'val_class_acc']  # show those at epoch end
    do_validation = True

    # Hand the model and training parameters to the callback container.
    callbacks._set_model(model)
    callbacks._set_params({
        'batch_size': batch_size,
        'nb_epoch': nb_epoch,
        'nb_sample': nb_train_sample,
        'verbose': 1,
        'do_validation': do_validation,
        'metrics': metrics,
    })

    ##########################
    # TRAINING
    ##########################
    callbacks.on_train_begin()

    model.stop_training = False
    # NOTE(review): everything below duplicates the section above, which looks
    # like a copy/paste artifact. At this point `callbacks` is already a
    # CallbackList, so the `callbacks += [lrs]` two lines down may not behave
    # like list concatenation -- confirm CallbackList supports `+=`.
    lrs = INILearningRateScheduler(schedule, mode='batch', logger=logger)
    callbacks += [lrs]

    #mcp = ModelCheckpoint('results/experiment' + experiment_name + '_epoch{epoch}_weights.hdf5', save_best_only=True)
    #callbacks += [mcp]

    #lrr = INILearningRateReducer(monitor='val_acc', improve='increase', decrease_factor=0.1, patience=3, stop=3, verbose=1)
    #callbacks += [lrr]

    # Re-wrapping an existing CallbackList -- presumably harmless only if the
    # constructor accepts one; verify against the CallbackList implementation.
    callbacks = CallbackList(callbacks)

    shuffle_on_epoch_start = True
    metrics = ['loss', 'acc', 'val_loss', 'val_acc', 'val_class_acc'] # show those at epoch end
    do_validation = True

    callbacks._set_model(model)
    callbacks._set_params({
        'batch_size': batch_size,
        'nb_epoch': nb_epoch,
        'nb_sample': nb_train_sample,
        'verbose': 1,
        'do_validation': do_validation,
        'metrics': metrics,
    })

    ##########################
    # TRAINING
    ##########################
    callbacks.on_train_begin()

    model.stop_training = False