# Example #1
def run():
    """Train on synthetic embedding data, then print dataset stats and the evaluation."""
    cfg = Config()
    here = os.path.dirname(__file__) + '/'
    tb_dir = here + cfg.TENSORBOARD_DIR
    weights_dir = here + cfg.MODEL_WEIGHTS_DIR

    vocab_size = 10000  # fake data, so any embedding-table size will do
    gu.seed_all(42)  # because it's the answer to life and the universe

    # One full pass over the generator to measure how many steps an epoch takes.
    stats = get_data_stats(data_generator(cfg, vocab_size, num_cycles=1))
    print(stats)
    step_count = stats['steps']

    # Endless generator reused for train, validation, and evaluation.
    endless_gen = data_generator(cfg, vocab_size)

    trainer = get_trainer(cfg,
                          vocab_size,
                          endless_gen,
                          endless_gen,
                          step_count,
                          step_count,
                          weights_dir,
                          cfg.MODEL_WEIGHTS_FILE_NAME,
                          tb_dir)
    trainer.summary()

    trainer.train(cfg.NUM_EPOCHS)

    print(trainer.evaluate(endless_gen, step_count))
# Example #2
def run():
    """End-to-end demo: train, evaluate, then predict on a sample, a batch, and a loader."""
    gu.seed_all(42)  # because it's the answer to life and the universe

    n, d_in, h, d_out, epochs, loader, loader_steps = get_parameters()

    trainer = get_trainer(n, d_in, h, d_out, loader, loader_steps)
    trainer.summary()

    trainer.train(epochs)

    print(trainer.evaluate(loader, loader_steps))

    prediction_gen = eu.examples_prediction_data_generator(loader, loader_steps)

    # predict on a single sample
    single_sample = next(prediction_gen)[0]
    sample_prediction = trainer.predict_sample(single_sample)

    # predict on a single batch
    single_batch = next(prediction_gen)
    batch_prediction = trainer.predict_batch(single_batch)

    # predict on a whole data loader
    loader_predictions = trainer.predict_data_loader(prediction_gen, loader_steps)
# Example #3
def run():
    """Minimal demo: build a trainer from the example parameters and train it."""
    gu.seed_all(42)  # because it's the answer to life and the universe

    n, d_in, h, d_out, epochs, loader, loader_steps = get_parameters()
    trainer = get_trainer(n, d_in, h, d_out, loader, loader_steps)
    trainer.train(epochs)
# Example #4
def run():
    """Train with module-level ``params`` and evaluate on ``test_data_loader``.

    NOTE(review): ``params`` and ``test_data_loader`` are module globals defined
    outside this snippet — confirm against the original file.
    """
    gu.seed_all(42)  # because it's the answer to life and the universe

    trainer = get_trainer(params)
    trainer.summary()

    trainer.train(params['num_epochs'])

    result = trainer.evaluate(test_data_loader, len(test_data_loader))
    print(result)
# Example #5
def run():
    """Train with a per-batch scheduler step, then evaluate."""
    gu.seed_all(42)  # because it's the answer to life and the universe

    n, d_in, h, d_out, epochs, loader, loader_steps = get_parameters()

    trainer = get_trainer(n, d_in, h, d_out, loader, loader_steps)
    trainer.summary()

    # SchedulerStep is expected to invoke StepLR at the end of every batch.
    trainer.train(epochs)

    trainer.evaluate(loader, loader_steps)
# Example #6
def run():
    """Train twice, once per gradient-accumulation strategy, to compare the two handlers."""
    gu.seed_all(42)  # because it's the answer to life and the universe

    n, d_in, h, d_out, epochs, loader, loader_steps = get_parameters()

    # Strategy 1: accumulate a fixed number of batches before stepping the optimizer.
    for handler_name in ('LossOptimizerHandlerAccumulateBatches',
                         'LossOptimizerHandlerAccumulateSamples'):
        trainer = get_trainer(n, d_in, h, d_out, loader, loader_steps, handler_name)
        trainer.train(epochs)
# Example #7
def run():
    """Build a trainer over random regression data, train it, and evaluate."""
    gu.seed_all(42)  # because it's the answer to life and the universe

    # batch_size is N; input_dim is D_in; hidden_dim is H; output_dim is D_out.
    batch_size, input_dim, hidden_dim, output_dim = 64, 1000, 100, 10
    epochs = 50
    loader = data_generator(batch_size, input_dim, output_dim)
    loader_steps = 100

    trainer = get_trainer(input_dim, hidden_dim, output_dim, loader, loader_steps,
                          epochs)
    trainer.summary()

    trainer.train()

    trainer.evaluate(loader, loader_steps)
# Example #8
import os
import torch.optim as optim
import torch.nn as nn

from lpd.trainer import Trainer
from lpd.callbacks import SchedulerStep, StatsPrint, ModelCheckPoint, LossOptimizerHandler, CallbackMonitor, Tensorboard
from lpd.extensions.custom_schedulers import DoNothingToLR
from lpd.enums import Phase, State, MonitorType, StatsType, MonitorMode, MetricMethod
from lpd.metrics import BinaryAccuracyWithLogits, MetricBase, TruePositives, TrueNegatives, MetricConfusionMatrixBase
import lpd.utils.torch_utils as tu
import lpd.utils.general_utils as gu
import examples.utils as eu

# Seed all RNGs at import time so every example in this module is reproducible.
gu.seed_all(42)  # BECAUSE ITS THE ANSWER TO LIFE AND THE UNIVERSE

# Checkpoint location and base file name, resolved relative to this file.
save_to_dir = os.path.dirname(__file__) + '/trainer_checkpoint/'
trainer_file_name = 'trainer'


def get_parameters():
    """Return model dimensions, epoch count, and a synthetic binary-output data loader.

    Returns the tuple ``(N, D_in, H, D_out, num_epochs, data_loader, data_loader_steps)``
    where N is batch size, D_in input dimension, H hidden dimension, D_out output dimension.
    """
    batch_size, input_dim, hidden_dim, output_dim = 64, 100, 100, 1
    epochs = 5
    steps_per_epoch = 100
    loader = eu.examples_data_generator(batch_size, input_dim, output_dim,
                                        binary_out=True)
    return batch_size, input_dim, hidden_dim, output_dim, epochs, loader, steps_per_epoch


# LET'S CREATE A CUSTOM (ALTOUGH NOT SO INFORMATIVE) METRIC
class InaccuracyWithLogits(MetricBase):
# Example #9
    def test_save_and_load(self):
        """Train with checkpointing, reload the saved trainer, and verify that the
        epoch counter, optimizer learning rates, and the checkpoint monitor's best
        value all survive the save/load round trip."""
        gu.seed_all(42)
        checkpoint_dir = os.path.dirname(__file__) + '/trainer_checkpoint/'
        checkpoint_name = 'trainer'

        device = tu.get_gpu_device_if_available()
        net = eu.get_basic_model(10, 10, 10).to(device)
        criterion = nn.CrossEntropyLoss().to(device)
        optimizer = optim.Adam(net.parameters(), lr=1e-4)
        lr_scheduler = KerasDecay(optimizer, 0.0001, last_step=-1)
        accuracy = CategoricalAccuracyWithLogits(name='acc')

        # ModelCheckPoint must stay at index 1 — the final assertion reads callbacks[1].
        callbacks = [
            LossOptimizerHandler(),
            ModelCheckPoint(checkpoint_dir=checkpoint_dir,
                            checkpoint_file_name=checkpoint_name,
                            callback_monitor=CallbackMonitor(monitor_type=MonitorType.LOSS,
                                                             stats_type=StatsType.VAL,
                                                             monitor_mode=MonitorMode.MIN),
                            save_best_only=False,
                            save_full_trainer=True,
                            verbose=0),
            SchedulerStep(apply_on_phase=Phase.BATCH_END, apply_on_states=State.TRAIN),
            StatsPrint(),
        ]

        loader = eu.examples_data_generator(10, 10, 10, category_out=True)
        steps = 100
        epochs = 5

        trainer = Trainer(model=net,
                          device=device,
                          loss_func=criterion,
                          optimizer=optimizer,
                          scheduler=lr_scheduler,
                          metrics=accuracy,
                          train_data_loader=loader,
                          val_data_loader=loader,
                          train_steps=steps,
                          val_steps=steps,
                          callbacks=callbacks,
                          name='Trainer-Test')

        trainer.train(epochs, verbose=0)

        # save_full_trainer=True writes a complete trainer snapshot per epoch;
        # reload the last one and compare against the live trainer.
        restored = Trainer.load_trainer(dir_path=checkpoint_dir,
                                        file_name=checkpoint_name + f'_epoch_{epochs}',
                                        model=net,
                                        device=device,
                                        loss_func=criterion,
                                        optimizer=optimizer,
                                        scheduler=lr_scheduler,
                                        train_data_loader=loader,
                                        val_data_loader=loader,
                                        train_steps=steps,
                                        val_steps=steps)

        self.assertEqual(restored.epoch, trainer.epoch)
        self.assertListEqual(tu.get_lrs_from_optimizer(restored.optimizer),
                             tu.get_lrs_from_optimizer(trainer.optimizer))
        self.assertEqual(restored.callbacks[1].monitor._get_best(),
                         trainer.callbacks[1].monitor._get_best())