Example 1
def get_trainer(N, D_in, H, D_out, data_loader, data_loader_steps):

    device = tu.get_gpu_device_if_available()

    model = eu.get_basic_model(D_in, H, D_out).to(device)

    loss_func = nn.MSELoss(reduction='sum').to(device)

    optimizer = optim.Adam(model.parameters(), lr=1e-4)

    scheduler = DoNothingToLR()  # CAN ALSO USE scheduler=None, BUT DoNothingToLR IS MORE EXPLICIT

    metrics = None  # THIS EXAMPLE DOES NOT USE METRICS, ONLY LOSS

    callbacks = [LossOptimizerHandler(), StatsPrint()]

    trainer = Trainer(model=model,
                      device=device,
                      loss_func=loss_func,
                      optimizer=optimizer,
                      scheduler=scheduler,
                      metrics=metrics,
                      train_data_loader=data_loader,
                      val_data_loader=data_loader,
                      train_steps=data_loader_steps,
                      val_steps=data_loader_steps,
                      callbacks=callbacks,
                      name='Train-Evaluate-Predict-Example')
    return trainer
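
A quick driver for the trainer above. The train/evaluate/predict_data_loader calls follow lpd's Train-Evaluate-Predict example that this trainer is named after; the dimensions and the data-generator helper are hypothetical placeholders, not part of the original snippet.

# minimal usage sketch (assumed lpd API); sizes and helper below are placeholders
N, D_in, H, D_out = 8, 64, 100, 10
data_loader = eu.examples_data_generator(N, D_in, D_out)  # hypothetical helper
data_loader_steps = 100

trainer = get_trainer(N, D_in, H, D_out, data_loader, data_loader_steps)
trainer.train(num_epochs=5)                       # train + validate, callbacks fire each phase
trainer.evaluate(data_loader, data_loader_steps)  # test phase on a held-out loader
predictions = trainer.predict_data_loader(data_loader, data_loader_steps)
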
Example 2
def get_trainer_base(D_in, H, D_out):
    device = tu.get_gpu_device_if_available()

    model = eu.get_basic_model(D_in, H, D_out).to(device)

    loss_func = nn.BCEWithLogitsLoss().to(device)
   
    optimizer = optim.Adam(model.parameters(), lr=1e-4)

    scheduler = DoNothingToLR()  # CAN ALSO USE scheduler=None, BUT DoNothingToLR IS MORE EXPLICIT
    
    metrics = BinaryAccuracyWithLogits(name='Accuracy')

    return device, model, loss_func, optimizer, scheduler, metrics
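
The base components above are meant to be shared across several examples; here is a sketch of wiring them into a Trainer, using the constructor signature shown in Example 3 below. The data loader, step counts, and the name string are assumptions added for illustration.

device, model, loss_func, optimizer, scheduler, metrics = get_trainer_base(D_in, H, D_out)

trainer = Trainer(model=model,
                  device=device,
                  loss_func=loss_func,
                  optimizer=optimizer,
                  scheduler=scheduler,
                  metrics=metrics,
                  train_data_loader=data_loader,   # assumed to exist in scope
                  val_data_loader=data_loader,
                  train_steps=data_loader_steps,
                  val_steps=data_loader_steps,
                  callbacks=[LossOptimizerHandler(), StatsPrint()],
                  name='Binary-Classification-Example')
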
Example 3
    def __init__(self,
                 model,
                 device,
                 loss_func,
                 optimizer,
                 scheduler,
                 metrics,
                 train_data_loader,
                 val_data_loader,
                 train_steps,
                 val_steps,
                 callbacks=None,
                 name='lpd'):
        self.device = device
        self.model = model
        self.loss_func = loss_func
        self.optimizer = optimizer
        self.scheduler = scheduler if scheduler else DoNothingToLR()
        self.metrics = metrics if metrics else []
        self._validate_metrics()
        self.train_data_loader = train_data_loader
        self.val_data_loader = val_data_loader
        self.train_steps = train_steps
        self.val_steps = val_steps
        self.callbacks = callbacks if callbacks else []
        self.name = name

        self.epoch = 0
        self.sample_count = 0
        self.sample_count_in_epoch = 0
        self.iteration = 0
        self.iteration_in_epoch = 0

        self.state = State.EXTERNAL
        self.phase = Phase.IDLE
        self.train_stats = TrainerStats(self.metrics)
        self.train_last_loss = None
        self.val_stats = TrainerStats(self.metrics)
        self.val_last_loss = None
        self.test_stats = TrainerStats(self.metrics)
        self.test_last_loss = None

        self._stopped = False

        self._last_data = {s: InputOutputLabel() for s in State}

        self._total_num_epochs = 0

        # SUMMARY WRITERS CAN'T BE STORED INSIDE THE CALLBACK ITSELF, SINCE THEY AREN'T
        # PICKLABLE (WHICH BREAKS MODEL-CHECKPOINT), SO THE TRAINER HOLDS THEM HERE
        self._summary_writers = {}
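
Given the fallbacks in this constructor (scheduler becomes DoNothingToLR(), metrics and callbacks become empty lists, name defaults to 'lpd'), the minimal construction needs only the required arguments. A sketch, with model, loaders, and step counts assumed to exist in scope:

trainer = Trainer(model=model,
                  device=device,
                  loss_func=loss_func,
                  optimizer=optimizer,
                  scheduler=None,                  # becomes DoNothingToLR()
                  metrics=None,                    # becomes []
                  train_data_loader=train_loader,
                  val_data_loader=val_loader,
                  train_steps=100,
                  val_steps=20)                    # callbacks -> [], name -> 'lpd'
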
Example 4
def get_trainer_base(D_in, H, D_out):
    device = tu.get_gpu_device_if_available()

    model = eu.get_basic_model(D_in, H, D_out).to(device)

    loss_func = nn.BCEWithLogitsLoss().to(device)

    optimizer = optim.Adam(model.parameters(), lr=1e-4)

    scheduler = DoNothingToLR()  # CAN ALSO USE scheduler=None, BUT DoNothingToLR IS MORE EXPLICIT

    metrics = [
        BinaryAccuracyWithLogits(name='Accuracy'),
        InaccuracyWithLogits(name='InAccuracy'),
        TruePositives(num_classes=2, threshold=0.0),
        TrueNegatives(num_classes=2, threshold=0.0),
        Truthfulness(name='Truthfulness')
    ]

    return device, model, loss_func, optimizer, scheduler, metrics
Example 5
def get_trainer_base(D_in, H, D_out, num_classes):
    device = tu.get_gpu_device_if_available()

    model = eu.get_basic_model(D_in, H, D_out).to(device)

    loss_func = nn.CrossEntropyLoss().to(device)

    optimizer = optim.Adam(model.parameters(), lr=1e-4)

    scheduler = DoNothingToLR()  # CAN ALSO USE scheduler=None, BUT DoNothingToLR IS MORE EXPLICIT

    labels = ['Cat', 'Dog', 'Bird']
    metrics = [
        TruePositives(num_classes, labels=labels, threshold=0),
        FalsePositives(num_classes, labels=labels, threshold=0),
        TrueNegatives(num_classes, labels=labels, threshold=0),
        FalseNegatives(num_classes, labels=labels, threshold=0)
    ]

    return device, model, loss_func, optimizer, scheduler, metrics
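
The confusion-matrix style metrics here (and in Example 4) pair naturally with StatsPrint. The print_confusion_matrix flag below is an assumption based on lpd's documentation, not part of the original snippet:

device, model, loss_func, optimizer, scheduler, metrics = get_trainer_base(D_in, H, D_out,
                                                                           num_classes=3)
callbacks = [LossOptimizerHandler(),
             StatsPrint(print_confusion_matrix=True)]  # assumed flag, per lpd docs
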
Example 6
    def load_trainer(dir_path, file_name, model, device, loss_func, optimizer,
                     scheduler, train_data_loader, val_data_loader,
                     train_steps, val_steps):
        full_path = os.path.join(dir_path, file_name)  # join path pieces regardless of trailing separator
        checkpoint = torch.load(full_path, map_location=device)
        print(f'[Trainer] - Loading from {full_path}')
        model.load_state_dict(checkpoint['model'])
        loss_func.load_state_dict(checkpoint['loss_func'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        scheduler = scheduler if scheduler else DoNothingToLR()
        scheduler.load_state_dict(checkpoint['scheduler'])

        trainer = Trainer(model=model,
                          device=device,
                          loss_func=loss_func,
                          optimizer=optimizer,
                          scheduler=scheduler,
                          metrics=checkpoint['metrics'],
                          train_data_loader=train_data_loader,
                          val_data_loader=val_data_loader,
                          train_steps=train_steps,
                          val_steps=val_steps,
                          callbacks=checkpoint['callbacks'],
                          name=checkpoint['name'])

        if 'epoch' in checkpoint:
            trainer.epoch = checkpoint['epoch']
        if 'iteration' in checkpoint:
            trainer.iteration = checkpoint['iteration']
        if 'sample_count' in checkpoint:
            trainer.sample_count = checkpoint['sample_count']
        if 'train_stats' in checkpoint:
            trainer.train_stats = checkpoint['train_stats']
        if 'val_stats' in checkpoint:
            trainer.val_stats = checkpoint['val_stats']
        if 'test_stats' in checkpoint:
            trainer.test_stats = checkpoint['test_stats']

        return trainer
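
A sketch of invoking the loader above. It assumes load_trainer is exposed as a static helper on Trainer, and that the freshly constructed model, loss, and optimizer match the checkpointed architecture; the paths are placeholders:

model = eu.get_basic_model(D_in, H, D_out).to(device)
loss_func = nn.BCEWithLogitsLoss().to(device)
optimizer = optim.Adam(model.parameters(), lr=1e-4)

trainer = Trainer.load_trainer(dir_path='checkpoints/',     # placeholder path
                               file_name='trainer_state',   # placeholder name
                               model=model,
                               device=device,
                               loss_func=loss_func,
                               optimizer=optimizer,
                               scheduler=None,              # falls back to DoNothingToLR()
                               train_data_loader=train_loader,
                               val_data_loader=val_loader,
                               train_steps=100,
                               val_steps=20)
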
Example 7
def get_trainer(D_in, H, D_out, data_loader, data_loader_steps, num_epochs):
    device = tu.get_gpu_device_if_available()

    # Use the nn package to define our model and loss function.
    model = nn.Sequential(Dense(D_in, H, use_bias=True, activation=F.relu),
                          Dense(H, D_out, use_bias=True,
                                activation=None)).to(device)

    loss_func = nn.MSELoss(reduction='sum')

    optimizer = optim.Adam(model.parameters(), lr=1e-4)

    scheduler = DoNothingToLR(optimizer=optimizer)  # CAN ALSO USE scheduler=None, BUT DoNothingToLR IS MORE EXPLICIT

    metric_name_to_func = None  # THIS EXAMPLE DOES NOT USE METRICS, ONLY LOSS

    callbacks = [
        SchedulerStep(),
        EpochEndStats(cb_phase=cbs.CB_ON_EPOCH_END, round_values_on_print_to=7)
    ]

    trainer = Trainer(model=model,
                      device=device,
                      loss_func=loss_func,
                      optimizer=optimizer,
                      scheduler=scheduler,
                      metric_name_to_func=metric_name_to_func,
                      train_data_loader=data_loader,
                      val_data_loader=data_loader,
                      train_steps=data_loader_steps,
                      val_steps=data_loader_steps,
                      num_epochs=num_epochs,
                      callbacks=callbacks,
                      name='Basic-Example')
    return trainer
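
This variant reflects an older lpd API (metric_name_to_func, cb_phase, and num_epochs passed to the constructor). Since the epoch count is fixed at construction time, the training call presumably takes no arguments; a sketch under that assumption:

trainer = get_trainer(D_in, H, D_out, data_loader, data_loader_steps, num_epochs=50)
trainer.train()  # num_epochs was already given to the Trainer constructor
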