Example #1
    def __init__(self, *, model: Module, optimizer: Optional[torch.optim.Adam],
                 loss_func: Callable):
        self.loss_func = loss_func
        self.optimizer = optimizer
        self.model = model

        # Track the loss as a histogram and print it to the console
        tracker.set_histogram(".loss", is_print=True)

        # Without an optimizer this is an evaluation run, so store the
        # raw model outputs as a tensor
        if self.optimizer is None:
            tracker.set_tensor('.output')
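
The snippets on this page only declare indicator types; the values themselves are fed to the tracker separately. As a point of reference, here is a minimal, hypothetical sketch (the experiment name and indicator name are made up, not from the examples) of how a declared indicator receives values:

from labml import experiment, tracker

# Declare the type once; every value saved under this name is then
# aggregated as a histogram and printed
tracker.set_histogram('loss', is_print=True)

with experiment.record(name='indicator_demo'):
    for step in range(10):
        # tracker.save takes the global step and a dict of indicator values
        tracker.save(step, {'loss': 1.0 / (step + 1)})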
Example #2
    def run(self):
        # Declare histograms for model parameters and their gradients
        pytorch_utils.add_model_indicators(self.model)

        # Moving average over the last 20 training losses, printed to the console
        tracker.set_queue("train.loss", 20, True)
        tracker.set_histogram("valid.loss", True)
        tracker.set_scalar("valid.accuracy", True)

        for _ in self.training_loop:
            self.train()
            self.test()
            if self.is_log_parameters:
                pytorch_utils.store_model_indicators(self.model)
Example #3
    def run(self):
        # Training and testing
        pytorch_utils.add_model_indicators(self.model)

        tracker.set_queue("train.loss", 20, True)
        tracker.set_histogram("valid.loss", True)
        tracker.set_scalar("valid.accuracy", True)
        # Per-sample stats, indexed by position in the validation set
        tracker.set_indexed_scalar('valid.sample_loss')
        tracker.set_indexed_scalar('valid.sample_pred')

        # Save the validation inputs once, so the indexed stats can be
        # matched back to the samples
        test_data = np.array([d[0].numpy() for d in self.valid_dataset])
        experiment.save_numpy("valid.data", test_data)

        for _ in self.training_loop:
            self.train()
            self.valid()
            if self.is_log_parameters:
                pytorch_utils.store_model_indicators(self.model)
Example #4
    def __init__(self):
        """
        Set tracking indicators
        """
        tracker.set_histogram('strategy.*')
        tracker.set_histogram('average_strategy.*')
        tracker.set_histogram('regret.*')
Example #5
    def __init__(self, *, create_new_history, epochs,
                 is_online_update=False, n_players=2,
                 track_frequency=10,
                 save_frequency=10):
        self.save_frequency = save_frequency
        self.track_frequency = track_frequency
        self.n_players = n_players
        self.is_online_update = is_online_update
        self.epochs = epochs
        self.create_new_history = create_new_history
        self.info_sets = {}

        tracker.set_histogram('strategy.*')
        tracker.set_histogram('average_strategy.*')
        tracker.set_histogram('regret.*')
        tracker.set_histogram('current_regret.*')
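
Examples #4 and #5 declare indicator types with wildcard patterns, so a whole family of indicators (one per information set, say) gets the histogram type without naming each one. A hypothetical illustration (the experiment and indicator names below are made up):

from labml import experiment, tracker

# Any indicator whose name falls under the 'regret.' prefix is a histogram
tracker.set_histogram('regret.*')

with experiment.record(name='cfr_demo'):
    tracker.save(1, {'regret.info_set_a': [0.1, -0.2, 0.3],
                     'regret.info_set_b': [0.0, 0.5]})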
Example #6
def main():
    # set indicator types
    tracker.set_queue("train_loss", 20, True)
    tracker.set_histogram("valid_loss", True)
    tracker.set_scalar("valid_accuracy", True)

    epochs = 10

    train_batch_size = 64
    test_batch_size = 1000

    use_cuda = True
    cuda_device = 0
    seed = 5
    train_log_interval = 10

    learning_rate = 0.01

    # get device
    is_cuda = use_cuda and torch.cuda.is_available()
    if not is_cuda:
        device = torch.device("cpu")
    else:
        if cuda_device < torch.cuda.device_count():
            device = torch.device(f"cuda:{cuda_device}")
        else:
            print(f"Cuda device index {cuda_device} higher than "
                  f"device count {torch.cuda.device_count()}")

            device = torch.device(f"cuda:{torch.cuda.device_count() - 1}")

    # data transform
    data_transform = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Normalize((0.1307, ), (0.3081, ))])

    # train loader
    train_loader = torch.utils.data.DataLoader(
        datasets.MNIST(str(lab.get_data_path()),
                       train=True,
                       download=True,
                       transform=data_transform),
        batch_size=train_batch_size, shuffle=True)

    # test loader
    test_loader = torch.utils.data.DataLoader(
        datasets.MNIST(str(lab.get_data_path()),
                       train=False,
                       download=True,
                       transform=data_transform),
        batch_size=test_batch_size, shuffle=False)

    # set the seed before initializing the model so the weights are reproducible
    torch.manual_seed(seed)

    # model
    model = Net().to(device)

    # optimizer
    optimizer = optim.Adam(model.parameters(), lr=learning_rate)

    # only for logging purposes
    configs = {
        'epochs': epochs,
        'train_batch_size': train_batch_size,
        'test_batch_size': test_batch_size,
        'use_cuda': use_cuda,
        'cuda_device': cuda_device,
        'seed': seed,
        'train_log_interval': train_log_interval,
        'learning_rate': learning_rate,
        'device': device,
        'train_loader': train_loader,
        'test_loader': test_loader,
        'model': model,
        'optimizer': optimizer,
    }

    # create the experiment
    experiment.create(name='tracker')

    # experiment configs
    experiment.calculate_configs(configs)

    # PyTorch model
    experiment.add_pytorch_models(dict(model=model))

    experiment.start()

    # training loop
    for epoch in range(1, epochs + 1):
        train(model, optimizer, train_loader, device, train_log_interval)
        test(model, test_loader, device)
        logger.log()

    # save the model
    experiment.save_checkpoint()
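
The train and test functions are not shown in this example. A minimal, hypothetical train() that feeds the train_loss queue declared above (the loss function and step accounting here are assumptions, not the example's own code) could look like:

import torch.nn.functional as F
from labml import tracker

def train(model, optimizer, train_loader, device, train_log_interval):
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)

        optimizer.zero_grad()
        loss = F.cross_entropy(model(data), target)
        loss.backward()
        optimizer.step()

        # Advance the global step and feed the queue declared with
        # tracker.set_queue("train_loss", 20, True)
        tracker.add_global_step(len(data))
        tracker.save({'train_loss': loss})

        if batch_idx % train_log_interval == 0:
            tracker.save()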
Example #7
def add_model_indicators(model: torch.nn.Module, model_name: str = "model"):
    # Declare a histogram for every trainable parameter and its gradient
    for name, param in model.named_parameters():
        if param.requires_grad:
            tracker.set_histogram(f"{model_name}.{name}")
            tracker.set_histogram(f"{model_name}.{name}.grad")
Example #8
    def startup(self):
        pytorch_utils.add_model_indicators(self.model)

        tracker.set_queue("train.loss", 20, True)
        tracker.set_histogram("valid.loss", True)
        tracker.set_scalar("valid.accuracy", True)
Example #9
def main():
    # ✨ Set the types of the stats/indicators.
    # They default to scalars if not specified
    tracker.set_queue('loss.train', 20, True)
    tracker.set_histogram('loss.valid', True)
    tracker.set_scalar('accuracy.valid', True)

    # Configurations
    configs = {
        'epochs': 10,
        'train_batch_size': 64,
        'valid_batch_size': 100,
        'use_cuda': True,
        'seed': 5,
        'train_log_interval': 10,
        'learning_rate': 0.01,
    }

    is_cuda = configs['use_cuda'] and torch.cuda.is_available()
    if not is_cuda:
        device = torch.device("cpu")
    else:
        device = torch.device(f"cuda:0")

    data_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ])

    train_loader = torch.utils.data.DataLoader(
        datasets.MNIST(str(lab.get_data_path()),
                       train=True,
                       download=True,
                       transform=data_transform),
        batch_size=configs['train_batch_size'], shuffle=True)

    valid_loader = torch.utils.data.DataLoader(
        datasets.MNIST(str(lab.get_data_path()),
                       train=False,
                       download=True,
                       transform=data_transform),
        batch_size=configs['valid_batch_size'], shuffle=False)

    model = Net().to(device)
    optimizer = optim.Adam(model.parameters(), lr=configs['learning_rate'])

    torch.manual_seed(configs['seed'])

    # ✨ Create the experiment
    experiment.create(name='mnist_labml_tracker')

    # ✨ Save configurations
    experiment.configs(configs)

    # ✨ Set PyTorch models for checkpoint saving and loading
    experiment.add_pytorch_models(dict(model=model))

    # ✨ Start and monitor the experiment
    with experiment.start():
        for epoch in range(1, configs['epochs'] + 1):
            train(model, optimizer, train_loader, device, configs['train_log_interval'])
            validate(model, valid_loader, device)
            logger.log()

        # ✨ Save the models while the experiment is still active
        experiment.save_checkpoint()
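
validate() is likewise not shown; a hypothetical version that feeds the loss.valid histogram and accuracy.valid scalar declared at the top of main() (the loss function is an assumption):

import torch
import torch.nn.functional as F
from labml import tracker

def validate(model, valid_loader, device):
    model.eval()
    valid_loss, correct = 0.0, 0
    with torch.no_grad():
        for data, target in valid_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            valid_loss += F.cross_entropy(output, target, reduction='sum').item()
            correct += output.argmax(dim=1).eq(target).sum().item()

    # One value per epoch for each indicator
    n = len(valid_loader.dataset)
    tracker.save({'loss.valid': valid_loss / n, 'accuracy.valid': correct / n})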