コード例 #1
0
    def __init__(self,
                 model,
                 criterion,
                 optimizer,
                 run_name,
                 device=torch.device('cpu')):
        """Store training components and open TensorBoard writers for *run_name*.

        Writers land under ``logs/<run_name>/train`` and ``logs/<run_name>/val``.
        """
        self.run_name = run_name
        self.device = device
        self.model = model
        self.criterion = criterion
        self.optimizer = optimizer

        # create_dirs both creates the run's log directory and returns its path.
        log_root = create_dirs(f'logs/{run_name}')
        self.train_writer = SummaryWriter(join_path(log_root, 'train'))
        self.val_writer = SummaryWriter(join_path(log_root, 'val'))
コード例 #2
0
import numpy as np

from datasets.cifar10_count_dataset import CIFAR10CountDataset
from utils.system import create_dirs

# Each sample is a grid of CIFAR10 images arranged 3x3.
image_grid_distribution = (3, 3)

data_root = './data/CIFAR10'
# Build the train and test splits with identical settings, varying only `train`.
train_set, test_set = (
    CIFAR10CountDataset(data_root,
                        image_grid_distribution,
                        template_view='raw',
                        train=is_train)
    for is_train in (True, False)
)

create_dirs('./data/CIFAR10Count')
create_dirs('./data/CIFAR10Count/templates')

# Save one template image per class, named after the class, from the first sample.
image_grid, templates, counts = train_set[0]
for i, template in enumerate(templates):
    class_name = train_set.class_names[i]
    template.save(f'./data/CIFAR10Count/templates/{class_name}.jpg', 'JPEG')

create_dirs('./data/CIFAR10Count/train')
create_dirs('./data/CIFAR10Count/train/images')
create_dirs('./data/CIFAR10Count/train/counts')
# Export every training sample: image grid as JPEG plus its per-class counts.
with open('./data/CIFAR10Count/train/counts/counts.txt', 'w') as counts_file:
    for i, data in enumerate(train_set):
        image_grid, _, counts = data
        counts = [count[0] for count in counts.astype(np.int32)]
        # BUG FIX: `counts` was computed but never written, leaving counts.txt
        # empty. Persist one space-separated line per sample, in sample order.
        # NOTE(review): line format assumed — confirm against the reader side.
        counts_file.write(' '.join(str(c) for c in counts) + '\n')
        image_grid.save(f'./data/CIFAR10Count/train/images/{i}.jpg', 'JPEG')
コード例 #3
0
    # Reconstruction objective: mean-squared error, optimized with Adam at 1e-3.
    criterion = nn.MSELoss()
    optimizer = optim.Adam(model.parameters(), lr=1e-3)

    init_epoch = 0
    # model = nn.DataParallel(model)
    # Resume training if a checkpoint for this run already exists on disk.
    if file_exists('./trained_models/checkpoints/' + run_name +
                   '_checkpoint.pth'):
        print("Loading checkpoint.", flush=True)
        # NOTE(review): torch.load without map_location assumes the checkpoint's
        # save device is available on this host — confirm for CPU-only machines.
        checkpoint = torch.load('./trained_models/checkpoints/' + run_name +
                                '_checkpoint.pth')
        model.load_state_dict(checkpoint['model_state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        # Continue epoch numbering where the checkpoint left off.
        init_epoch = checkpoint['epoch']
        print("Init epoch:", init_epoch, flush=True)

        # Put the restored model back into training mode.
        model.train()

    # Ensure output directories exist before the trainer tries to checkpoint.
    system.create_dirs('trained_models')
    system.create_dirs('trained_models/checkpoints')
    trainer = VAETrainer(model,
                         criterion,
                         optimizer,
                         run_name,
                         device=device,
                         init_epoch=init_epoch)
    trainer.train(epochs, train_loader, val_loader)

    # Final weights are saved separately from the per-epoch checkpoint file.
    torch.save(model.state_dict(), './trained_models/' + run_name + '.pt')

    # trainer.evaluate(test_loader)