import os

import cv2
import torch

from pytorch_generative import models


def main(args):
    device = "cuda" if args.use_cuda else "cpu"

    # Rebuild the PixelCNN architecture used for training and restore its weights.
    model = models.PixelCNN(
        in_channels=1,
        out_channels=1,
        n_residual=15,
        residual_channels=16,
        head_channels=32,
    )
    model.load_state_dict(torch.load(args.ckpt, map_location=device))
    model = model.to(device)

    # Draw a batch of samples and write each one out as an 8-bit grayscale PNG.
    tensor = model.sample()
    os.makedirs(args.save_folder, exist_ok=True)
    tensor = torch.squeeze(tensor)
    for i in range(len(tensor)):
        img = tensor[i].detach().cpu().numpy()
        cv2.imwrite(os.path.join(args.save_folder, f"{i}.png"), (img * 255).astype("uint8"))
        print(i, "saved")
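# A minimal sketch of a command-line entry point for main(). The flag names mirror the
# attributes main() reads (ckpt, save_folder, use_cuda); the defaults and help strings
# below are assumptions for illustration, not values from the original script.
if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser(description="Sample images from a trained PixelCNN.")
    parser.add_argument("--ckpt", type=str, required=True, help="Path to a saved state_dict.")
    parser.add_argument("--save_folder", type=str, default="samples", help="Directory for output PNGs.")
    parser.add_argument("--use_cuda", action="store_true", help="Sample on the GPU.")
    main(parser.parse_args())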
    def test_PixelCNN(self):
        model = models.PixelCNN(
            in_channels=3,
            out_channels=3,
            n_residual=1,
            residual_channels=1,
            head_channels=1,
        )
        self._smoke_test(model, in_channels=3)
def reproduce(
    n_epochs=457, batch_size=256, log_dir="/tmp/run", device="cuda", debug_loader=None
):
    """Training script with defaults to reproduce results.

    The code inside this function is self contained and can be used as a top level
    training script, e.g. by copy/pasting it into a Jupyter notebook.

    Args:
        n_epochs: Number of epochs to train for.
        batch_size: Batch size to use for training and evaluation.
        log_dir: Directory where to log trainer state and TensorBoard summaries.
        device: Device to train on (either 'cuda' or 'cpu').
        debug_loader: Debug DataLoader which replaces the default training and
            evaluation loaders if not 'None'. Do not use unless you're writing unit
            tests.
    """
    from torch import optim
    from torch.nn import functional as F
    from torch.optim import lr_scheduler

    from pytorch_generative import datasets
    from pytorch_generative import models
    from pytorch_generative import trainer

    train_loader, test_loader = debug_loader, debug_loader
    if train_loader is None:
        train_loader, test_loader = datasets.get_mnist_loaders(
            batch_size, dynamically_binarize=True
        )

    model = models.PixelCNN(
        in_channels=1,
        out_channels=1,
        n_residual=15,
        residual_channels=16,
        head_channels=32,
    )
    optimizer = optim.Adam(model.parameters(), lr=1e-3)
    scheduler = lr_scheduler.MultiplicativeLR(optimizer, lr_lambda=lambda _: 0.999977)

    def loss_fn(x, _, preds):
        batch_size = x.shape[0]
        x, preds = x.view((batch_size, -1)), preds.view((batch_size, -1))
        loss = F.binary_cross_entropy_with_logits(preds, x, reduction="none")
        return loss.sum(dim=1).mean()

    trainer = trainer.Trainer(
        model=model,
        loss_fn=loss_fn,
        optimizer=optimizer,
        train_loader=train_loader,
        eval_loader=test_loader,
        lr_scheduler=scheduler,
        log_dir=log_dir,
        device=device,
    )
    trainer.interleaved_train_and_eval(n_epochs)
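# A small usage sketch of the debug_loader hook, assuming the Trainer consumes
# (image, label) batches shaped like the binarized MNIST loaders (1x28x28 binary
# inputs). The tensors below are random placeholders, not real data.
import torch
from torch.utils import data

images = torch.bernoulli(torch.full((32, 1, 28, 28), 0.5))
labels = torch.zeros(32, dtype=torch.long)
loader = data.DataLoader(data.TensorDataset(images, labels), batch_size=8)
reproduce(n_epochs=1, log_dir="/tmp/debug_run", device="cpu", debug_loader=loader)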
    def test_PixelCNN_multiple_channels(self):
        model = models.PixelCNN(
            in_channels=3,
            out_channels=3,
            n_residual=1,
            residual_channels=1,
            head_channels=1,
        )
        self._smoke_test(model, in_channels=3)
    def test_PixelCNN(self):
        model = models.PixelCNN(
            in_channels=3,
            out_channels=3,
            n_residual=1,
            residual_channels=1,
            head_channels=1,
        )
        self._test_multiple_channels(model, conditional_sample=True)
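    # The helpers used above (_smoke_test, _test_multiple_channels) are defined elsewhere
    # in the test suite. As a rough, hypothetical sketch of what such a smoke test
    # typically does (the real helpers may differ): run a forward pass on random input of
    # the declared channel count and, when requested, draw a sample conditioned on a
    # partially masked image.
    def _smoke_test_sketch(self, model, in_channels=1, conditional_sample=False):
        import torch

        batch = torch.rand(2, in_channels, 28, 28)
        out = model(batch)  # the forward pass should not raise
        assert out.shape[0] == batch.shape[0]
        if conditional_sample:
            conditioned_on = batch.clone()
            conditioned_on[:, :, 14:, :] = -1  # assumed sentinel for unknown pixels
            model.sample(conditioned_on=conditioned_on)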
def reproduce(
    n_epochs=457, batch_size=256, log_dir="/tmp/run", device="cuda", debug_loader=None
):
    """Training script with defaults to reproduce results.

    The code inside this function is self contained and can be used as a top level
    training script, e.g. by copy/pasting it into a Jupyter notebook.

    Args:
        n_epochs: Number of epochs to train for.
        batch_size: Batch size to use for training and evaluation.
        log_dir: Directory where to log trainer state and TensorBoard summaries.
        device: Device to train on (either 'cuda' or 'cpu').
        debug_loader: Debug DataLoader which replaces the default training and
            evaluation loaders if not 'None'. Do not use unless you're writing unit
            tests.
    """
    from torch import distributions
    from torch import optim
    from torch.nn import functional as F
    from torch.optim import lr_scheduler
    from torch.utils import data
    from torchvision import datasets
    from torchvision import transforms

    from pytorch_generative import models
    from pytorch_generative import trainer

    # Dynamically binarize MNIST: each pixel is resampled as a Bernoulli draw with
    # probability equal to its grayscale intensity.
    transform = transforms.Compose(
        [transforms.ToTensor(), lambda x: distributions.Bernoulli(probs=x).sample()]
    )
    train_loader = debug_loader or data.DataLoader(
        datasets.MNIST("/tmp/data", train=True, download=True, transform=transform),
        batch_size=batch_size,
        shuffle=True,
        num_workers=8,
    )
    test_loader = debug_loader or data.DataLoader(
        datasets.MNIST("/tmp/data", train=False, download=True, transform=transform),
        batch_size=batch_size,
        num_workers=8,
    )

    model = models.PixelCNN(
        in_channels=1,
        out_channels=1,
        n_residual=15,
        residual_channels=16,
        head_channels=32,
    )
    optimizer = optim.Adam(model.parameters(), lr=1e-3)
    scheduler = lr_scheduler.MultiplicativeLR(optimizer, lr_lambda=lambda _: 0.999977)

    def loss_fn(x, _, preds):
        batch_size = x.shape[0]
        x, preds = x.view((batch_size, -1)), preds.view((batch_size, -1))
        loss = F.binary_cross_entropy_with_logits(preds, x, reduction="none")
        return loss.sum(dim=1).mean()

    trainer = trainer.Trainer(
        model=model,
        loss_fn=loss_fn,
        optimizer=optimizer,
        train_loader=train_loader,
        eval_loader=test_loader,
        lr_scheduler=scheduler,
        log_dir=log_dir,
        device=device,
    )
    trainer.interleaved_train_and_eval(n_epochs)
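# The other reproduce() variant calls datasets.get_mnist_loaders(batch_size,
# dynamically_binarize=True). A plausible sketch of such a helper, assuming it simply
# wraps the same dynamically-binarized MNIST pipeline built inline above; the actual
# pytorch_generative implementation may differ in details.
def get_mnist_loaders_sketch(batch_size, dynamically_binarize=False):
    from torch import distributions
    from torch.utils import data
    from torchvision import datasets, transforms

    transform = [transforms.ToTensor()]
    if dynamically_binarize:
        # Resample each pixel from a Bernoulli whose probability is the pixel intensity.
        transform.append(lambda x: distributions.Bernoulli(probs=x).sample())
    transform = transforms.Compose(transform)

    train_loader = data.DataLoader(
        datasets.MNIST("/tmp/data", train=True, download=True, transform=transform),
        batch_size=batch_size,
        shuffle=True,
        num_workers=8,
    )
    test_loader = data.DataLoader(
        datasets.MNIST("/tmp/data", train=False, download=True, transform=transform),
        batch_size=batch_size,
        num_workers=8,
    )
    return train_loader, test_loader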