import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torchvision import datasets
from tqdm import tqdm

import config
import utilities
from model import VQVAE

args = config.get_args()
transform = config.get_transform()

# Load the training images from an ImageFolder-style directory.
dataset = datasets.ImageFolder(args.path, transform=transform)
loader = DataLoader(dataset, batch_size=args.batch, shuffle=True, num_workers=0)

model = VQVAE().cuda()
criterion = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=args.lr)

for epoch in range(args.epoch):
    progress = tqdm(loader)
    for i, (img, _) in enumerate(progress):
        img = img.cuda()
        # Generate the attention regions (saliency maps) for the images.
        saliency = utilities.compute_saliency_maps(img, model)
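# utilities.compute_saliency_maps is not shown in this file. The sketch below
# is a hypothetical, minimal gradient-based implementation, assuming the model
# returns either a reconstruction or a tuple whose first element is the
# reconstruction; the actual helper in utilities may differ.
import torch.nn.functional as F

def _saliency_sketch(img, model):
    """Per-pixel saliency: magnitude of the gradient of the reconstruction
    error with respect to the input images."""
    model.eval()
    x = img.detach().clone().requires_grad_(True)
    out = model(x)
    recon = out[0] if isinstance(out, tuple) else out
    # Backpropagate the reconstruction error to the input pixels.
    loss = F.mse_loss(recon, x.detach())
    loss.backward()
    # Collapse the channel dimension so each pixel gets one saliency value.
    return x.grad.abs().max(dim=1)[0]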
import torch
import torch.optim as optim
from torch.autograd import Variable
from torchvision import datasets, transforms

from model import VQVAE

kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}

# MNIST loaders for training and testing.
train_loader = torch.utils.data.DataLoader(
    datasets.MNIST('../data', train=True, download=True,
                   transform=transforms.ToTensor()),
    batch_size=args.batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(
    datasets.MNIST('../data', train=False, transform=transforms.ToTensor()),
    batch_size=args.batch_size, shuffle=True, **kwargs)

model = VQVAE(args.input_dim, args.emb_dim, args.emb_num, args.batch_size)
if args.cuda:
    model.cuda()
optimizer = optim.Adam(model.parameters(), lr=1e-3)


def train(epoch):
    """Run one epoch of training over train_loader."""
    model.train()
    train_loss = 0
    for batch_idx, (data, _) in enumerate(train_loader):
        # Flatten each 28x28 MNIST image into a 784-dimensional vector.
        data = Variable(data).view(-1, 784)
        if args.cuda:
            data = data.cuda()
        # Forward pass: reconstruct the batch and compute the VQ-VAE loss terms.
        recon_batch, reconst_loss, embed_loss, commit_loss = model(data)
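        # The snippet ends after the forward pass. A minimal sketch of how such
        # a step is commonly completed follows, assuming the loss terms are
        # scalar tensors; beta is a hypothetical commitment-loss weight not
        # defined above, and the actual script may combine the terms differently.
        beta = 0.25  # assumed commitment-loss weight
        loss = reconst_loss + embed_loss + beta * commit_loss
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        train_loss += loss.item()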