"WARNING: You have a CUDA device, so you should probably run with --cuda" ) # need initialize!! G_xvz = _G_xvz() G_vzx = _G_vzx() D_xvs = _D_xvs() G_xvz.apply(weights_init) G_vzx.apply(weights_init) D_xvs.apply(weights_init) train_list = args.data_list train_loader = torch.utils.data.DataLoader(data_loader.ImageList( train_list, transform=transforms.Compose([ transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)) ])), batch_size=args.batch_size, shuffle=True, num_workers=args.workers, pin_memory=True) def L1_loss(x, y): return torch.mean(torch.sum(torch.abs(x - y), 1)) v_siz = 9 z_siz = 128 - v_siz x1 = torch.FloatTensor(args.batch_size, 3, 128, 128)
# The three networks below all inherit from nn.Module.
G_xvz = _G_xvz()
G_vzx = _G_vzx()
D_xvs = _D_xvs()

# Initialize the weights of each network.
G_xvz.apply(weights_init)
G_vzx.apply(weights_init)
D_xvs.apply(weights_init)

train_list = args.data_list  # path to the image list used for training

# DataLoader wraps a torch.utils.data.Dataset (here our customized
# ImageList dataset) so training batches can be loaded efficiently.
train_loader = torch.utils.data.DataLoader(
    data_loader.ImageList(
        train_list,
        transform=transforms.Compose([
            transforms.ToTensor(),  # scales pixel values into the range (0, 1)
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
        ])),
    batch_size=args.batch_size,
    shuffle=True,
    num_workers=args.workers,
    pin_memory=True)


def L1_loss(x, y):
    """Mean over the batch of the per-sample L1 distance (summed over dim 1)."""
    return torch.mean(torch.sum(torch.abs(x - y), 1))


v_siz = 9            # presumably the view-code width — confirm against the model defs
z_siz = 128 - v_siz  # z fills the remainder of a 128-dim code
x1 = torch.FloatTensor(args.batch_size, 3, 128, 128)