from model import UNet, DNet import data_loader from data_loader import * ############################################################## # Initialise the generator and discriminator with the UNet and # DNet architectures respectively. generator = UNet(True) discriminator = DNet() ################################################################## # Utilize GPU for performing all the calculations performed in the # forward and backward passes. Thus allocate all the generator and # discriminator variables on the default GPU device. generator.cuda() discriminator.cuda() ################################################################### # Create ADAM optimizer for the generator as well the discriminator. # Create loss criterion for calculating the L1 and adversarial loss. d_optimizer = optim.Adam(discriminator.parameters(), betas=(0.5, 0.999), lr=0.0002) g_optimizer = optim.Adam(generator.parameters(), betas=(0.5, 0.999), lr=0.0002) d_criterion = nn.BCELoss() g_criterion_1 = nn.BCELoss() g_criterion_2 = nn.L1Loss() train_() def train_(): """
# Parse command-line options (parser is defined earlier in the file).
opt = parser.parse_args()

if torch.cuda.is_available() and not opt.cuda:
    print(
        "WARNING: You have a CUDA device, so you should probably run with --cuda"
    )

# Two generators (A->B and B->A) plus one discriminator per domain,
# sized from the command-line hyper-parameters.
Gnet_AB = GNet(opt.G_init_filter, opt.G_depth, opt.G_width)
Gnet_BA = GNet(opt.G_init_filter, opt.G_depth, opt.G_width)
Dnet_A = DNet(opt.D_init_filter, opt.D_depth)
Dnet_B = DNet(opt.D_init_filter, opt.D_depth)

networks = (Gnet_AB, Gnet_BA, Dnet_A, Dnet_B)

# Move every network to the GPU when requested.
if opt.cuda:
    for net in networks:
        net.cuda()

# Weight initialization from a Gaussian distribution N(0, 0.02).
for net in networks:
    net.apply(weights_init_normal)

# Losses: least-squares adversarial loss, cycle-consistency and identity
# terms (both L1).
L_GAN = nn.MSELoss()
L_cyc = nn.L1Loss()
L_identity = nn.L1Loss()

# NOTE(review): the visible chunk is truncated here — presumably L_cyc and
# L_identity are also moved to the GPU in the lines that follow; confirm
# against the full file.
if opt.cuda:
    L_GAN.cuda()