# NOTE(review): this line is a collapsed/mangled extract -- several original
# source lines (the tail of a `cycle_variables(input, netG)` function, two
# top-level calls to it, two resize calls, and an if/elif/else dispatch on
# `whom2whom`) were flattened onto one physical line.  As written, Python
# parses ONLY `fn_generate = masked_fake_output`; everything from the first
# `#` onward is one long comment that swallows the rest of the statements.
# The swallowed code cannot be reflowed safely here because its bare
# `return ...` belongs to a function header that is outside this chunk.
#
# Defects visible in the swallowed text, to fix once the file is un-mangled:
#   * `whom2whom is "AtoB"` / `whom2whom is "BtoA"` -- identity comparison
#     against string literals; must be `==`.  `is` only appears to work via
#     CPython string interning and emits a SyntaxWarning on Python 3.8+.
#   * `Image.Resize(real_A, (64, 64))` -- PIL has no `Image.Resize`; resizing
#     is the instance method `im.resize((64, 64))`.  Presumably torchvision's
#     `transforms.Resize` or `real_A.resize(...)` was intended -- TODO confirm
#     against the file's actual imports.
#   * `fn_mask` / `fn_abgr` are assigned `torch.cat(...)` tensors, yet their
#     trailing comments describe them as *functions* of `distorted_A`
#     (`K.function`-style); the code and the comments disagree -- verify which
#     is intended before relying on either.
#   * comment typo: "trainsforming" -> "transforming".
#   * `input` shadows the builtin and is passed to `cycle_variables`; rename
#     when restructuring.
fn_generate = masked_fake_output #A function that takes distorted_A as input and outputs fake_A. fn_mask = torch.cat( (alpha, alpha, alpha), 0) #A function that takes distorted_A as input and outputs mask_A. fn_abgr = torch.cat( (alpha, rgb), 0 ) #A function that takes distorted_A as input and outputs concat([mask_A, fake_A]). return input, fake_output, alpha, fn_generate, fn_mask, fn_abgr A, fake_A, mask_A, path_A, path_mask_A, path_abgr_A = cycle_variables( input, netG_A) B, fake_B, mask_B, path_B, path_mask_B, path_abgr_B = cycle_variables( input, netG_B) real_A = Image.Resize(real_A, (64, 64)) real_B = Image.Resize(real_B, (64, 64)) ################################################################ whom2whom = "BtoA" # default trainsforming faceB to faceA if whom2whom is "AtoB": path_func = path_abgr_B elif whom2whom is "BtoA": path_func = path_abgr_A else: print("whom2whom should be either AtoB or BtoA") ############################################################### use_smoothed_mask = True use_smoothed_bbox = True