Example #1
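The imports this snippet relies on are not shown; a minimal sketch would look like the following, where Generator, Generator_ori, Discriminator and weights_init_normal are assumed to come from the repository's own models/utils modules:

# Assumed imports (the model classes and weight-init helper are presumed to live
# in the repo's models.py / utils.py)
import argparse
import random

import torch

from models import Generator, Generator_ori, Discriminator
from utils import weights_init_normal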
parser = argparse.ArgumentParser()
parser.add_argument('--seed', type=int, default=None)
parser.add_argument('--input_nc', type=int, default=3, help='number of channels of input data')
parser.add_argument('--output_nc', type=int, default=3, help='number of channels of output data')
parser.add_argument('--upsample', default='transconv', choices=['ori', 'transconv', 'nearest', 'bilinear'], help='which upsample method to use in generator')
opt = parser.parse_args()
print(opt)

# Seed the Python, PyTorch CPU and CUDA RNGs for reproducibility; skip when no
# seed was given, since torch.manual_seed(None) would raise a TypeError
if opt.seed is not None:
    random.seed(opt.seed)
    torch.manual_seed(opt.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(opt.seed)

# Networks: select the generator variant according to the --upsample option
if opt.upsample == 'ori':
    netG_A2B = Generator_ori(opt.input_nc, opt.output_nc)
    netG_B2A = Generator_ori(opt.output_nc, opt.input_nc)
else:
    netG_A2B = Generator(opt.input_nc, opt.output_nc)
    netG_B2A = Generator(opt.output_nc, opt.input_nc)
netD_A = Discriminator(opt.input_nc)
netD_B = Discriminator(opt.output_nc)

netG_A2B.cuda()
netG_B2A.cuda()
netD_A.cuda()
netD_B.cuda()

netG_A2B.apply(weights_init_normal)
netG_B2A.apply(weights_init_normal)
netD_A.apply(weights_init_normal)
Example #2
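This excerpt starts after argument parsing, so args is assumed to be an already-parsed argparse namespace; a sketch of the imports it needs, with the model classes and weight-init helper presumed to come from the repo's models/utils modules:

# Assumed imports (args is presumed to come from an earlier parse_args() call)
import os

from models import Generator, Generator_ori, Discriminator
from utils import weights_init_normal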
## Create output directories:
dataset_dir = os.path.join('datasets', args.dataset)
output_dir = os.path.join(
    'output_{}_{}_oneshot'.format(args.upsample, args.percent), args.dataset)
img_dir = os.path.join(output_dir, 'imgs')
pth_dir = os.path.join(output_dir, 'pth')
if not os.path.isdir(img_dir):
    os.makedirs(img_dir)
if not os.path.isdir(pth_dir):
    os.makedirs(pth_dir)

###### Definition of variables ######
# Networks
if args.upsample == 'ori':
    netG_A2B = Generator_ori(args.input_nc, args.output_nc)
    netG_B2A = Generator_ori(args.output_nc, args.input_nc)
else:
    netG_A2B = Generator(args.input_nc, args.output_nc)
    netG_B2A = Generator(args.output_nc, args.input_nc)
netD_A = Discriminator(args.input_nc)
netD_B = Discriminator(args.output_nc)

netG_A2B.apply(weights_init_normal)
netG_B2A.apply(weights_init_normal)
netD_A.apply(weights_init_normal)
netD_B.apply(weights_init_normal)

netG_A2B.cuda()
netG_B2A.cuda()
netD_A.cuda()
Example #3
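This excerpt begins by adding extra arguments to a parser that is assumed to have been created earlier with the shared options (--gpu, --dataset, --upsample, --input_nc, --output_nc); a sketch of the imports it relies on, with the model classes presumed to come from the repo's models module:

# Assumed imports (parser is presumed to be an argparse.ArgumentParser built earlier)
import argparse
import os

import torch

from models import Generator, Generator_ori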
parser.add_argument('--evaluate-all', action="store_true")
parser.add_argument("--percent", type=float)
parser.add_argument("--extra-suffix", type=str, default=None)
parser.add_argument('--pruned', action="store_true")
# parser.add_argument('--model_id', type=int, default=199, help='indicate the model id to specify the x epoch\'s models')
opt = parser.parse_args()
print(opt)
os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpu
dataset_dir = os.path.join('datasets', opt.dataset)
from utils import validate

###### Definition of variables ######
# Networks
with torch.no_grad():
    if opt.upsample == 'ori':
        netG_A2B = Generator_ori(opt.input_nc, opt.output_nc)
        netG_B2A = Generator_ori(opt.output_nc, opt.input_nc)
    else:
        netG_A2B = Generator(opt.input_nc, opt.output_nc)
        netG_B2A = Generator(opt.output_nc, opt.input_nc)

netG_A2B.cuda()
netG_B2A.cuda()

# Load state dicts
if opt.extra_suffix is not None:
    path_to_dir = "output_{}_{}_{}".format(opt.upsample, opt.percent,
                                           opt.extra_suffix)
elif opt.percent is None:
    path_to_dir = "output_{}".format(opt.upsample)
else:
Example #4
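Here opt is assumed to be an argparse namespace parsed earlier in the script (carrying dataset, upsample, input_nc, output_nc, rand and seed); a sketch of the imports the excerpt needs, with the model classes presumed to come from the repo's models module:

# Assumed imports (opt is presumed to come from an earlier parse_args() call)
import os

import torch

from models import Generator, Generator_ori, Discriminator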
torch.backends.cudnn.benchmark = True

## Create output directories:
dataset_dir = os.path.join('datasets', opt.dataset)
output_dir = os.path.join('output_%s' % opt.upsample, opt.dataset)
img_dir = os.path.join(output_dir, 'imgs')
pth_dir = os.path.join(output_dir, 'pth')
if not os.path.isdir(img_dir):
    os.makedirs(img_dir)
if not os.path.isdir(pth_dir):
    os.makedirs(pth_dir)

###### Definition of variables ######
# Networks
if opt.upsample == 'ori':
    netG_A2B = Generator_ori(opt.input_nc, opt.output_nc)
    netG_B2A = Generator_ori(opt.output_nc, opt.input_nc)
else:
    netG_A2B = Generator(opt.input_nc, opt.output_nc)
    netG_B2A = Generator(opt.output_nc, opt.input_nc)
netD_A = Discriminator(opt.input_nc)
netD_B = Discriminator(opt.output_nc)

netG_A2B.load_state_dict(
    torch.load(
        os.path.join(opt.rand, 'netG_A2B_seed_{}.pth.tar'.format(opt.seed))))
netG_B2A.load_state_dict(
    torch.load(
        os.path.join(opt.rand, 'netG_B2A_seed_{}.pth.tar'.format(opt.seed))))
netD_A.load_state_dict(
    torch.load(
Example #5
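As in the previous example, parser and the shared options are assumed to be defined above this excerpt; a sketch of the imports it relies on, with the generator classes presumed to come from the repo's models module:

# Assumed imports (parser and the shared options are presumed to be defined earlier)
import argparse
import os

import torch

from models import Generator, Generator_ori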
parser.add_argument('--gpu', default='3')
parser.add_argument('--input-dir', type=str)
parser.add_argument('--output-dir', type=str)
parser.add_argument('--evaluate-all', action="store_true")
# parser.add_argument('--model_id', type=int, default=199, help='indicate the model id to specify the x epoch\'s models')
opt = parser.parse_args()
print(opt)
os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpu
dataset_dir = os.path.join('datasets', opt.dataset)
from utils import validate

###### Definition of variables ######
# Networks
with torch.no_grad():
    if opt.upsample == 'ori':
        netG_A2B = Generator_ori(opt.input_nc, opt.output_nc)
        netG_B2A = Generator_ori(opt.output_nc, opt.input_nc)
    else:
        netG_A2B = Generator(opt.input_nc, opt.output_nc)
        netG_B2A = Generator(opt.output_nc, opt.input_nc)

netG_A2B.cuda()
netG_B2A.cuda()

# Load state dicts
if not opt.evaluate_all:
    generator_A2B = os.path.join(opt.input_dir, opt.dataset, 'pth', 'netG_A2B_epoch_%d.pth' % 199)
    generator_B2A = os.path.join(opt.input_dir, opt.dataset, 'pth', 'netG_B2A_epoch_%d.pth' % 199)
    #generator_A2B  = "output/netG_A2B.pth"
    #generator_B2A  = "output/netG_B2A.pth"
    netG_A2B.load_state_dict(torch.load(generator_A2B))