Example #1
    print("ds.shape:", ds.output_shapes())

    steps_per_epoch = ds.get_dataset_size()

    # Build the generator and discriminator and wrap them in the joint Pix2Pix cell.
    netG = get_generator()
    netD = get_discriminator()

    pix2pix = Pix2Pix(generator=netG, discriminator=netD)

    # Attach the discriminator and generator losses to the shared backbone.
    d_loss_fn = D_Loss()
    g_loss_fn = G_Loss()
    d_loss_net = D_WithLossCell(backbone=pix2pix, loss_fn=d_loss_fn)
    g_loss_net = G_WithLossCell(backbone=pix2pix, loss_fn=g_loss_fn)

    # Separate Adam optimizers so each network is updated only from its own loss.
    d_opt = nn.Adam(pix2pix.netD.trainable_params(),
                    learning_rate=get_lr(),
                    beta1=args.beta1,
                    beta2=args.beta2,
                    loss_scale=1)
    g_opt = nn.Adam(pix2pix.netG.trainable_params(),
                    learning_rate=get_lr(),
                    beta1=args.beta1,
                    beta2=args.beta2,
                    loss_scale=1)

    # Bundle both loss networks and optimizers into one training step and switch
    # the combined cell to training mode.
    train_net = TrainOneStepCell(loss_netD=d_loss_net,
                                 loss_netG=g_loss_net,
                                 optimizerD=d_opt,
                                 optimizerG=g_opt,
                                 sens=1)
    train_net.set_train()
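
The TrainOneStepCell used above is a project-specific cell rather than mindspore.nn.TrainOneStepCell: it evaluates both loss networks and applies their gradients with the two separate optimizers in one call. The following is a minimal sketch of how such a dual-optimizer GAN step is typically written in MindSpore; the constructor arguments mirror the call site above, but the body, the construct signature (input_image, target_image) and the returned (d_loss, g_loss) pair are assumptions, not the project's actual implementation.

import mindspore as ms
import mindspore.nn as nn
import mindspore.ops as ops
from mindspore.ops import functional as F

class TrainOneStepCell(nn.Cell):
    # Sketch of an alternating GAN update: one discriminator step and one
    # generator step per call, each driven by its own optimizer.
    def __init__(self, loss_netD, loss_netG, optimizerD, optimizerG, sens=1.0):
        super().__init__()
        self.loss_netD = loss_netD
        self.loss_netG = loss_netG
        self.optimizerD = optimizerD
        self.optimizerG = optimizerG
        self.weights_D = optimizerD.parameters
        self.weights_G = optimizerG.parameters
        self.grad = ops.GradOperation(get_by_list=True, sens_param=True)
        self.sens = ms.Tensor(sens, ms.float32)

    def construct(self, input_image, target_image):
        # Discriminator step: gradients w.r.t. netD parameters only.
        d_loss = self.loss_netD(input_image, target_image)
        d_grads = self.grad(self.loss_netD, self.weights_D)(input_image, target_image, self.sens)
        d_loss = F.depend(d_loss, self.optimizerD(d_grads))
        # Generator step: gradients w.r.t. netG parameters only.
        g_loss = self.loss_netG(input_image, target_image)
        g_grads = self.grad(self.loss_netG, self.weights_G)(input_image, target_image, self.sens)
        g_loss = F.depend(g_loss, self.optimizerG(g_grads))
        return d_loss, g_loss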
Example #2
    print("ds.shape:", ds_val.output_shapes())

    steps_per_epoch = ds_val.get_dataset_size()

    netG = get_generator()
    netD = get_discriminator()

    pix2pix = Pix2Pix(generator=netG, discriminator=netD)

    d_loss_fn = D_Loss()
    g_loss_fn = G_Loss()
    d_loss_net = D_WithLossCell(backbone=pix2pix, loss_fn=d_loss_fn)
    g_loss_net = G_WithLossCell(backbone=pix2pix, loss_fn=g_loss_fn)

    d_opt = nn.Adam(pix2pix.netD.trainable_params(),
                    learning_rate=get_lr(),
                    beta1=args.beta1,
                    beta2=args.beta2,
                    loss_scale=1)
    g_opt = nn.Adam(pix2pix.netG.trainable_params(),
                    learning_rate=get_lr(),
                    beta1=args.beta1,
                    beta2=args.beta2,
                    loss_scale=1)

    train_net = TrainOneStepCell(loss_netD=d_loss_net,
                                 loss_netG=g_loss_net,
                                 optimizerD=d_opt,
                                 optimizerG=g_opt,
                                 sens=1)
    train_net.set_train()
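
Both examples pass get_lr() as the learning_rate of nn.Adam. MindSpore's Adam accepts either a fixed float or a per-step schedule (an iterable or 1-D Tensor), so get_lr() presumably returns one value per training step. The sketch below shows a hypothetical schedule of that kind, using the common pix2pix policy of a constant rate followed by linear decay; the function arguments and their defaults are assumptions, not the project's values.

def get_lr(base_lr=0.0002, total_epochs=200, decay_start_epoch=100, steps_per_epoch=400):
    # Hypothetical per-step schedule: constant for the first epochs, then linear
    # decay toward zero. The resulting list can be passed directly as nn.Adam's
    # learning_rate.
    lrs = []
    for epoch in range(total_epochs):
        if epoch < decay_start_epoch:
            lr = base_lr
        else:
            lr = base_lr * (total_epochs - epoch) / (total_epochs - decay_start_epoch)
        lrs.extend([lr] * steps_per_epoch)
    return lrs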
Example #3
            context.set_auto_parallel_context(device_num=get_group_size(),
                                              parallel_mode=ParallelMode.DATA_PARALLEL,
                                              gradients_mean=True)
        else:
            context.set_context(device_id=args.device_id)
    netG = get_generator()
    netD = get_discriminator()

    pix2pix = Pix2Pix(generator=netG, discriminator=netD)

    d_loss_fn = D_Loss()
    g_loss_fn = G_Loss()
    d_loss_net = D_WithLossCell(backbone=pix2pix, loss_fn=d_loss_fn)
    g_loss_net = G_WithLossCell(backbone=pix2pix, loss_fn=g_loss_fn)

    d_opt = nn.Adam(pix2pix.netD.trainable_params(), learning_rate=get_lr(),
                    beta1=args.beta1, beta2=args.beta2, loss_scale=1)
    g_opt = nn.Adam(pix2pix.netG.trainable_params(), learning_rate=get_lr(),
                    beta1=args.beta1, beta2=args.beta2, loss_scale=1)

    train_net = TrainOneStepCell(loss_netD=d_loss_net, loss_netG=g_loss_net, optimizerD=d_opt, optimizerG=g_opt, sens=1)
    train_net.set_train()

    if not os.path.isdir(args.train_fakeimg_dir):
        os.makedirs(args.train_fakeimg_dir)
    if not os.path.isdir(args.loss_show_dir):
        os.makedirs(args.loss_show_dir)
    if not os.path.isdir(args.ckpt_dir):
        os.makedirs(args.ckpt_dir)

    # Training loop
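    # What follows in the original script is elided here. A minimal loop over the
    # training set might look like the sketch below; the dataset variable, the
    # iterator column names ("input_images", "target_images"), args.epoch_num and
    # the call signature of train_net are assumptions, not the project's code.
    for epoch in range(args.epoch_num):
        for step, data in enumerate(ds.create_dict_iterator()):
            input_image = data["input_images"]
            target_image = data["target_images"]
            d_loss, g_loss = train_net(input_image, target_image)
            if step % 100 == 0:
                print("epoch %d step %d: d_loss=%.4f g_loss=%.4f"
                      % (epoch, step, float(d_loss.asnumpy()), float(g_loss.asnumpy())))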