Example #1
        # activate exactly one attribute (+1) and switch all others off (-1)
        for i in range(args.n_attrs):
            tmp = att_a.clone()
            for j in range(args.n_attrs):
                tmp[:, j] = -1
            tmp[:, i] = 1
            tmp = check_attribute_conflict(tmp, args.attrs[i], args.attrs)
            att_b_list.append(tmp)
    else:
        for i in range(args.n_attrs):
            tmp = att_a.clone()
            tmp[:, i] = 1 - tmp[:, i]  # flip the i-th binary attribute
            tmp = check_attribute_conflict(tmp, args.attrs[i], args.attrs)
            att_b_list.append(tmp)  # one target attribute vector per flipped attribute

    with torch.no_grad():
        samples = [img_a]
        for i, att_b in enumerate(att_b_list):
            att_b_ = (att_b * 2 - 1) * args.thres_int  # map {0, 1} to {-thres_int, +thres_int}
            if i > 0:
                # rescale the flipped attribute from training to testing intensity
                att_b_[..., i - 1] = att_b_[..., i - 1] * args.test_int / args.thres_int
            samples.append(attgan.G(img_a, att_b_))
        samples = torch.cat(samples, dim=3)
        if args.custom_img:
            out_file = test_dataset.images[idx]
        else:
            # CelebA's standard test split starts at image 182638
            out_file = '{:06d}.jpg'.format(idx + 182638)
        vutils.save_image(
            samples, join(output_path, out_file),
            nrow=1, normalize=True, range=(-1., 1.)
        )
        print('{:s} done!'.format(out_file))
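
Every snippet on this page funnels the flipped attribute vectors through check_attribute_conflict, which keeps mutually exclusive CelebA attributes consistent (for example, at most one hair color at a time). A minimal sketch of such a helper, assuming binary {0, 1} attribute rows and covering only two of the exclusivity rules; the actual AttGAN helper handles more cases:

def check_attribute_conflict(att_batch, att_name, att_names):
    """Zero out attributes that conflict with the attribute just flipped.

    Simplified sketch: only hair-color exclusivity and the Bald/Bangs
    conflict are modeled here.
    """
    def _set(att, value, name):
        if name in att_names:
            att[att_names.index(name)] = value

    att_id = att_names.index(att_name)
    hair_colors = ['Black_Hair', 'Blond_Hair', 'Brown_Hair']
    for att in att_batch:  # iterate over the rows of the batch
        if att_name in hair_colors and att[att_id] != 0:
            for other in hair_colors:  # at most one hair color at a time
                if other != att_name:
                    _set(att, 0, other)
        elif att_name == 'Bald' and att[att_id] != 0:
            _set(att, 0, 'Bangs')  # a bald face cannot also have bangs
    return att_batch
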
Example #2
if (it + 1) % args.save_interval == 0:
    # checkpoint only the generator's weights; use save() to also keep D and the optimizers
    attgan.saveG(
        os.path.join('output', args.experiment_name, 'checkpoint',
                     'weights.{:d}.pth'.format(epoch)))
    # attgan.save(os.path.join(
    #     'output', args.experiment_name, 'checkpoint', 'weights.{:d}.pth'.format(epoch)
    # ))
if (it + 1) % args.sample_interval == 0:
    attgan.eval()
    with torch.no_grad():
        samples = [fixed_img_a]
        for i, att_b in enumerate(sample_att_b_list):
            att_b_ = (att_b * 2 - 1) * args.thres_int
            if i > 0:
                att_b_[..., i - 1] = att_b_[..., i - 1] * args.test_int / args.thres_int
            samples.append(attgan.G(fixed_img_a, att_b_))
        samples = torch.cat(samples, dim=3)
        writer.add_image(
            'sample',
            vutils.make_grid(samples, nrow=1, normalize=True,
                             range=(-1., 1.)), it + 1)
        vutils.save_image(samples,
                          os.path.join(
                              'output', args.experiment_name,
                              'sample_training',
                              'Epoch_({:d})_({:d}of{:d}).jpg'.format(
                                  epoch, it % it_per_epoch + 1,
                                  it_per_epoch)),
                          nrow=1, normalize=True, range=(-1., 1.))
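
Both training excerpts (this one and Example #5) log image grids through a writer created outside the snippet. A plausible setup, assuming the standard torch.utils.tensorboard API; the experiment name here is hypothetical, the scripts read it from args.experiment_name:

import os
from torch.utils.tensorboard import SummaryWriter

experiment_name = 'attgan_celeba'  # hypothetical experiment name
writer = SummaryWriter(os.path.join('output', experiment_name, 'summary'))
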
Example #3
    img_unit = ((img_unit * 0.5) + 0.5) * 255  # denormalize from [-1, 1] to [0, 255]
    img_unit = np.uint8(img_unit)
    img_unit = img_unit[::-1, :, :].transpose(1, 2, 0)  # RGB -> BGR, CHW -> HWC for OpenCV
    for i in range(args.n_attrs):
        tmp = att_org.clone()
        tmp[:, i] = 1 - tmp[:, i]
        tmp = check_attribute_conflict(tmp, args.attrs[i], args.attrs)
        att_list.append(tmp)

    if args_.use_model == 'generator':
        with torch.no_grad():
            samples = [img_real]
            for i, att_tar in enumerate(att_list):
                if i > 0:
                    att_diff = att_tar - att_org
                    samples.append(attgan.G(img_real, att_diff))
            samples = torch.cat(samples, dim=3)
            out_file = '{:06d}.jpg'.format(idx)
            vutils.save_image(samples,
                              join(output_path, out_file),
                              nrow=1,
                              normalize=True,
                              range=(-1., 1.))
            print('{:s} done!'.format(out_file))
    elif args_.use_model == 'discriminator':
        with torch.no_grad():
            result = img_unit
            abn_att_real, cabn_att_real = attgan.D(img_real, 'abn')
            for i in range(len(att_list) - 1):
                abn_att = F.interpolate(abn_att_real[i],
                                        size=(mw, mh))  # assumption: default interpolation mode
Example #4
        att_b[0][0] = cv2.getTrackbarPos('Bald',
                                         'Face Attribute Manipulation') / 10
        att_b[0][1] = cv2.getTrackbarPos('Bangs',
                                         'Face Attribute Manipulation') / 10
        att_b[0][2] = cv2.getTrackbarPos('Black_Hair',
                                         'Face Attribute Manipulation') / 10
        att_b[0][3] = cv2.getTrackbarPos('Blond_Hair',
                                         'Face Attribute Manipulation') / 10
        att_b[0][4] = cv2.getTrackbarPos('Brown_Hair',
                                         'Face Attribute Manipulation') / 10
        att_b[0][5] = cv2.getTrackbarPos('Bushy_Eyebrows',
                                         'Face Attribute Manipulation') / 10
        att_b[0][6] = cv2.getTrackbarPos('Eyeglasses',
                                         'Face Attribute Manipulation') / 10
        att_b[0][7] = cv2.getTrackbarPos('Male',
                                         'Face Attribute Manipulation') / 10
        att_b[0][8] = cv2.getTrackbarPos('Mouth_Slightly_Open',
                                         'Face Attribute Manipulation') / 10
        att_b[0][9] = cv2.getTrackbarPos('Mustache',
                                         'Face Attribute Manipulation') / 10
        att_b[0][10] = cv2.getTrackbarPos('No_Beard',
                                          'Face Attribute Manipulation') / 10
        att_b[0][11] = cv2.getTrackbarPos('Pale_Skin',
                                          'Face Attribute Manipulation') / 10
        att_b[0][12] = cv2.getTrackbarPos('Young',
                                          'Face Attribute Manipulation') / 10

        with torch.no_grad():
            att_b_ = (att_b * 2 - 1) * args.thres_int  # slider values -> signed intensities
            img_a = attgan.G(img_ori, att_b_)  # synthesize the edited face
            _, att_test = attgan.D(img_a)  # re-classify attributes of the edited image
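
The cv2.getTrackbarPos calls above presuppose a window with one slider per attribute, created beforehand. A minimal sketch of that setup; the 0-10 slider range matching the / 10 scaling is an assumption:

import cv2

ATTRS = ['Bald', 'Bangs', 'Black_Hair', 'Blond_Hair', 'Brown_Hair',
         'Bushy_Eyebrows', 'Eyeglasses', 'Male', 'Mouth_Slightly_Open',
         'Mustache', 'No_Beard', 'Pale_Skin', 'Young']

cv2.namedWindow('Face Attribute Manipulation')
for name in ATTRS:
    # one slider per attribute; 0..10 so getTrackbarPos(...) / 10 lands in [0, 1]
    cv2.createTrackbar(name, 'Face Attribute Manipulation', 5, 10, lambda v: None)
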
Example #5
        if (it + 1) % args.save_interval == 0:
            # To save storage space, I only checkpoint the weights of G.
            # If you'd like to keep weights of G, D, optim_G, optim_D,
            # please use save() instead of saveG().
            attgan.saveG(
                os.path.join('output', args.experiment_name, 'checkpoint',
                             'weights.{:d}.pth'.format(epoch)))

        if (it + 1) % args.sample_interval == 0:
            attgan.eval()
            with torch.no_grad():
                samples = [fixed_img_real]
                for i, att_trg in enumerate(sample_attr_list):
                    # feed the attribute difference, not the absolute target vector
                    attr_diff = att_trg - sample_attr_list[0]
                    samples.append(attgan.G(fixed_img_real, attr_diff))
                samples = torch.cat(samples, dim=3)
                writer.add_image(
                    'sample',
                    vutils.make_grid(samples,
                                     nrow=1,
                                     normalize=True,
                                     range=(-1., 1.)), it + 1)
                vutils.save_image(samples,
                                  os.path.join(
                                      'output', args.experiment_name,
                                      'sample_training',
                                      'Epoch_({:d})_({:d}of{:d}).jpg'.format(
                                          epoch, it % it_per_epoch + 1,
                                          it_per_epoch)),
                                  nrow=1,
                                  normalize=True,
                                  range=(-1., 1.))
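
Note the encoding difference: Examples #1 and #2 hand the generator an absolute target vector rescaled by thres_int, while this example and Example #6 hand it the difference between target and source attributes. A tiny worked comparison with a 3-attribute vector:

import torch

thres_int = 0.5
att_a = torch.tensor([[1., 0., 0.]])  # source: only attribute 0 is on
att_b = torch.tensor([[1., 1., 0.]])  # target: attribute 1 switched on

absolute = (att_b * 2 - 1) * thres_int  # tensor([[ 0.5,  0.5, -0.5]])
difference = att_b - att_a              # tensor([[ 0.,  1.,  0.]])
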
Example #6
attgan.eval()
for idx, (img_a, att_a) in enumerate(test_dataloader):
    if args.num_test is not None and idx == args.num_test:
        break
    
    img_a = img_a.cuda() if args.gpu else img_a
    att_a = att_a.cuda() if args.gpu else att_a
    att_a = att_a.type(torch.float)
    
    att_b_list = [att_a]
    for i in range(args.n_attrs):
        tmp = att_a.clone()
        tmp[:, i] = 1 - tmp[:, i]
        tmp = check_attribute_conflict(tmp, args.attrs[i], args.attrs)
        att_b_list.append(tmp)

    with torch.no_grad():
        samples = [img_a]
        for i, att_target in enumerate(att_b_list):
            if i > 0:  # index 0 holds the unmodified source attributes
                att_diff = att_target - att_b_list[0]
                samples.append(attgan.G(img_a, att_diff))
        samples = torch.cat(samples, dim=3)
        # CelebA's standard test split starts at image 182638
        out_file = '{:06d}.jpg'.format(idx + 182638)
        vutils.save_image(
            samples, join(output_path, out_file),
            nrow=1, normalize=True, range=(-1., 1.)
        )
        print('{:s} done!'.format(out_file))
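
One portability note: the range= keyword used by vutils.save_image and vutils.make_grid throughout these snippets was renamed value_range= in newer torchvision releases. A version-tolerant call might look like this; the output filename is arbitrary:

import torch
import torchvision.utils as vutils

samples = torch.rand(1, 3, 128, 128) * 2 - 1  # any image batch normalized to [-1, 1]
try:
    vutils.save_image(samples, 'sample.jpg', nrow=1, normalize=True,
                      value_range=(-1., 1.))  # newer torchvision
except TypeError:
    vutils.save_image(samples, 'sample.jpg', nrow=1, normalize=True,
                      range=(-1., 1.))  # older torchvision, as in these snippets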