Example #1
0
            p.join()
        worker_estimator(args, manager, config, make_env)
        logging.info("pretrain ends here")

    elif args.test:
        logging.debug('test')
        logging.disable(logging.DEBUG)
        current_time = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
        # dir_name = datetime.now().isoformat()
        args.save_dir = os.path.join('model_' + args.gan_type, current_time)
        logging.info(args.save_dir)

        agent = GAN(make_env, args, manager, config, 1, infer=True)
        agent.load(args.load)
        logging.info("model loading finish and start evaluating")
        agent.evaluate(save_dialog=True)
        # agent.evaluate()
        # agent.expert_generator()

    else:  # training
        current_time = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
        # dir_name = datetime.now().isoformat()
        args.save_dir = os.path.join('model_' + args.gan_type, current_time)
        logging.info(args.save_dir)

        # current_time = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
        logging.debug('train {}'.format(current_time))

        agent = GAN(make_env,
                    args,
                    manager,
Example #2
0
def main(num_faces, num_epochs, cuda, verbose=False):
    """Train the masked-face GAN end to end, save artifacts, and report accuracy.

    Pipeline: load faces -> 80/20 train/test split -> build and train the
    GAN -> save models -> save a sample-mask figure -> evaluate and report
    facial-recognition accuracy to stdout and ``../data/accuracy.txt``.

    Args:
        num_faces: number of identities to load; the first 80% train,
            the rest test.
        num_epochs: epochs passed through to ``gan.fit``.
        cuda: if truthy, run on ``cuda:0``; otherwise device is ``None``
            (CPU / development mode).
        verbose: print progress messages when True.
    """
    # set computation device (None/CPU if in development mode, CUDA otherwise)
    device = torch.device("cuda:0") if cuda else None

    # load faces
    masked_dir = "../data/masked"
    masked_suffix = "_Mask.jpg"
    unmasked_dir = "../data/unmasked"
    unmasked_suffix = ".png"
    masked_faces, unmasked_faces, idx_to_face_id = utils.load_faces(
        num_faces, masked_dir, masked_suffix, unmasked_dir, unmasked_suffix)
    if verbose:
        print("loaded {} faces...".format(num_faces))

    # split data into training and testing sets; labels are the face
    # indices themselves, so recognition is index classification.
    # torch.arange yields int64 directly (replaces Tensor(range(...)).long()).
    split = int(0.8 * num_faces)
    train_input = masked_faces[:split]
    train_output = torch.arange(0, split)
    test_input = masked_faces[split:]
    test_output = torch.arange(split, num_faces)
    static_faces = unmasked_faces[:split]

    # instantiate GAN
    generator = Generator(learning_rate=2e-3)
    projector = Projector(load_path="../models/projector.pt")
    discriminator = Discriminator()
    gan = GAN(generator, projector, discriminator, device=device)
    if verbose:
        print("instantiated GAN...")

    # compute and store unmasked discriminator embeddings once up front
    gan.compute_unmasked_embeddings(unmasked_faces)

    # train
    if verbose:
        print("training initiated...")
    gan.fit(train_input,
            static_faces,
            train_output,
            num_epochs=num_epochs,
            verbose=verbose)
    if verbose:
        print("\ntraining complete...")

    # save models with a timestamp suffix so runs do not overwrite each other
    save_dir = "../models"
    suffix = time.strftime("%Y%m%d_%H%M%S")
    gan.save(save_dir, suffix)
    if verbose:
        print("models saved under '{}/<model>_{}'...".format(save_dir, suffix))

    # display sample masks and faces
    _save_sample_figure(gan, masked_faces, idx_to_face_id, device,
                        masked_dir, masked_suffix)

    # evaluate accuracy
    train_accuracy = gan.evaluate(train_input, train_output)
    test_accuracy = gan.evaluate(test_input, test_output)
    masked_accuracy = gan.discriminator_evaluate(masked_faces, unmasked_faces)
    unmasked_accuracy = gan.discriminator_evaluate(unmasked_faces,
                                                   unmasked_faces)

    # build the report once and reuse it for both stdout and the results
    # file (previously duplicated line by line in two places)
    report = _format_accuracy_report(num_faces, train_accuracy, test_accuracy,
                                     masked_accuracy, unmasked_accuracy)
    print("\n" + report)
    with open("../data/accuracy.txt", "w") as file:
        file.write(report)
    if verbose:
        print("\nsaved results...")
        print("done:)")


def _format_accuracy_report(num_faces, train_acc, test_acc, masked_acc,
                            unmasked_acc):
    """Return the multi-line accuracy summary (no trailing newline)."""
    lines = [
        "facial recognition accuracy for...",
        "   random choice:\t\t{:.1f}%".format(100 / num_faaces if False else 100 / num_faces),
        "   training images:\t\t{:.1f}%".format(100 * train_acc),
        "   testing images:\t\t{:.1f}%".format(100 * test_acc),
        "   original masked images:\t{:.1f}%".format(100 * masked_acc),
        "   original unmasked images:\t{:.1f}%".format(100 * unmasked_acc),
    ]
    return "\n".join(lines)


def _save_sample_figure(gan, masked_faces, idx_to_face_id, device,
                        masked_dir, masked_suffix):
    """Save a 2x5 figure: original masked faces (top row) vs. freshly
    generated masks projected onto the same faces (bottom row).

    Writes ../figures/sample_masks.png.
    """
    # NOTE: a bare plt.figure() call previously created an extra empty
    # figure that was never used; plt.subplots() alone is sufficient.
    fig, axes = plt.subplots(2, 5)
    fig.set_figwidth(20)
    fig.set_figheight(7)
    for idx in range(5):
        # original image, re-read from disk via the same dir/suffix
        # convention used when the dataset was loaded
        face_id = idx_to_face_id[idx]
        original_img = Image.open(
            "{}/{}{}".format(masked_dir, face_id, masked_suffix))
        axes[0, idx].imshow(original_img)
        axes[0, idx].get_xaxis().set_ticks([])
        axes[0, idx].get_yaxis().set_ticks([])

        # generated mask image: sample a latent vector on the CUDA path;
        # otherwise fall back to the generator's default sampling
        mask = (gan.generator(torch.rand(1, 100).to(device))
                if device else gan.generator())
        masked_tensor = masked_faces[idx].unsqueeze(0)
        if device:
            masked_tensor = masked_tensor.to(device)
        masked_image = gan.project_mask(mask, masked_tensor, process=True)[0]
        # CHW -> HWC for imshow (replaces two chained torch.transpose calls)
        masked_image = masked_image.permute(1, 2, 0).cpu().detach().numpy()
        axes[1, idx].imshow(masked_image)
        axes[1, idx].get_xaxis().set_ticks([])
        axes[1, idx].get_yaxis().set_ticks([])
    plt.savefig("../figures/sample_masks.png")