def gender_swap():
    img_filename = os.path.join(UPLOAD_FOLDER, "image.jpg")

    # Align the face (dlib shape predictor), then convert to a batched CHW tensor.
    aligned_img = align_face(img_filename, shape_predictor)[0]
    img = transform(aligned_img).unsqueeze(0)

    # Build two generators with the same architecture and load the
    # direction-specific pretrained weights.
    model_to_male = define_G(**config_G)
    pretrained_dict_to_male = torch.load(weights_path_to_male)

    model_to_male.load_state_dict(pretrained_dict_to_male)
    model_to_male.cuda()
    model_to_male.eval()  # inference mode

    model_to_female = define_G(**config_G)
    pretrained_dict_to_female = torch.load(weights_path_to_female)

    model_to_female.load_state_dict(pretrained_dict_to_female)
    model_to_female.cuda()
    model_to_female.eval()  # inference mode

    with torch.no_grad():
        out_male = model_to_male(img.cuda())
        out_female = model_to_female(img.cuda())

    out_male = util.tensor2im(out_male.data[0])
    out_female = util.tensor2im(out_female.data[0])

    path_male = os.path.join(THIS_FOLDER, RESULTS_FOLDER, 'result_male.jpg')
    path_female = os.path.join(THIS_FOLDER, RESULTS_FOLDER, 'result_female.jpg')
    imageio.imsave(path_male, out_male)
    imageio.imsave(path_female, out_female)
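
For context, gender_swap() relies on helpers and globals defined elsewhere in the project. The sketch below shows one plausible setup; the folder names and transform parameters are assumptions, not taken from the source.

import os
import torch
import imageio
from torchvision import transforms

# Hypothetical module-level setup assumed by gender_swap() above.
THIS_FOLDER = os.path.dirname(os.path.abspath(__file__))
UPLOAD_FOLDER = os.path.join(THIS_FOLDER, 'uploads')  # assumed location
RESULTS_FOLDER = 'results'                            # assumed location

# A typical pix2pix-style preprocessing pipeline (assumed values):
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])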
Example #2
def texture():
    #while True:
    time.sleep(.1)
    data_loader = CreateDataLoader(opt)
    data_loader.dataset.A_paths.sort(key=lambda x: os.path.getctime(str(x)),
                                     reverse=True)

    dataset = data_loader.load_data()
    visualizer = Visualizer(opt)
    # create website
    web_dir = os.path.join(opt.results_dir, opt.name,
                           '%s_%s' % (opt.phase, opt.which_epoch))
    webpage = html.HTML(
        web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' %
        (opt.name, opt.phase, opt.which_epoch))
    for i, data in enumerate(dataset):

        if i >= 1:
            break
        if opt.data_type == 16:
            data['label'] = data['label'].half()
            data['inst'] = data['inst'].half()
        elif opt.data_type == 8:
            # PyTorch tensors expose .byte() for uint8; there is no .uint8() method.
            data['label'] = data['label'].byte()
            data['inst'] = data['inst'].byte()
        if opt.export_onnx:
            print("Exporting to ONNX: ", opt.export_onnx)
            assert opt.export_onnx.endswith(
                ".onnx"), "Export model file should end with .onnx"
            torch.onnx.export(model, [data['label'], data['inst']],
                              opt.export_onnx,
                              verbose=True)
            exit(0)
    check4ModelRequest(modelLoaded)

    minibatch = 4
    generated = model.inference(data['label'], data['inst'])

    visuals = OrderedDict([
        ('input_label', util.tensor2label(data['label'][0], opt.label_nc)),
        ('output_image', util.tensor2im(generated.data[0]))
    ])
    img_path = data['path']
    print('process image... %s' % img_path)
    visualizer.save_images(webpage, visuals, img_path)
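
The opt.data_type branches above simply down-cast the inputs before inference. A minimal illustration of the two casts, with an assumed label-map shape:

import torch

t = torch.randn(1, 35, 512, 512)  # assumed shape; the labels are CHW maps
t_fp16 = t.half()  # opt.data_type == 16: half-precision floats
t_u8 = t.byte()    # opt.data_type == 8: .byte() performs the uint8 cast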
Example #3
    elif opt.data_type == 8:
        # PyTorch tensors expose .byte() for uint8; there is no .uint8() method.
        data['label'] = data['label'].byte()
        data['inst'] = data['inst'].byte()
    if opt.export_onnx:
        print("Exporting to ONNX: ", opt.export_onnx)
        assert opt.export_onnx.endswith(
            ".onnx"), "Export model file should end with .onnx"
        torch.onnx.export(model, [data['label'], data['inst']],
                          opt.export_onnx,
                          verbose=True)
        exit(0)
    minibatch = 1
    if opt.engine:
        generated = run_trt_engine(opt.engine, minibatch,
                                   [data['label'], data['inst']])
    elif opt.onnx:
        generated = run_onnx(opt.onnx, opt.data_type, minibatch,
                             [data['label'], data['inst']])
    else:
        generated = model.inference(data['label'], data['inst'], data['image'])

    visuals = OrderedDict([
        ('input_label', util.tensor2label(data['label'][0], opt.label_nc)),
        ('synthesized_image', util.tensor2im(generated.data[0]))
    ])
    img_path = data['path']
    print('process image... %s' % img_path)
    visualizer.save_images(webpage, visuals, img_path)

webpage.save()
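
The opt.onnx branch delegates to run_onnx from run_engine, whose implementation is not shown here. A rough onnxruntime-based equivalent (an assumption, not the project's actual helper) might look like:

import numpy as np
import onnxruntime as ort

def run_onnx_sketch(onnx_path, tensors):
    # Hypothetical stand-in for run_engine.run_onnx: bind each exported graph
    # input to the matching torch tensor, then run a single forward pass.
    session = ort.InferenceSession(onnx_path)
    feeds = {node.name: t.cpu().numpy().astype(np.float32)
             for node, t in zip(session.get_inputs(), tensors)}
    return session.run(None, feeds)[0]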
Example #4
# Build the generator and load the pretrained weights.
model = define_G(**config_G)
pretrained_dict = torch.load(weights_path)
model.load_state_dict(pretrained_dict)
model.cuda()
model.eval()  # inference mode

# ## Use model


img = transform(aligned_img).unsqueeze(0)

with torch.no_grad():
    out = model(img.cuda())

out = util.tensor2im(out.data[0])

# ## Result


plt.figure(figsize=(10, 5))

plt.subplot(1, 2, 1)
plt.imshow(aligned_img)
plt.axis('off')

plt.subplot(1, 2, 2)
plt.imshow(out)
plt.axis('off')
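
util.tensor2im, used throughout these examples, maps a normalized CHW tensor back to a displayable uint8 HWC array. A sketch of the usual pix2pixHD-style behavior (assumed, since util itself is not shown):

import numpy as np

def tensor2im_sketch(t):
    # Assumed behavior: CHW float tensor in [-1, 1] -> HWC uint8 image.
    arr = t.cpu().float().numpy()
    arr = (np.transpose(arr, (1, 2, 0)) + 1) / 2.0 * 255.0
    return arr.astype(np.uint8)

When running this example as a plain script rather than a notebook cell, finish with plt.show() so the figure actually appears.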
Example #5
web_dir = os.path.join(opt.results_dir, opt.name, '%s_%s' % (opt.phase, opt.which_epoch))
webpage = html.HTML(web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' % (opt.name, opt.phase, opt.which_epoch))
# test

caffe.set_device(0)
caffe.set_mode_gpu()

net = caffe.Net('DeepHarmonization/model/deploy_512.prototxt',
                'DeepHarmonization/model/harmonize_iter_200000.caffemodel', caffe.TEST)  # noqa

for i, data in enumerate(dataset):
    if i >= opt.how_many:
        break
    generated = model.inference(data['label'], data['inst'])
    visuals = OrderedDict([('input_label', util.tensor2label(data['image_composite_with_background'][0], opt.label_nc)),
                           ('input_mask', util.tensor2im(data['mask_composite_object'][0], normalize=False)),
                           ('synthesized_image', util.tensor2im(generated.data[0]))])

    im = visuals['synthesized_image']  # The size is opt.fineSize x opt.fineSize x 3. The range is 0-255.
    mask = visuals['input_mask']  # The size should be opt.fineSize x opt.fineSize x 3, and the range should be 0-255.
    mask = mask[:, :, 0]  # Keep only the first channel, reducing the mask to opt.fineSize x opt.fineSize.

    # scipy.misc.imresize was removed in SciPy 1.3; Pillow is a drop-in replacement here.
    from PIL import Image
    im = np.array(Image.fromarray(im).resize((512, 512)))
    mask = np.array(Image.fromarray(mask).resize((512, 512)))

    im = im.astype(float)
    mask = mask.astype(float)

    im = im[:, :, ::-1]  # RGB -> BGR (Caffe convention)
    im -= np.array((104.00699, 116.66877, 122.67892))  # subtract per-channel BGR means
    im = im.transpose((2, 0, 1))  # HWC -> CHW
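
The fragment ends after preprocessing. Feeding the result into the harmonization net would typically look like the sketch below; the blob names 'data' and 'mask' are guesses, so check DeepHarmonization/model/deploy_512.prototxt for the real ones.

# Hedged continuation; blob names are hypothetical.
net.blobs['data'].data[...] = im[np.newaxis, ...]
net.blobs['mask'].data[...] = mask[np.newaxis, np.newaxis, ...]
out = net.forward()
harmonized = out[list(out.keys())[0]]  # first output blob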
Example #6
        model.type(torch.uint8)

    if opt.verbose:
        print(model)
else:
    from run_engine import run_trt_engine, run_onnx

prev_generation = None
for i, data in enumerate(dataset):
    # if i >= opt.how_many:
    #     break
    if opt.data_type == 16:
        data['label'] = data['label'].half()
    elif opt.data_type == 8:
        data['label'] = data['label'].byte()  # .byte() is the uint8 cast

    minibatch = 1
    pose_t = data['label'][:, :opt.label_nc, :, :]

    generated = model.inference(pose_t, prev_generation)
    prev_generation = generated

    visuals = OrderedDict([
        ('input_label', util.tensor2im(pose_t[0].data, normalize=False)),
        ('synthesized_image', util.tensor2im(generated[0].data))
    ])
    img_path = data['path']
    print('process image... %s' % img_path)
    visualizer.save_images(webpage, visuals, img_path)

webpage.save()
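
Because each frame's output is fed back in as prev_generation, generation here is strictly sequential. If memory grows across frames, a defensive variant (assuming model.inference returns a tensor that may carry autograd history) is:

prev_generation = generated.detach()  # drop any autograd history between frames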
Example #7
            loss_D.backward()
        optimizer_D.step()

        ############## Display results and errors ##########
        ### print out errors
        if total_steps % opt.print_freq == print_delta:
            errors = {k: v.data.item() if not isinstance(v, int) else v for k, v in loss_dict.items()}
            t = (time.time() - iter_start_time) / opt.print_freq
            visualizer.print_current_errors(epoch, epoch_iter, errors, t)
            visualizer.plot_current_errors(errors, total_steps)
            # call(["nvidia-smi", "--format=csv", "--query-gpu=memory.used,memory.free"])

        ### display output images
        if save_fake:
            visuals = OrderedDict([('input_label', util.tensor2label(data['label'][0], opt.label_nc)),
                                   ('synthesized_image', util.tensor2im(generated.data[0])),
                                   ('real_image', util.tensor2im(data['image'][0]))])
            visualizer.display_current_results(visuals, epoch, total_steps)

        ### save latest model
        if total_steps % opt.save_latest_freq == save_delta:
            print('saving the latest model (epoch %d, total_steps %d)' % (epoch, total_steps))
            model.module.save('latest')
            np.savetxt(iter_path, (epoch, epoch_iter), delimiter=',', fmt='%d')

        if epoch_iter >= dataset_size:
            break

    # end of epoch
    iter_end_time = time.time()
    print('End of epoch %d / %d \t Time Taken: %d sec' %
          (epoch, opt.niter + opt.niter_decay, time.time() - epoch_start_time))
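
The np.savetxt(iter_path, ...) call above pairs with a load at startup when resuming training; in pix2pixHD-style scripts that typically looks like:

import numpy as np

# Resume bookkeeping matching the np.savetxt call above (assumed placement
# near the top of the training script).
try:
    start_epoch, epoch_iter = np.loadtxt(iter_path, delimiter=',', dtype=int)
except OSError:
    start_epoch, epoch_iter = 1, 0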