# Example 1
 def __init__(self):
     """Parse demo options and build an eval-mode Pix2Pix model for the GUI."""
     options = DemoOptions().parse()
     # Point the demo at the frontend upload folder and checkpoint location.
     options.data_dir = './datasets/frontend_upload'
     options.checkpoints_dir = './checkpoints'
     self.opt = options
     self.model = Pix2PixModel(options)
     # Reference-guided inpainting; inference only, so switch to eval mode.
     self.model.opt.inpaint_mode = 'ref'
     self.model.eval()
# Example 2
        self.orient_scene.size = 14

    def orient_increase(self):
        """Grow the scene brush size by one, capped at 15."""
        current = self.scene.size
        if current < 15:
            self.scene.size = current + 1

    def orient_decrease(self):
        """Shrink the scene brush size by one, never going below 1."""
        current = self.scene.size
        if current > 1:
            self.scene.size = current - 1

    def selectM(self):
        """Report which mask radio button (reference vs. edited) is active."""
        if self.clickButtion1.isChecked():
            print("select Reference Mask")
            return
        if self.clickButtion2.isChecked():
            print("select Edited Mask")

    def selectO(self):
        """Report which orientation radio button (reference vs. edited) is active."""
        if self.clickButtion3.isChecked():
            print("select Reference Orient")
            return
        if self.clickButtion4.isChecked():
            print("select Edited Orient")


if __name__ == "__main__":
    opt = DemoOptions().parse()
    model = Pix2PixModel(opt)
    model.eval()
    app = QApplication(sys.argv)
    ex = Ex(model, opt)
    sys.exit(app.exec_())
# Example 3
import time
import os
from options.demo_options import DemoOptions
opt = DemoOptions().parse()  # set CUDA_VISIBLE_DEVICES before import torch
import torch
import pickle
import torchvision.transforms as transforms
from models.models import create_model
from util.visualizer import Visualizer
from pdb import set_trace as st
from util import html
from util.Vgg16 import Vgg16Part
from util.util import init_vgg16
from util.build_vocab import Vocabulary
import torch
from PIL import Image
import nltk
import json
from util import util

# Force single-threaded, unshuffled, one-sample batches: the test path
# below assumes these settings.
opt.nThreads = 1   # test code only supports nThreads=1
opt.batchSize = 1  #test code only supports batchSize=1
opt.serial_batches = True # no shuffle
# NOTE(review): presumably disables a loss weight (perceptual?) unused at
# test time -- confirm against the options definition.
opt.lambda_p = 0

# Load vocabulary wrapper.
# NOTE(review): pickle.load executes arbitrary code from the file; assumes
# opt.vocab_path points at a trusted, locally generated vocabulary pickle.
with open(opt.vocab_path, 'rb') as f:
     vocab = pickle.load(f)

# Expose the vocabulary (and its size) to downstream model/data code via opt.
opt.vocab = vocab
opt.vocab_size = len(vocab)