def prepare_options(self, path):
        sys.path.append(path)
        from options.test_options import TestOptions
        from models.models import create_model
        sys.argv = [sys.argv[0]]
        os.makedirs(self.input_file_grp + "/test_A/", exist_ok=True)
        opt = TestOptions().parse(save=False)
        opt.nThreads = 1  # test code only supports nThreads = 1
        opt.batchSize = 1  # test code only supports batchSize = 1
        opt.serial_batches = True  # no shuffle
        opt.no_flip = True  # no flip
        opt.root_dir = self.input_file_grp  # make into a proper path
        opt.checkpoints_dir = self.parameter['checkpoint_dir']
        opt.dataroot = self.input_file_grp
        opt.name = self.parameter['model_name']
        opt.label_nc = 0
        opt.no_instance = True
        opt.resize_or_crop = self.parameter['imgresize']
        opt.n_blocks_global = 10
        opt.n_local_enhancers = 2
        opt.gpu_ids = [self.parameter['gpu_id']]
        opt.loadSize = self.parameter['resizeHeight']
        opt.fineSize = self.parameter['resizeWidth']

        model = create_model(opt)

        return opt, model
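# Hypothetical caller for the method above (a sketch, not part of the
# original code): it assumes a processor-like object exposing
# `input_file_grp` and a `parameter` dict with the keys read above, and a
# `path` pointing at a pix2pixHD checkout; every value is a placeholder.
class _DewarpProcessor:
    input_file_grp = 'OCR-D-IMG'
    parameter = {
        'checkpoint_dir': './checkpoints',
        'model_name': 'dewarp',
        'imgresize': 'resize_and_crop',
        'gpu_id': 0,
        'resizeHeight': 1024,
        'resizeWidth': 1024,
    }
    prepare_options = prepare_options

opt, model = _DewarpProcessor().prepare_options('/path/to/pix2pixHD')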
Example #2
def main():
    opt = TestOptions().parse()
    opt.nThreads = 1  # test code only supports nThreads = 1
    opt.batchSize = 1  # test code only supports batchSize = 1
    opt.serial_batches = True  # no shuffle
    opt.no_flip = True  # no flip

    data_loader = CreateDataLoader(opt)
    dataset = data_loader.load_data()
    model = create_model(opt)
    visualizer = Visualizer(opt)
    # create website
    web_dir = os.path.join(opt.results_dir, opt.name,
                           '%s_%s' % (opt.phase, opt.which_epoch))
    webpage = html.HTML(
        web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' %
        (opt.name, opt.phase, opt.which_epoch))
    # test
    for i, data in enumerate(dataset):
        if i >= opt.how_many:
            break
        model.set_input(data)
        model.test()
        visuals = model.get_current_visuals()
        img_path = model.get_image_paths()
        print('%04d: process image... %s' % (i, img_path))
        visualizer.save_images(webpage,
                               visuals,
                               img_path,
                               aspect_ratio=opt.aspect_ratio)

    webpage.save()
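# Nearly every snippet on this page repeats the same four test-time
# settings. A small helper like this sketch (it is not part of any of the
# repositories shown here) could centralize them:
def apply_test_defaults(opt):
    opt.nThreads = 1  # test code only supports nThreads = 1
    opt.batchSize = 1  # test code only supports batchSize = 1
    opt.serial_batches = True  # no shuffle
    opt.no_flip = True  # no flip
    return opt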
Example #3
def main():
    opt = TestOptions().parse()
    opt.no_flip = True
    opt.batchSize = 1

    data_loader = CreateDataLoader(opt)

    model = SingleGAN()
    model.initialize(opt)

    web_dir = os.path.join(opt.results_dir, 'test')
    webpage = html.HTML(web_dir, 'task {}'.format(opt.name))

    for i, data in enumerate(islice(data_loader, opt.how_many)):
        print('process input image %3.3d/%3.3d' % (i, opt.how_many))
        all_images, all_names = model.translation(data)
        img_path = 'image%3.3i' % i
        save_images(webpage,
                    all_images,
                    all_names,
                    img_path,
                    None,
                    width=opt.fineSize)

    webpage.save()
Example #4
def gan_main():
    opt = TestOptions().parse(save=False)
    opt.nThreads = 1  # test code only supports nThreads = 1
    opt.batchSize = 1  # test code only supports batchSize = 1
    opt.serial_batches = True  # no shuffle
    opt.no_flip = True  # no flip

    data_loader = CreateDataLoader(opt)
    dataset = data_loader.load_data()
    model = create_model(opt)
    visualizer = Visualizer(opt)
    # create website
    web_dir = os.path.join(opt.results_dir, opt.name,
                           '%s_%s' % (opt.phase, opt.which_epoch))
    webpage = html.HTML(
        web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' %
        (opt.name, opt.phase, opt.which_epoch))
    # test
    for i, data in enumerate(dataset):
        if i >= opt.how_many:
            break
        generated = model.inference(data['label'], data['inst'])
        visuals = OrderedDict([
            ('input_label', util.tensor2label(data['label'][0], opt.label_nc)),
            ('synthesized_image', util.tensor2im(generated.data[0]))
        ])
        img_path = data['path']
        print('process image... %s' % img_path)
        visualizer.save_images(webpage, visuals, img_path)

    webpage.save()
Example #5
def setup(opts):
    generator_checkpoint_path = opts['generator_checkpoint']
    try:
        os.makedirs('checkpoints/pretrained/')
    except OSError:
        pass
    shutil.copy(generator_checkpoint_path,
                'checkpoints/pretrained/latest_net_G.pth')

    opt = TestOptions(args=[
        '--name',
        'pretrained',
        '--netG',
        'local',
        '--ngf',
        '32',
        '--resize_or_crop',
        'none',
    ]).parse(save=False)
    opt.nThreads = 1  # test code only supports nThreads = 1
    opt.batchSize = 1  # test code only supports batchSize = 1
    opt.serial_batches = True  # no shuffle
    opt.no_flip = True  # no flip
    model = create_model(opt)
    return {'model': model, 'opt': opt}
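# Hypothetical call to the setup above; the path is a placeholder for real
# pix2pixHD generator weights.
state = setup({'generator_checkpoint': '/path/to/latest_net_G.pth'})
model, opt = state['model'], state['opt']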
Example #6
def CreatePix2PixModel(gpu):
    # --which_direction AtoB --model two_pix2pix --name soccer_seg_detection_pix2pix --output_nc 1 --dataset_mode aligned --which_model_netG unet_256 --norm batch --how_many 186 --loadSize 256

    opt = TestOptions()  #.parse()
    # print("0--")

    # Custom stuff that is normally passed on command line
    opt.dataroot = './ExtractPitchLines/datasets/soccer_seg_detection'
    opt.which_direction = 'AtoB'
    opt.model = 'two_pix2pix'
    opt.name = 'Linedetection'
    opt.output_nc = 1
    opt.dataset_mode = 'aligned'
    opt.which_model_netG = 'unet_256'
    opt.norm = 'batch'
    opt.how_many = 186
    opt.loadSize = 256

    # determine if you use GPU
    # Use GPU
    # opt.gpu_ids = [0]
    # Use CPU
    # opt.gpu_ids = []

    if gpu:
        opt.gpu_ids = [0]
    else:
        opt.gpu_ids = []

    # Default stuff
    opt.nThreads = 1  # test code only supports nThreads = 1
    opt.batchSize = 1  # test code only supports batchSize = 1
    opt.serial_batches = True  # no shuffle
    opt.no_flip = True  # no flip
    opt.continue_train = False

    # Newly added
    opt.phase = 'test'
    opt.resize_or_crop = 'resize_and_crop'
    opt.isTrain = False
    opt.checkpoints_dir = './checkpoints'
    opt.input_nc = 3
    opt.ndf = 64
    opt.ngf = 64
    opt.no_dropout = False
    opt.init_type = 'normal'
    opt.which_epoch = 'latest'
    opt.which_model_netD = 'basic'

    # print(opt.dataroot)

    data_loader = CreateDataLoader(opt)
    dataset = data_loader.load_data()
    model = create_model(opt)

    return model
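# Hypothetical usage: build the model on CPU. Note that the data_loader and
# dataset created inside CreatePix2PixModel are unused; only the model is
# returned.
model = CreatePix2PixModel(gpu=False)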
Example #7
def load_model(name):
  global opt
  opt = TestOptions().parse(save=False)
  opt.nThreads = 1
  opt.batchSize = 1
  opt.serial_batches = True
  opt.no_flip = True
  opt.name = name
  opt.resize_or_crop = 'none'
  opt.use_features = False
  opt.no_instance = True
  opt.label_nc = 0
  return create_model(opt)
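# Hypothetical usage; the argument must name a model directory under the
# default checkpoints folder ('lateshow' is a placeholder).
model = load_model('lateshow')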
Example #8
def create_mp4(input_img_path, csv_path):
    
    opt = TestOptions().parse()
    opt.input_img = input_img_path
    opt.csv_path = csv_path
    opt.nThreads = 1   # test code only supports nThreads = 1
    opt.batchSize = 1  # test code only supports batchSize = 1
    opt.serial_batches = True  # no shuffle
    opt.no_flip = True  # no flip
    
    ###### PART 1    
    detector = dlib.get_frontal_face_detector()
    
    name = opt.input_img
    cap = cv2.imread(name) # add your image here
#    image= cv2.resize(cap, (400, 400))
    
    RGB = cv2.cvtColor(cap, cv2.COLOR_BGR2RGB) 
           
    rects = detector(RGB, 1)
    
    for rect in rects:
        c1 = rect.dcenter()
        (x, y, w, h) = rect_to_bb(rect)
        w = int(w * 1.6)
        h = int(h * 1.6)
        x = c1.x - int(w / 2.0)
        y = c1.y - int(h / 2.0)
        if y < 0:
            y = 0
        if x < 0:
            x = 0

        faceOrig = imutils.resize(RGB[y:y + h, x:x + w], height=256)
        d_num = np.asarray(faceOrig)
        f_im = Image.fromarray(d_num)
        f_im.save('./temp.png')
            
        
    #### PART 2
    data_loader = CreateDataLoader(opt)
    dataset = data_loader.load_data()
    model = create_model(opt)
    for i, data in enumerate(dataset):              
        if i >= opt.how_many:
            break        
        model.set_input(data)
        model.test()
    
    os.remove('./temp.png')
    print("Done!")
Example #9
File: test.py Project: yqGANs/DMIT
def main():    
    opt = TestOptions().parse()
    opt.is_flip = False  
    opt.batchSize = 1
    data_loader = CreateDataLoader(opt)
    model = create_model(opt) 
    web_dir = os.path.join(opt.results_dir, 'test')
    webpage = html.HTML(web_dir, 'task {}'.format(opt.exp_name))

    for i, data in enumerate(islice(data_loader, opt.how_many)):
        print('process input image %3.3d/%3.3d' % (i, opt.how_many))
        results = model.translation(data)
        img_path = 'image%3.3i' % i
        save_images(webpage, results, img_path, None, width=opt.fine_size)
    webpage.save()
Example #10
    def __init__(self, options):
        opt = TestOptions().parse()
        opt.nThreads = 1  # test code only supports nThreads = 1
        opt.batchSize = 1  # test code only supports batchSize = 1
        opt.serial_batches = True  # no shuffle
        opt.no_flip = True  # no flip
        if options['use_single_gpu']:
            opt.gpu_ids = [0]  # use first gpu
        else:
            opt.gpu_ids = []  # use cpu
        # opt.model = 'test'
        opt.dataset_mode = 'single'
        opt.learn_residual = True

        self.model = create_model(opt)
Example #11
def setup():
    global opt
    opt = TestOptions().parse()
    opt.nThreads = 1
    opt.batchSize = 1
    opt.serial_batches = True
    opt.no_flip = True
    opt.name = 'pretrained'
    opt.checkpoints_dir = '.'
    opt.model = 'pix2pix'
    opt.which_direction = 'AtoB'
    opt.norm = 'batch'
    opt.input_nc = 3
    opt.output_nc = 1
    opt.which_model_netG = 'resnet_9blocks'
    opt.no_dropout = True
    model = create_model(opt)
    return model
Example #12
def fake_init():

    opt = TestOptions().parse()
    opt.nThreads = 1   # test code only supports nThreads = 1
    opt.batchSize = 1  # test code only supports batchSize = 1
    opt.serial_batches = True  # no shuffle
    opt.no_flip = True  # no flip
    opt.display_id = -1  # no visdom display
    # print(opt.which_model_netG)

    model = create_model(opt)
    model.setup(opt)
    # test
    # data = get_data_numpy_file('datasets/20180822_fix_max_real_data/trainA/2018_08_20_14_38_19_6309872.npy')
    data, id_array = get_data_json('../server/user_collected_data/2018_08_20_14_38_19_6309872.json')
    model.set_input(data)
    output = model.test()
    focal_array = parse_to_id_array(output, id_array)
    json_data = json.dumps(focal_array, indent=2)
Example #13
def init_model():
    opt = TestOptions().parse()
    opt.nThreads = 1   # test code only supports nThreads = 1
    opt.batchSize = 1  # test code only supports batchSize = 1
    opt.serial_batches = True  # no shuffle
    opt.no_flip = True  # no flip
    opt.display_id = -1  # no visdom display
    opt.checkpoints_dir = '../machinelearning/checkpoints'
    model = create_model(opt)
    model.setup(opt)
    return model, opt
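    # NOTE: everything below this return is unreachable; it appears to be a
    # leftover manual test on previously collected data.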
    # test
    # data = get_data_numpy_file('datasets/20180822_fix_max_real_data/trainA/2018_08_20_14_38_19_6309872.npy')
    data, id_array = get_data_json('../server/user_collected_data/2018_08_20_14_38_19_6309872.json')
    for i in range(10):
        model.set_input(data)
        output = model.test()
        focal_array = parse_to_id_array(output, id_array)
        json_data = json.dumps(focal_array, indent=2)
Example #14
def main():
    opt = TestOptions().parse()
    opt.nThreads = 1  # test code only supports nThreads = 1
    opt.batchSize = 1  # test code only supports batchSize = 1
    opt.serial_batches = True  # no shuffle
    opt.no_flip = True  # no flip

    data_loader = CreateDataLoader(opt)
    dataset = data_loader.load_data()

    # read pix2pix/PAN model
    if opt.model == 'pix2pix':
        assert (opt.dataset_mode == 'aligned')
        from models.pix2pix_model import Pix2PixModel
        model = Pix2PixModel()
        model.initialize(opt)
    elif opt.model == 'pan':
        from models.pan_model import PanModel
        model = PanModel()
        model.initialize(opt)
    else:
        raise ValueError('unsupported model: %s' % opt.model)

    visualizer = Visualizer(opt)

    # create website
    web_dir = os.path.join(opt.results_dir, opt.name,
                           '%s_%s' % (opt.phase, opt.which_epoch))
    webpage = html.HTML(
        web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' %
        (opt.name, opt.phase, opt.which_epoch))

    # test
    for i, data in enumerate(dataset):
        if i >= opt.how_many:  # default 50 images
            break
        model.set_input(data)
        model.test()
        visuals = model.get_current_visuals()
        img_path = model.get_image_paths()
        print('process image... %s' % img_path)
        visualizer.save_images(webpage, visuals, img_path)

    webpage.save()
Example #15
def setup(opts):
    generator_checkpoint_path = opts['generator_checkpoint']
    try:
        os.makedirs('checkpoints/pretrained/')
    except OSError:
        pass
    shutil.copy(generator_checkpoint_path,
                'checkpoints/pretrained/latest_net_G.pth')
    opt = TestOptions().parse(save=False)
    opt.nThreads = 1
    opt.batchSize = 1
    opt.no_flip = True
    opt.name = 'pretrained'
    opt.ngf = 32
    opt.label_nc = 0
    opt.no_instance = True
    opt.fp16 = True
    opt.resize_or_crop = 'none'
    model = create_model(opt)
    return {'model': model, 'opt': opt}
Example #16
def setup(opts):
    checkpoint = opts["checkpoint"]
    vgg_weights = opts["vgg_weights"]

    opt = TestOptions().parse()
    opt.nThreads = 1   # test code only supports nThreads = 1
    opt.batchSize = 1  # test code only supports batchSize = 1
    opt.serial_batches = True  # no shuffle
    opt.no_flip = True  # no flip
    opt.instance_norm = 0
    opt.resize_or_crop = 'no'
    opt.norm = "instance"
    opt.self_attention = True
    opt.times_residual = True
    opt.no_dropout = True

    model = create_model(opt, checkpoint, vgg_weights)
    #visualizer = Visualizer(opt)

    return {"model" : model,
            "opt" : opt}
Example #17
def main():
    opt = TestOptions().parse()
    opt.nThreads = 1  # test code only supports nThreads = 1
    opt.batchSize = 1  # test code only supports batchSize = 1
    opt.serial_batches = True  # no shuffle
    opt.no_flip = True  # no flip

    if not os.path.isdir(opt.results_dir):
        os.makedirs(opt.results_dir)

    data_loader = CreateDataLoader(opt)
    dataset = data_loader.load_data()
    model = create_model(opt)

    # test
    for i, data in enumerate(dataset):
        model.set_input(data)
        img_path = model.get_image_paths()
        print('Processing %04d (%s)' % (i + 1, img_path[0]))
        model.test()
        model.write_image(opt.results_dir)
Example #18
    def __init__(self, gpu_ids=[]):
        opt = TestOptions()  #.parse()
        opt.nThreads = 1  # test code only supports nThreads = 1
        opt.batchSize = 1  # test code only supports batchSize = 1
        opt.serial_batches = True  # no shuffle
        opt.no_flip = True  # no flip
        opt.display_id = -1  # no visdom display
        opt.dataset_mode = "single"
        opt.dataroot = "."
        opt.phase = "test"
        opt.loadSize = 256
        opt.fineSize = 256
        opt.isTrain = False
        opt.input_nc = 3
        opt.output_nc = 3
        opt.gpu_ids = gpu_ids
        opt.name = "NU_SEG"
        opt.model_suffix = ""
        opt.checkpoints_dir = "../../NucleiSegmentation/checkpoints/"
        opt.model = "test"
        opt.ngf = 64
        opt.norm = "instance"
        opt.which_model_netG = "unet_256"
        opt.resize_or_crop = "resize_and_crop"
        opt.which_epoch = "latest"
        opt.no_dropout = "store_true"
        opt.init_type = "normal"
        opt.init_gain = 0.02
        opt.verbose = ""
        opt.which_direction = "BtoA"
        data_loader = CreateDataLoader(opt)
        dataset = data_loader.load_data()
        model = create_model(opt)
        model.setup(opt)

        self.model = model
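# The surrounding class definition is not shown above; assuming it were
# named NucleiSegmenter (a hypothetical name), instantiation would look
# like:
#
#   segmenter = NucleiSegmenter(gpu_ids=[])  # empty list = run on CPU
#   model = segmenter.model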
Example #19
def image_harmonization_eval(input_file_path, mask_file_path):
    opt = TestOptions().parse()
    opt.nThreads = 1  # test code only supports nThreads = 1
    opt.batchSize = 1  # test code only supports batchSize = 1
    opt.display_id = -1  # no visdom display

    with open(input_file_path, 'rb') as f:
        with Image.open(f) as input_img:
            input_img = input_img.convert('RGB')

    with open(mask_file_path, 'rb') as f:
        with Image.open(f) as mask_img:
            mask_img = mask_img.convert('L')

    input_image = np.array(input_img, np.float32) / 255.0
    input_image = input_image.transpose((2, 0, 1))

    input_mask = np.expand_dims(np.array(mask_img, np.float32) / 255.0, axis=0)

    input_image = torch.from_numpy(input_image)
    input_mask = torch.from_numpy(input_mask)

    mean = torch.tensor(0.5).view(-1, 1, 1)
    std = torch.tensor(0.5).view(-1, 1, 1)

    input_image = (input_image - mean) / std

    input_image = torch.unsqueeze(input_image, 0)
    input_mask = torch.unsqueeze(input_mask, 0)

    model = create_model(opt)
    model.set_input_eval([input_image, input_mask])
    pred = model.eval()
    save_images(pred, aspect_ratio=1.0)

    return pred
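# Hypothetical call; the paths are placeholders for a composite image and
# its binary foreground mask.
pred = image_harmonization_eval('./inputs/composite.jpg', './inputs/mask.png')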
Example #20
import torchvision
from torchvision import transforms
from options.test_options import TestOptions

# Extract the options
opt = TestOptions().parse()

# For testing  the neural networks, manually edit/add options below
opt.gan_mode = 'none'  # 'wgangp', 'lsgan', 'vanilla', 'none'

opt.n_downsample = 2  # Downsample times
opt.n_blocks = 2  # Number of residual blocks
opt.first_kernel = 5  # The filter size of the first convolutional layer in encoder

# Set the input dataset
opt.dataset_mode = 'CIFAR10'  # Current dataset:  CIFAR10, CelebA

# Set up the training procedure
opt.batchSize = 1  # batch size

opt.activation = 'sigmoid'  # The output activation function at the last layer in the decoder
opt.norm_EG = 'batch'

if opt.dataset_mode == 'CIFAR10':
    opt.dataroot = './data'
    opt.size = 32
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])

    testset = torchvision.datasets.CIFAR10(root='./data',
                                           train=False,
                                           download=True,
                                           transform=transform)
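# A plausible continuation (not shown in the snippet): wrap the test set in
# a standard DataLoader with the batch size chosen above.
import torch

testloader = torch.utils.data.DataLoader(testset,
                                         batch_size=opt.batchSize,
                                         shuffle=False,
                                         num_workers=2)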
Example #21
from collections import OrderedDict
from torch.autograd import Variable
from options.test_options import TestOptions
from data.data_loader import CreateDataLoader
from models.models import create_model
import util.util as util
from util.visualizer import Visualizer
from util import html
import torch
from data.base_dataset import get_params, get_transform
from torchvision import models, transforms
from PIL import Image

# Model Options that match the training
opt = TestOptions().parse(save=False)
opt.nThreads = 1
opt.batchSize = 1
opt.serial_batches = True
opt.no_flip = True
opt.name = 'lateshow'
#opt.netG = 'local'
#opt.ngf = 32
opt.resize_or_crop = 'none'
opt.use_features = False
opt.no_instance = True
opt.label_nc = 0

# Load the model
model = create_model(opt)

# Load a hard code image, just to test
raw_img = Image.open("imgs/lateshow_pose.jpg")
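# A plausible next step, mirroring pix2pixHD's own test.py (hedged: the
# exact preprocessing depends on the checkout; with label_nc = 0 the label
# image is treated as an RGB image):
params = get_params(opt, raw_img.size)
transform = get_transform(opt, params)
label_tensor = transform(raw_img.convert('RGB'))
generated = model.inference(label_tensor.unsqueeze(0), None)
output_image = util.tensor2im(generated.data[0])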
Example #22
def main(style):

    opt = TestOptions().parse()

    opt.dataroot = "datasets/own_data/testA"

    # five pretrained styles
    # opt.name = "style_ink_pretrained"
    # opt.name = "style_monet_pretrained"
    # opt.name = "style_cezanne_pretrained"
    # opt.name = "style_ukiyoe_pretrained"
    # opt.name = "style_vangogh_pretrained"


    # set original img size
    original_img = cv2.imread(opt.dataroot + "/temp.jpg")
    original_img_shape = original_img.shape[:2][::-1]  # (width, height)

    opt.name = "style_%s_pretrained" % style
    # do not change this
    opt.model = "test"

    cv2.imread("temp.jpg")

    opt.nThreads = 1   # test code only supports nThreads = 1
    opt.batchSize = 1  # test code only supports batchSize = 1
    opt.serial_batches = True  # no shuffle
    opt.no_flip = True  # no flip
    opt.display_id = -1  # no visdom display

    # need to overwrite (8-27); this part can be dropped
    data_loader = CreateDataLoader(opt)
    dataset = data_loader.load_data()

    # create model
    model = create_model(opt)
    model.setup(opt)

    # create website
    # the website is not really needed, but the author writes the saved images into web_dir, so I left it unchanged

    web_dir = os.path.join(opt.results_dir, opt.name, '%s_%s' % (opt.phase, opt.which_epoch))
    print("web_dir", web_dir)
    webpage = html.HTML(web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' % (opt.name, opt.phase, opt.which_epoch))
    print("webpage", webpage)
    # exit()

    # test
    for i, data in enumerate(dataset):
        # i is the index generated by enumerate
        # data is a dict: key 'A' is a tensor of size [1, 3, 256, 256];
        # key 'A_path' is a str holding the source path (including the file name),
        # e.g. datasets/own_data/testA/2test.jpg
        # default how_many is 50: at most 50 images of a dataset are processed

        # need to overwrite "data":
        # an input with the same shape as data, wired to a listener outside, should be enough
        if i >= opt.how_many:
            break
        model.set_input(data)

        model.test()
        visuals = model.get_current_visuals()
        img_path = model.get_image_paths()
        if i % 5 == 0:
            print('processing (%04d)-th image... %s' % (i, img_path))
        save_images(webpage, visuals, img_path, aspect_ratio=opt.aspect_ratio, width=opt.display_winsize)

        generate_img = cv2.imread("results/generate_images/" + "temp.png")
        reshape_generate_img = cv2.resize(generate_img, original_img_shape, interpolation=cv2.INTER_CUBIC)

        cv2.imwrite("results/generate_images/" + "temp.png", reshape_generate_img)
Example #23
def infer(n, image_label_path, image_inst_path):
    opt = TestOptions().parse(save=False)
    opt.nThreads = 1  # test code only supports nThreads = 1
    opt.batchSize = 1  # test code only supports batchSize = 1
    opt.serial_batches = True  # no shuffle
    opt.no_flip = True  # no flip
    opt.name = "label2city_1024p"
    opt.netG = "local"
    opt.ngf = 32
    opt.resize_or_crop = "none"

    data_loader = CreateOneDataLoader(opt)
    dataset = data_loader.load_data(image_label_path, image_inst_path)
    visualizer = Visualizer(opt)
    # create website
    #web_dir = os.path.join(opt.results_dir, opt.name, '%s_%s' % (opt.phase, opt.which_epoch))
    #webpage = html.HTML(web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' % (opt.name, opt.phase, opt.which_epoch))

    # test
    if not opt.engine and not opt.onnx:
        model = create_model(opt)
        if opt.data_type == 16:
            model.half()
        elif opt.data_type == 8:
            model.type(torch.uint8)

        if opt.verbose:
            print(model)
    else:
        from run_engine import run_trt_engine, run_onnx

    for i, data in enumerate(dataset):
        if i >= opt.how_many:
            break
        if opt.data_type == 16:
            data['label'] = data['label'].half()
            data['inst'] = data['inst'].half()
        elif opt.data_type == 8:
            data['label'] = data['label'].to(torch.uint8)
            data['inst'] = data['inst'].to(torch.uint8)
        if opt.export_onnx:
            print("Exporting to ONNX: ", opt.export_onnx)
            assert opt.export_onnx.endswith(
                "onnx"), "Export model file should end with .onnx"
            torch.onnx.export(model, [data['label'], data['inst']],
                              opt.export_onnx,
                              verbose=True)
            exit(0)
        minibatch = 1
        if opt.engine:
            generated = run_trt_engine(opt.engine, minibatch,
                                       [data['label'], data['inst']])
        elif opt.onnx:
            generated = run_onnx(opt.onnx, opt.data_type, minibatch,
                                 [data['label'], data['inst']])
        else:
            generated = model.inference(data['label'], data['inst'])

        visuals = OrderedDict([
            ('input_label', util.tensor2label(data['label'][0], opt.label_nc)),
            ('synthesized_image', util.tensor2im(generated.data[0]))
        ])
        img_path = data['path']
        print('process image... %s' % img_path)
        visualizer.save_image(visuals, n)
Example #24
def main():
    # Read options
    opt = TestOptions().parse(save=False)
    # If demo directory to save generated frames is given
    if opt.demo_dir is not None and not os.path.exists(opt.demo_dir):
        os.makedirs(opt.demo_dir)

    # hardcoded constant values
    opt.nThreads = 0
    opt.batchSize = 1
    opt.serial_batches = True
    # GPU id to be used for mxnet/reconstructor
    opt.gpu_id = opt.gpu_ids[-1]
    # Device to be used for MTCNN face detector
    detector_device = 'cpu'
    # Face bounding box margin
    margin = 120
    # How many frames from the target's training video
    # to consider when gathering head pose and eye size statistics
    n_frames_target_used = 1000
    # How many of the first source frames to consider for eye size adaptation
    # between source and target.
    n_frames_init = 25
    # For cuda initialization errors.
    torch.multiprocessing.set_start_method('spawn', force=True)

    # Initialize video renderer.
    modelG = create_model(opt)
    # Initialize NMFC renderer.
    renderer = NMFCRenderer(opt)
    # Initialize face detector.
    detector = MTCNN(image_size=opt.loadSize,
                     margin=margin,
                     post_process=False,
                     device=detector_device)
    # Initialize landmark extractor.
    dlib_detector = dlib.get_frontal_face_detector()
    dlib_predictor = dlib.shape_predictor(
        'preprocessing/files/shape_predictor_68_face_landmarks.dat')

    # Read the identity parameters from the target person.
    id_params, _ = read_params(
        'id', os.path.join(opt.dataroot, 'train', 'id_coeffs'),
        opt.target_name)
    # Read camera parameters from target
    t_cam_params, _ = read_params('cam',
                                  os.path.join(opt.dataroot, 'train', 'misc'),
                                  opt.target_name)
    t_cam_params = t_cam_params[:n_frames_target_used]
    # Read eye landmarks from target's video.
    eye_landmarks_target = read_eye_landmarks(
        os.path.join(opt.dataroot, 'train', 'landmarks70'), opt.target_name)
    eye_landmarks_target[0] = eye_landmarks_target[0][:n_frames_target_used]
    eye_landmarks_target[1] = eye_landmarks_target[1][:n_frames_target_used]

    # Setup camera capturing
    window_name = 'Head2Head Demo'
    video_capture = cv2.VideoCapture(0)
    video_capture.set(cv2.CAP_PROP_BUFFERSIZE,
                      2)  # set double buffer for capture
    fps = video_capture.get(cv2.CAP_PROP_FPS)
    print("Video capture at {} fps.".format(fps))

    processes = []

    # Face tracker / detector
    box_redetect_nframes = opt.box_redetect_nframes
    box = None  # face bounding box, calculated from the first frame

    # Face reconstructor / NMFC renderer
    nmfc = None  # current NMFC image
    s_cam_params = []  # camera parameters of the source video
    # Camera parameters of the source video, adapted to the target.
    adapted_cam_params = []

    # Facial (eye) landmarks detector
    prev_eye_centres = None  # eye centres in the previous frame
    eye_landmarks = None  # final eye landmarks, sent to the video renderer
    # Eye landmarks from the first n_frames_init frames of the source video.
    eye_landmarks_source = [[], []]
    # Queue for the extracted eye landmarks of the source video.
    eye_landmarks_source_queue = Queue()
    # Queue reporting whether eye landmark detection was successful.
    landmarks_success_queue = Queue()
    # Queue for video frames, read by the landmark detector process.
    frames_queue = Queue()
    # Process running 68 + 2 landmark detection in parallel with face
    # reconstruction / NMFC rendering.
    process_eye_landmarks = Process(
        target=compute_eye_landmarks,
        args=(dlib_detector, dlib_predictor, eye_landmarks_source_queue,
              landmarks_success_queue, frames_queue))
    process_eye_landmarks.start()
    processes.append(process_eye_landmarks)
    print('Launched landmark extractor!')

    # Video renderer (GAN).
    input_queue = torchQueue()  # queue of the GAN's inputs
    output_queue = torchQueue()  # queue of the GAN's outputs
    # Process running the video renderer without waiting for NMFC + eye
    # landmark creation.
    process_video_renderer = torchProcess(target=compute_fake_video,
                                          args=(input_queue, output_queue,
                                                modelG, opt))
    process_video_renderer.start()
    processes.append(process_video_renderer)
    print('Launched video renderer!')

    camera = None
    if opt.realtime:
        try:
            import pyfakewebcam
            stream_id = opt.realtime_cam_id
            webcam_width = webcam_height = opt.loadSize
            camera = pyfakewebcam.FakeWebcam(f'/dev/video{stream_id}',
                                             webcam_width, webcam_height)
            camera.print_capabilities()
            print(f'Fake webcam created on /dev/video{stream_id}.')
        except Exception as ex:
            print('Fake webcam initialization failed:')
            print(str(ex))

    iter = 0
    # Start main Process (Face reconstruction / NMFC renderering)
    while True:
        t0 = time.perf_counter()
        try:  # Read generated frames from video renderer's output Queue.
            # Non-blocking
            fake_frame, real_frame = output_queue.get_nowait()
            result = np.concatenate([real_frame, fake_frame[..., ::-1]],
                                    axis=1)
            # If output directory is specified save frames there.
            if opt.demo_dir is not None:
                result_path = os.path.join(opt.demo_dir,
                                           "{:06d}".format(iter) + '.png')
                cv2.imwrite(result_path, result)
            elif camera is not None:
                camera.schedule_frame(fake_frame)
            else:
                cv2.imshow(window_name, result)
                cv2.waitKey(1)
        except queue.Empty:  # If empty queue continue.
            pass
        # Read next frame
        _, frame = video_capture.read()
        # Crop the larger dimension of frame to make it square
        frame = make_frame_square(frame)

        if box_redetect_nframes > 0 and iter % box_redetect_nframes == 0:
            box = None
        # If no bounding box has been detected yet, run MTCNN (once, on the first frame)
        if box is None:
            box = detect_box(detector, frame)
        # If no face detected exit.
        if box is None:
            break
        # Crop the frame at the point where the face was seen in the first frame.
        frame = extract_face(frame, box, opt.loadSize, margin)
        frame = tensor2npimage(frame)
        frame = np.transpose(frame, (1, 2, 0))
        # Send ROI frame to landmark detector, while the main Process performs face reconstruction.
        frames_queue.put(frame)
        # Get expression and pose, adapt pose and identity to target and render NMFC.
        success, s_cam_params, adapted_cam_params, new_nmfc = \
            compute_reconstruction(renderer, id_params, t_cam_params, s_cam_params,
                                   adapted_cam_params, frame)
        # Update the current NMFC if reconstruction was successful
        if success:
            nmfc = new_nmfc
        # If not, use previous nmfc. If it does not exist, exit.
        if not success and nmfc is None:
            break
        # Find eye centres using nmfc image.
        eye_centres, prev_eye_centres = search_eye_centres([nmfc[:, :, ::-1]],
                                                           prev_eye_centres)
        # Read Queue to get eye landmarks, if detection was successful.
        if landmarks_success_queue.get():
            eye_landmarks = eye_landmarks_source_queue.get()
        # If not, use previous eye landmarks. If they do not exist, exit.
        if eye_landmarks is None:
            break
        # If in first frames, determine the source-target eye size (height) ratio.
        if iter < n_frames_init:
            eye_landmarks_source[0].append(eye_landmarks[0])
            eye_landmarks_source[1].append(eye_landmarks[1])
            eye_ratios = compute_eye_landmarks_ratio(eye_landmarks_source,
                                                     eye_landmarks_target)
        # Adapt the eye landmarks to the target face by placing them at the
        # eye centres and re-scaling them to match the NMFC size and the
        # target's mean eye height (top-down distance).
        eye_lands = adapt_eye_landmarks(
            [[eye_landmarks[0]], [eye_landmarks[1]]], eye_centres, eye_ratios,
            s_cam_params[-1:], adapted_cam_params[-1:])
        # Send the conditional input to video renderer
        input_queue.put((nmfc, eye_lands[0], frame))
        iter += 1
        # Show frame rate.
        t1 = time.perf_counter()
        dt = t1 - t0
        print('fps: %0.2f' % (1 / dt))

    # Terminate processes and join them
    for process in processes:
        process.terminate()
        process.join()

    renderer.clear()
    print('Main process exiting')
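# compute_fake_video is referenced above but not shown. A minimal sketch of
# a compatible worker, inferred purely from how the queues are used in the
# main loop (it reads (nmfc, eye_landmarks, frame) tuples and must emit
# (fake_frame, real_frame) pairs); the modelG call is a placeholder:
def compute_fake_video_sketch(input_queue, output_queue, modelG, opt):
    while True:
        nmfc, eye_landmarks, real_frame = input_queue.get()
        # Placeholder conditional-input call; the real renderer API depends
        # on the repository this example comes from.
        fake_frame = modelG.inference(nmfc, eye_landmarks)
        output_queue.put((fake_frame, real_frame))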
Example #25
        if (os.path.isdir(path + '/' + f)):
            if (f[0] == '.'):
                pass
            else:
                dirList.append(f)
        if (os.path.isfile(path + '/' + f)):
            fileList.append(f)
    dirList.sort()
    fileList.sort()
    return [dirList, fileList]


if __name__ == '__main__':
    with torch.no_grad():
        opt = TestOptions().parse()
        opt.nThreads = 4
        opt.batchSize = 200
        opt.serial_batches = True  # no shuffle
        opt.no_flip = True  # no flip
        opt.fid_count = True
        fid_data_length = 20000
        last_measure = 10
        save_path = opt.result_path
        save_result_flag = True
        if not os.path.isdir(save_path):
            os.mkdir(save_path)
        save_root = os.path.join(save_path, opt.name)
        txt_path = os.path.join(save_root, '%s.txt' % opt.name)

        opt.phase = 'test'
        data_loader = CreateDataLoader(opt)
        test_dataset = data_loader.load_data()
Example #26
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import os
import torch
from options.test_options import TestOptions
from data.data_loader import CreateDataLoader
from models.models import create_model
import time

opt = TestOptions().parse()
opt.batchSize = 1  # set batchSize = 1 for testing

data_loader = CreateDataLoader(opt)
dataset = data_loader.load_data()
dataset_size = len(data_loader)
print('#testing images = %d' % dataset_size)

model = torch.load(os.path.join('.', opt.checkpoints_dir, opt.name, str(opt.which_epoch) + '.pth'))
model.BasesNet.eval()

accuracies = []
losses = []

for i, data in enumerate(dataset):
	if i >= opt.how_many:
		break
	print(i)
	accuracy, loss = model.test(data)
	accuracies.append(accuracy)
	losses.append(loss)

accuracy = sum(accuracies)/len(accuracies)
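# The snippet ends here; presumably the aggregate metrics would then be
# reported, e.g.:
print('mean accuracy: %f, mean loss: %f' % (accuracy, sum(losses) / len(losses)))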
Example #27
        pred_result[i_batch] = rel_pred

    # Write to file

    result_dir = os.path.join(opt.results_dir, opt.name, dataset_name)
    if not os.path.exists(result_dir):
        os.makedirs(result_dir)
    with open(os.path.join(result_dir, 'predicted_predicate.pkl'), 'wb') as f:
        pickle.dump(pred_result, f, pickle.HIGHEST_PROTOCOL)
    print('Finished {:d} testing images.'.format(len(test_data_loader)))


if __name__ == '__main__':
    opt = TestOptions().parse()
    opt.nThreads = 1  # test code only supports nThreads = 1
    opt.batchSize = 1  # test code only supports batchSize = 1 (1 image at a time)
    opt.serial_batches = True
    opt.gpu_ids = [opt.gpu_ids[0]]
    if 'vrd' in opt.dataroot:
        test_dataset = VrdDataset(opt.dataroot,
                                  split='test',
                                  net=opt.feat_net,
                                  use_gt=opt.use_gt,
                                  use_lang=opt.use_lang)
    else:
        print('No such dataset')
        sys.exit(1)
    test_data_loader = DataLoader(test_dataset,
                                  batch_size=opt.batchSize,
                                  shuffle=False,
                                  num_workers=int(opt.nThreads))
Example #28
def data_aug():
    json_path = "./cityscapes/annotations/instancesonly_filtered_gtFine_train.json"
    bbox_list = {}
    with open(json_path) as json_file:
        data_city = json.load(json_file)
        for i in range(len(data_city['images'])):
            bbox_list[data_city['images'][i]['id']] = []
        for i in range(len(data_city['annotations'])):
            img_id = data_city['annotations'][i]['image_id']
            bbox = data_city['annotations'][i]['bbox']
            bbox_list[img_id].append(bbox)
        num_img = len(data_city['images'])
    opt = TestOptions().parse()
    opt.nThreads = 1  # test code only supports nThreads = 1
    opt.batchSize = 1  # test code only supports batchSize = 1
    opt.serial_batches = True  # no shuffle
    opt.no_flip = True  # no flip
    model = create_model(opt)
    dis_path = './distribution_bboxes_human'
    with open(dis_path, 'rb') as f1:
        data2 = pickle.load(f1)
    points = np.array(data2['center'])
    x = points[:, 0]
    y = points[:, 1]
    mu = np.mean((x, y), axis=1)
    con = np.cov(x, y)
    nn = 0
    while nn < 10:
        r = random.randint(0, num_img - 1)
        img_id = data_city['images'][r]['id']
        image_path = './cityscapes/images/' + data_city['images'][r][
            'file_name']
        img = cv2.imread(image_path)
        added = 0
        AB = []
        bbox_dict = []
        add_bbox = []
        while added < 6:
            h, w = 180, 90
            sample = np.random.multivariate_normal(mean=mu, cov=con, size=1)
            random_x, random_y = sample[0]
            random_x = int(random_x)
            random_y = int(random_y)
            if random_x < 128 or random_x > 2048 - 128 or random_y < 128 or random_y > 1024 - 128:
                continue
            x1s = int(random_x - 128)
            x2s = int(random_x + 128)
            y1s = int(random_y - 128)
            y2s = int(random_y + 128)
            x1b = int(random_x - w / 2)
            x2b = int(random_x + w / 2)
            y1b = int(random_y - h / 2)
            y2b = int(random_y + h / 2)
            cover = 0
            for j in range(len(bbox_list[img_id])):
                x1 = int(bbox_list[img_id][j][0])
                y1 = int(bbox_list[img_id][j][1])
                w = int(bbox_list[img_id][j][2])
                h = int(bbox_list[img_id][j][3])
                x2 = x1 + w
                y2 = y1 + h
                img_ppl = img[y1:y2, x1:x2]
                left_column_max = max(x1, x1s)
                right_column_min = min(x2, x2s)
                up_row_max = max(y1, y1s)
                down_row_min = min(y2, y2s)
                if left_column_max >= right_column_min or down_row_min <= up_row_max:
                    cover = 0
                else:
                    cover = 1
                    break
            if cover == 1:
                continue
            for j in range(len(add_bbox)):
                # add_bbox holds corner boxes [x1, y1, x2, y2] in this version
                x1 = int(add_bbox[j][0])
                y1 = int(add_bbox[j][1])
                x2 = int(add_bbox[j][2])
                y2 = int(add_bbox[j][3])
                img_ppl = img[y1:y2, x1:x2]
                left_column_max = max(x1, x1s)
                right_column_min = min(x2, x2s)
                up_row_max = max(y1, y1s)
                down_row_min = min(y2, y2s)
                if left_column_max >= right_column_min or down_row_min <= up_row_max:
                    cover = 0
                else:
                    cover = 1
                    break
            if cover == 1:
                continue
            if cover == 0:
                roi = img[y1s:y2s, x1s:x2s]
                bbox = img[y1b:y2b, x1b:x2b]
                bbox = cv2.cvtColor(bbox, cv2.COLOR_BGR2GRAY)
                bbox = sp_noise(bbox, 0.50)
                bbox = cv2.merge([bbox, bbox, bbox])
                noise_img = roi.copy()
                noise_img[y1b - random_y + 128:128 + y2b - random_y,
                          x1b - random_x + 128:128 - random_x + x2b] = bbox
                img_con = np.concatenate((roi, noise_img), axis=1)
                AB.append(img_con)
                dd = {
                    'x': x1b - random_x + 128,
                    'y': y1b - random_y + 128,
                    'w': 128 - random_x + x2b,
                    'h': 128 + y2b - random_y
                }
                bbox_dict.append(dd)
                add_bbox.append([x1b, y1b, x2b, y2b])
                bbox_list[img_id].append([x1b, y1b, x2b - x1b, y2b - y1b])
                added += 1
        from data.aligned_dataset2 import AlignedDataset
        aa = AlignedDataset()
        aa.initialize(opt, AB, bbox_dict)
        data_loader = torch.utils.data.DataLoader(
            aa,
            batch_size=opt.batchSize,
            shuffle=not opt.serial_batches,
            num_workers=0)
        # data_loader = CreateDataLoader(opt)
        dataset = data_loader
        for ii, data in enumerate(dataset):
            model.set_input(data)
            model.test()
            x1s = add_bbox[ii][0]
            y1s = add_bbox[ii][1]
            x2s = add_bbox[ii][2]
            y2s = add_bbox[ii][3]
            visuals = model.get_current_visuals()
            result = cv2.resize(visuals['D2_fake'], (x2s - x1s, y2s - y1s),
                                interpolation=cv2.INTER_CUBIC)
            img[y1s:y2s, x1s:x2s] = result
        #img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        cv2.imwrite("./image_debug_city/" + str(nn) + "_fake.png", img)
        nn += 1
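# sp_noise is used above but its definition is not shown; a common
# salt-and-pepper implementation matching the (image, probability) call
# sites would be:
import numpy as np

def sp_noise(image, prob):
    # Flip each pixel to black or white with total probability `prob`.
    noisy = image.copy()
    rnd = np.random.rand(*image.shape[:2])
    noisy[rnd < prob / 2] = 0
    noisy[rnd > 1 - prob / 2] = 255
    return noisy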
Example #29
import time
import os
from options.test_options import TestOptions
from data.data_loader import CreateDataLoader
from models.models import create_model
from util.visualizer import Visualizer
from util import html

opt = TestOptions().parse()
opt.nThreads = 1   # test code only supports nThreads = 1
opt.batchSize = 1  # test code only supports batchSize = 1
opt.serial_batches = True  # no shuffle
opt.no_flip = True  # no flip

data_loader = CreateDataLoader(opt)
dataset = data_loader.load_data()
model = create_model(opt)
visualizer = Visualizer(opt)
# create website
web_dir = os.path.join(opt.results_dir, opt.name, '%s_%s' % (opt.phase, opt.which_epoch))
webpage = html.HTML(web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' % (opt.name, opt.phase, opt.which_epoch))
# test
for i, data in enumerate(dataset):
    if i >= opt.how_many:
        break
    model.set_input(data)
    model.test()
    visuals = model.get_current_visuals()
    img_path = model.get_image_paths()
    print('process image... %s' % img_path)
    visualizer.save_images(webpage, visuals, img_path)
Example #30
import os
from collections import OrderedDict
from torch.autograd import Variable
from options.test_options import TestOptions
from data.data_loader import CreateDataLoader
from models.models import create_model
import util.util as util
from util.visualizer import Visualizer
from util import html
import torch
import time

opt = TestOptions().parse(save=False)
opt.nThreads = 1   # test code only supports nThreads = 1
opt.batchSize = 1  # test code only supports batchSize = 1
opt.serial_batches = True  # no shuffle
opt.no_flip = True  # no flip

data_loader = CreateDataLoader(opt)
dataset = data_loader.load_data()
visualizer = Visualizer(opt)
# create website
web_dir = os.path.join(opt.results_dir, opt.name, '%s_%s' % (opt.phase, opt.which_epoch))
webpage = html.HTML(web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' % (opt.name, opt.phase, opt.which_epoch))

# test
if not opt.engine and not opt.onnx:
    model = create_model(opt)
    if opt.data_type == 16:
        model.half()
    elif opt.data_type == 8:
        model.type(torch.uint8)
Example #31
def data_aug():
    json_path = "./cityscapes/annotations/instancesonly_filtered_gtFine_train.json"
    bbox_list = {}
    with open(json_path) as json_file:
        data_city = json.load(json_file)
        for i in range(len(data_city['images'])):
            bbox_list[data_city['images'][i]['id']] = []
        for i in range(len(data_city['annotations'])):
            img_id = data_city['annotations'][i]['image_id']
            bbox = data_city['annotations'][i]['bbox']
            bbox_list[img_id].append(bbox)
        num_img = len(data_city['images'])
    opt = TestOptions().parse()
    opt.nThreads = 1  # test code only supports nThreads = 1
    opt.batchSize = 1  # test code only supports batchSize = 1
    opt.serial_batches = True  # no shuffle
    opt.no_flip = True  # no flip
    model = create_model(opt)
    dis_path = './distribution_bboxes_human'
    with open(dis_path, 'rb') as f1:
        data2 = pickle.load(f1)
    points = np.array(data2['center'])
    x = points[:, 0]
    y = points[:, 1]
    mu = np.mean((x, y), axis=1)
    con = np.cov(x, y)
    nn = 0

    with open(json_path) as json_file:
        js = json.load(json_file)
        # img_path=all_path_pickle('./data/cityscapes/leftImg8bit/train')
        # print(img_path)
        tt = len(js['annotations'])
        last_id = js['annotations'][tt - 1]['id']
        # aug_img=all_path_pickle('./possion_blending/img')
        aug_img = all_path_pickle('./aug_img/')
        # aug_mask=all_path_pickle('./possion_blending/mask')
        num_img = len(aug_img)
        total = 1
        for i in range(len(js['images'])):
            # city=str(data['images'][i]['file_name']).split['_'][0]
            file_name = js['images'][i]['file_name']
            img_id = js['images'][i]['id']
            city = file_name.split('_')[0]
            image_path = './cityscapes/leftImg8bit/train/' + js['images'][i][
                'file_name']
            img = cv2.imread(image_path)
            added = 0
            AB = []
            bbox_dict = []
            add_bbox = []
            tryout = 0
            while added < 6:
                h, w = 180, 90
                sample = np.random.multivariate_normal(mean=mu,
                                                       cov=con,
                                                       size=1)
                random_x, random_y = sample[0]
                random_x = int(random_x)
                random_y = int(random_y)
                if random_x < 128 or random_x > 2048 - 128 or random_y < 128 or random_y > 1024 - 128:
                    continue
                x1s = int(random_x - 128)
                x2s = int(random_x + 128)
                y1s = int(random_y - 128)
                y2s = int(random_y + 128)
                x1b = int(random_x - w / 2)
                x2b = int(random_x + w / 2)
                y1b = int(random_y - h / 2)
                y2b = int(random_y + h / 2)
                cover = 0
                for j in range(len(bbox_list[img_id])):
                    x1 = int(bbox_list[img_id][j][0])
                    y1 = int(bbox_list[img_id][j][1])
                    w = int(bbox_list[img_id][j][2])
                    h = int(bbox_list[img_id][j][3])
                    x2 = x1 + w
                    y2 = y1 + h
                    img_ppl = img[y1:y2, x1:x2]
                    left_column_max = max(x1, x1s)
                    right_column_min = min(x2, x2s)
                    up_row_max = max(y1, y1s)
                    down_row_min = min(y2, y2s)
                    if left_column_max >= right_column_min or down_row_min <= up_row_max:
                        cover = 0
                    else:
                        cover = 1
                        break
                if cover == 1:
                    tryout += 1
                    if tryout > 200:
                        print("no space in this image!")
                        break
                    continue
                for j in range(len(add_bbox)):
                    x1 = int(add_bbox[j][0])
                    y1 = int(add_bbox[j][1])
                    w = int(add_bbox[j][2])
                    h = int(add_bbox[j][3])
                    x2 = x1 + w
                    y2 = y1 + h
                    img_ppl = img[y1:y2, x1:x2]
                    left_column_max = max(x1, x1s)
                    right_column_min = min(x2, x2s)
                    up_row_max = max(y1, y1s)
                    down_row_min = min(y2, y2s)
                    if left_column_max >= right_column_min or down_row_min <= up_row_max:
                        cover = 0
                    else:
                        cover = 1
                        break
                if cover == 1:
                    tryout += 1
                    if tryout > 200:
                        print("no space in this image!")
                        break
                    continue
                if cover == 0:
                    tryout = 0
                    roi = img[y1s:y2s, x1s:x2s]
                    bbox = img[y1b:y2b, x1b:x2b]
                    bbox = cv2.cvtColor(bbox, cv2.COLOR_BGR2GRAY)
                    bbox = sp_noise(bbox, 0.5)
                    bbox = cv2.merge([bbox, bbox, bbox])
                    noise_img = roi.copy()
                    noise_img[y1b - random_y + 128:128 + y2b - random_y,
                              x1b - random_x + 128:128 - random_x + x2b] = bbox
                    img_con = np.concatenate((roi, noise_img), axis=1)
                    AB.append(img_con)
                    dd = {
                        'x': x1b - random_x + 128,
                        'y': y1b - random_y + 128,
                        'w': 128 - random_x + x2b,
                        'h': 128 + y2b - random_y
                    }
                    bbox_dict.append(dd)
                    add_bbox.append([x1b, y1b, 90, 180])
                    bbox_list[img_id].append([x1b, y1b, 90, 180])
                    added += 1
            from data.aligned_dataset2 import AlignedDataset
            aa = AlignedDataset()
            aa.initialize(opt, AB, bbox_dict)
            data_loader = torch.utils.data.DataLoader(
                aa,
                batch_size=opt.batchSize,
                shuffle=not opt.serial_batches,
                num_workers=0)
            # data_loader = CreateDataLoader(opt)
            dataset = data_loader
            for ii, data in enumerate(dataset):
                model.set_input(data)
                model.test()
                x1s = add_bbox[ii][0]
                y1s = add_bbox[ii][1]
                ws = add_bbox[ii][2]
                hs = add_bbox[ii][3]
                x2s = x1s + ws
                y2s = y1s + hs
                visuals = model.get_current_visuals()
                result = cv2.resize(visuals['D2_fake'], (ws, hs),
                                    interpolation=cv2.INTER_CUBIC)
                img[y1s:y2s, x1s:x2s] = result
                new_d = {
                    'iscrowd': 0,
                    'category_id': 24,
                    'bbox': [x1s, y1s, ws, hs],
                    'area': ws * hs,
                    'segmentation': {
                        'size': [1024, 2048],
                        'counts': ''
                    },
                    'image_id': img_id,
                    'id': str(last_id + total)
                }
                js['annotations'].append(new_d)
            #img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            cv2.imwrite(image_path, img)
            #cv2.imwrite('./image_debug_city/tt.png', img)
            total += 1
            #if total % 100 == 0:
            print(total)
            nn += len(add_bbox)
    print(nn)
    with open(
            './cityscapes/annotations/instancesonly_filtered_gtFine_train.json',
            'w') as outfile:
        json.dump(js, outfile)