Example #1
 def __init__(self):
     self.opt = DemoOptions().parse()
     self.opt.data_dir = './datasets/frontend_upload'
     self.opt.checkpoints_dir = './checkpoints'
     self.model = Pix2PixModel(self.opt)
     self.model.opt.inpaint_mode = 'ref'
     self.model.eval()
Example #2
File: test.py Project: Vinkage/pr_project
def do_test(opt):
    dataloader = data.create_dataloader(opt)

    model = Pix2PixModel(opt)
    model.eval()

    visualizer = Visualizer(opt)

    # create a webpage that summarizes all the results
    web_dir = os.path.join(opt.results_dir, opt.name,
                           '%s_%s' % (opt.phase, opt.which_epoch))
    webpage = html.HTML(
        web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' %
        (opt.name, opt.phase, opt.which_epoch))

    # test
    for i, data_i in enumerate(dataloader):
        if i * opt.batchSize >= opt.how_many:
            break

        generated = model(data_i, mode='inference')

        img_path = data_i['path']
        for b in range(generated.shape[0]):
            print('process image... %s' % img_path[b])
            visuals = OrderedDict([('input_label', data_i['label'][b]),
                                   ('synthesized_image', generated[b])])
            visualizer.save_images(webpage, visuals, img_path[b:b + 1])

    webpage.save()
Example #3
    def __init__(self, opt):
        self.opt = opt
        self.pix2pix_model = Pix2PixModel(opt)
        if len(opt.gpu_ids) > 0:
            self.pix2pix_model = DataParallelWithCallback(
                self.pix2pix_model, device_ids=opt.gpu_ids)
            self.pix2pix_model_on_one_gpu = self.pix2pix_model.module
        else:
            self.pix2pix_model_on_one_gpu = self.pix2pix_model

        self.generated = None
        if opt.isTrain:
            if not opt.unpairTrain:
                (
                    self.optimizer_G,
                    self.optimizer_D,
                ) = self.pix2pix_model_on_one_gpu.create_optimizers(opt)
            else:
                (
                    self.optimizer_G,
                    self.optimizer_D,
                    self.optimizer_D2,
                ) = self.pix2pix_model_on_one_gpu.create_optimizers(opt)
            self.old_lr = opt.lr

        self.d_losses = {}
        self.nanCount = 0

Example #4

    def __init__(self, opt, resume_epoch=0):
        self.opt = opt
        self.pix2pix_model = Pix2PixModel(opt)
        if len(opt.gpu_ids) > 1:
            self.pix2pix_model = DataParallelWithCallback(
                self.pix2pix_model, device_ids=opt.gpu_ids)
            self.pix2pix_model_on_one_gpu = self.pix2pix_model.module
        else:
            self.pix2pix_model.to(opt.gpu_ids[0])
            self.pix2pix_model_on_one_gpu = self.pix2pix_model

        if opt.use_ema:
            self.netG_ema = EMA(opt.ema_beta)
            for name, param in self.pix2pix_model_on_one_gpu.net[
                    'netG'].named_parameters():
                if param.requires_grad:
                    self.netG_ema.register(name, param.data)
            self.netCorr_ema = EMA(opt.ema_beta)
            for name, param in self.pix2pix_model_on_one_gpu.net[
                    'netCorr'].named_parameters():
                if param.requires_grad:
                    self.netCorr_ema.register(name, param.data)

        self.generated = None
        if opt.isTrain:
            self.optimizer_G, self.optimizer_D = \
                self.pix2pix_model_on_one_gpu.create_optimizers(opt)
            self.old_lr = opt.lr
            if opt.continue_train and opt.which_epoch == 'latest':
                checkpoint = torch.load(
                    os.path.join(opt.checkpoints_dir, opt.name,
                                 'optimizer.pth'))
                self.optimizer_G.load_state_dict(checkpoint['G'])
                self.optimizer_D.load_state_dict(checkpoint['D'])
        self.last_data, self.last_netCorr, self.last_netG, self.last_optimizer_G = None, None, None, None
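
The EMA objects above only register the initial weights; a typical trainer then refreshes them after every optimizer step. A minimal sketch of that update (the update() method is assumed from the register() calls; it is not shown in this snippet):

        # hedged sketch: refresh the EMA shadows after optimizer_G.step()
        for name, param in self.pix2pix_model_on_one_gpu.net['netG'].named_parameters():
            if param.requires_grad:
                self.netG_ema.update(name, param.data)  # update() is an assumed EMA method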
Example #5
 def __init__(self, image_q: Queue, done_dict, path):
     super(ProcessWorker, self).__init__()
     self.image_q = image_q
     self.done_dict = done_dict
     self.path = path
     self.model = Pix2PixModel(opt)  # opt is read from module scope (see sketch below)
     self.model.eval()
     self.texture_size = 512
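
Note that opt is read from the enclosing module scope here; a minimal sketch of how it might be prepared, assuming SPADE-style TestOptions and a placeholder checkpoint path:

from options.test_options import TestOptions

opt = TestOptions().parse()             # options object the worker reads
opt.checkpoints_dir = './checkpoints'   # placeholder path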

Example #6

def test_face(old_face_folder,
              old_face_label_folder,
              name,
              gpu_ids,
              load_size,
              label_nc,
              preprocess_mode,
              batchSize,
              results_dir,
              checkpoints_dir="./checkpoints",
              how_many=50,
              tf_log=False,
              tensorboard_log=None,
              no_instance=False,
              no_parsing_map=False,
              serial_batches=False,
              no_flip=True,
              nThreads=2,
              dataroot="./datasets/cityscapes/",
              isTrain=False,
              crop_size=512,
              aspect_ratio=1.0):

    dataloader = data.create_dataloader(batchSize, serial_batches, nThreads,
                                        isTrain, dataroot, old_face_folder,
                                        old_face_label_folder, load_size,
                                        preprocess_mode, crop_size, no_flip,
                                        aspect_ratio)

    model = Pix2PixModel(opt)  # NOTE: opt is not defined in this function in the original; presumably built from the arguments above
    model.eval()

    visualizer = Visualizer(name=name,
                            checkpoints_dir=checkpoints_dir,
                            results_dir=results_dir,
                            batchSize=batchSize,
                            label_nc=label_nc,
                            tf_log=tf_log,
                            tensorboard_log=tensorboard_log)

    single_save_url = os.path.join(checkpoints_dir, name, results_dir,
                                   "each_img")

    if not os.path.exists(single_save_url):
        os.makedirs(single_save_url)

    for i, data_i in enumerate(dataloader):
        if i * batchSize >= how_many:
            break

        generated = model(data_i, mode="inference")

        img_path = data_i["path"]

        for b in range(generated.shape[0]):
            img_name = os.path.split(img_path[b])[-1]
            save_img_url = os.path.join(single_save_url, img_name)

            vutils.save_image((generated[b] + 1) / 2, save_img_url)
Example #7
    def __init__(self, opt):
        self.opt = opt
        self.pix2pix_model = Pix2PixModel(opt)
        self.pix2pix_model_on_one_gpu = self.pix2pix_model

        self.generated = None
        if opt.isTrain:
            self.optimizer_G, self.optimizer_D = \
                self.pix2pix_model_on_one_gpu.create_optimizers(opt)
            self.old_lr = opt.lr
Example #8
    def __init__(self, opt):
        self.opt = opt
        self.pix2pix_model = Pix2PixModel(opt)
        self.pix2pix_model = torch.nn.parallel.DistributedDataParallel(
            self.pix2pix_model,
            device_ids=[opt.gpu],
            find_unused_parameters=True)
        self.pix2pix_model_on_one_gpu = self.pix2pix_model.module

        self.generated = None
        if opt.isTrain:
            self.optimizer_G, self.optimizer_D = self.pix2pix_model_on_one_gpu.create_optimizers(opt)
            self.old_lr = opt.lr
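
DistributedDataParallel needs an initialized process group before this constructor runs. A minimal sketch of the setup this snippet assumes, with env:// rendezvous (e.g. under torchrun) as an assumption:

import torch
import torch.distributed as dist

# hedged sketch: one process per GPU; opt.gpu is taken to be the local rank
dist.init_process_group(backend='nccl', init_method='env://')
torch.cuda.set_device(opt.gpu)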
Example #9
def init(options):
    models = {}
    models['model'] = None
    models['Corr'] = None
    models['Gen'] = None
    if options.inference_mode == 'pytorch':
        models['model'] = Pix2PixModel(options)
        models['model'].eval()
    else:
        models['Corr'] = correspondence_model(options)
        models['Gen'] = generate_model(options)
    return models
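
A hedged usage sketch for init(): callers dispatch on which entries are populated. The data_i dict and 'inference' mode follow the SPADE convention used in the other examples:

models = init(options)
if models['model'] is not None:
    generated = models['model'](data_i, mode='inference')  # PyTorch backend
# otherwise inference runs through models['Corr'] and models['Gen']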
Example #10
    def __init__(self):
        opt = TestOptions()
        with open(model_config_path, 'r') as fp:  # model_config_path: assumed defined at module level
            saved_opts = yaml.safe_load(fp)
        for _opt in saved_opts:
            setattr(opt, _opt, saved_opts[_opt])

        model = Pix2PixModel(opt)
        model.eval()
        self.model = model

        # No longer need SPADE-master on PYTHONPATH.
        sys.path.pop()

Example #11

    def __init__(self, opt):
        self.opt = opt
        self.pix2pix_model = Pix2PixModel(opt)
        if len(opt.gpu_ids) > 0:
            self.pix2pix_model = DataParallelWithCallback(
                self.pix2pix_model, device_ids=opt.gpu_ids)
            self.pix2pix_model_on_one_gpu = self.pix2pix_model.module
        else:
            self.pix2pix_model_on_one_gpu = self.pix2pix_model

        self.generated = None
        if opt.isTrain:
            self.optimizer_G, self.optimizer_D = \
                self.pix2pix_model_on_one_gpu.create_optimizers(opt)
            self.old_lr = opt.lr
Example #12
def run(verbose=False):

    opt = TestOptions().parse(verbose=verbose)

    dataset_name = "coco"
    dataset_filename = "data." + dataset_name + "_dataset"

    datasetlib = importlib.import_module(dataset_filename)

    dataset = None
    target_dataset_name = dataset_name.replace('_', '') + 'dataset'

    for name, cls in datasetlib.__dict__.items():
        if name.lower() == target_dataset_name.lower() \
                and issubclass(cls, BaseDataset):
            dataset = cls

    instance = dataset()
    instance.initialize(opt)
    if verbose:
        print("dataset [%s] of size %d was created" %
              (type(instance).__name__, len(instance)))

    dataloader = torch.utils.data.DataLoader(instance,
                                             batch_size=opt.batchSize,
                                             shuffle=not opt.serial_batches,
                                             num_workers=int(opt.nThreads),
                                             drop_last=opt.isTrain)
    model = Pix2PixModel(opt, verbose)
    model.eval()
    visualizer = Visualizer(opt)
    if verbose:
        print(dataloader)
    for i, data_i in enumerate(dataloader):

        if i * opt.batchSize >= opt.how_many:
            break

        # data_i is a dict with the input tensors plus the source 'path' entries
        generated = model(data_i, mode='inference', verbose=verbose)
        img_path = data_i['path']
        for b in range(generated.shape[0]):
            # batchSize is expected to be 1, so this returns after the first image
            image_dir = os.path.join(os.path.dirname(__file__), "img")
            return visualizer.save_images(img_path[b:b + 1],
                                          generated[b],
                                          image_dir,
                                          verbose=verbose)
Example #13
File: spade.py Project: zahidna/ml4a
def load_model(model_name):
    assert model_name in pretrained_models, \
        '{} not recognized. Available models: {}'.format(model_name, ', '.join(pretrained_models))

    global model, opt, classes

    model_subfolder = os.path.join('SPADE/checkpoints/', model_name)
    checkpoint_dir = os.path.join(downloads.get_ml4a_downloads_folder(),
                                  model_subfolder)
    all_checkpoints_dir = os.path.join(downloads.get_ml4a_downloads_folder(),
                                       'SPADE/checkpoints/')

    for gdrive_id, filename in pretrained_models[model_name]:
        location = os.path.join(model_subfolder, filename)
        downloads.download_from_gdrive(gdrive_id, location)

    with open(os.path.join(checkpoint_dir, 'classes_list.txt'),
              'r') as classes_file:
        classes = eval(classes_file.read())  # file holds a Python list literal; ast.literal_eval would be safer

    opt_file = os.path.join(checkpoint_dir, 'opt.txt')
    parsed_opt = parse_opt_file(opt_file)

    opt = EasyDict({})
    opt.isTrain = False
    opt.checkpoints_dir = all_checkpoints_dir
    opt.name = model_name
    opt.aspect_ratio = float(parsed_opt['aspect_ratio'])
    opt.load_size = int(parsed_opt['load_size'])
    opt.crop_size = int(parsed_opt['crop_size'])
    opt.no_instance = parsed_opt['no_instance'] == 'True'
    opt.preprocess_mode = parsed_opt['preprocess_mode']
    opt.contain_dontcare_label = parsed_opt['contain_dontcare_label'] == 'True'
    opt.gpu_ids = parsed_opt['gpu_ids']
    opt.netG = parsed_opt['netG']
    opt.ngf = int(parsed_opt['ngf'])
    opt.num_upsampling_layers = parsed_opt['num_upsampling_layers']
    opt.use_vae = parsed_opt['use_vae'] == 'True'
    opt.label_nc = int(parsed_opt['label_nc'])
    opt.semantic_nc = (opt.label_nc +
                       (1 if opt.contain_dontcare_label else 0) +
                       (0 if opt.no_instance else 1))
    opt.norm_G = parsed_opt['norm_G']
    opt.init_type = parsed_opt['init_type']
    opt.init_variance = float(parsed_opt['init_variance'])
    opt.which_epoch = parsed_opt['which_epoch']
    model = Pix2PixModel(opt)
    model.eval()
Example #14
def setup(opts):
    global opt
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    options = Options()
    parser = options.initialize(parser)
    options.parser = parser
    name = opts['checkpoints_root'].split('/')[-1]
    checkpoints_dir = os.path.join(opts['checkpoints_root'], '..')
    parser.set_defaults(name=name)
    parser.set_defaults(checkpoints_dir=checkpoints_dir)
    opt = options.parse()
    opt.name = name
    opt.checkpoints_dir = checkpoints_dir
    model = Pix2PixModel(opt)
    model.eval()
    return model
Example #15
def setup(opts):
    global opt
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    options = Options()
    parser = options.initialize(parser)
    name = 'Labels2Landscapes_512'
    checkpoints_dir = os.path.join('.', 'checkpoints')
    parser.set_defaults(name=name, checkpoints_dir=checkpoints_dir)
    options.parser = parser
    opt = options.parse()
    opt.name = name
    opt.checkpoints_dir = checkpoints_dir
    opt.gpu_ids = "0"
    device = torch.device('cuda:0')
    model = Pix2PixModel(opt)
    model.eval()
    return model
Example #16
    def __init__(self):
        self.opt = TestOptions().parse()
        self.opt.crop_size = 512
        self.opt.load_size = 512
        self.opt.no_instance = True
        self.opt.preprocess_mode = "scale_width"
        self.opt.dataset_mode = "custom"
        self.opt.name = "landscape_pretrained"
        self.opt.cache_filelist_read = False
        self.opt.cache_filelist_write = False
        self.opt.semantic_nc = 182
        self.opt.contain_dontcare_label = False

        # Load the model
        self.model = Pix2PixModel(self.opt)
        self.model.eval()

        self.map = {
            110: 156,
            60: 154,
            128: 134,
            99: 149,
            108: 126,  #
            242: 105,
            214: 14,
            145: 124,
            237: 158,
            101: 147,  #
            70: 96,
            135: 168,
            65: 148,
            209: 143,
            150: 110,
            83: 125,
            120: 135,
            141: 119,
            156: 161,
            82: 177,
            230: 118,
            113: 123,
            232: 162
        }
        return
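
The self.map table translates incoming palette IDs to the label IDs the pretrained landscape model expects. A minimal sketch of applying it to an HxW uint8 NumPy label array (remap_labels is a hypothetical helper, not part of the original class):

    def remap_labels(self, raw_label):
        # hedged sketch; raw_label is an HxW NumPy uint8 array
        out = raw_label.copy()
        for src, dst in self.map.items():
            out[raw_label == src] = dst
        return out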
Example #17
def setup(checkpoint_name):
    global model, opt, get_params, get_transform, util
    spade = eden.setup.get_external_repo_dir('spade')
    sys.path.insert(0, spade)

    from options.test_options import TestOptions
    from models.pix2pix_model import Pix2PixModel
    from options.base_options import BaseOptions
    from data.base_dataset import get_params, get_transform
    import util.util as util

    checkpoints_dir = os.path.join(spade, 'checkpoints')
    opt_file = os.path.join(os.path.join(checkpoints_dir, checkpoint_name),
                            'opt.txt')
    parsed_opt = parse_opt_file(opt_file)

    opt = eden.utils.DictMap()
    opt.isTrain = False
    opt.checkpoints_dir = checkpoints_dir
    opt.name = checkpoint_name
    opt.aspect_ratio = float(parsed_opt['aspect_ratio'])
    opt.load_size = int(parsed_opt['load_size'])
    opt.crop_size = int(parsed_opt['crop_size'])
    opt.label_nc = int(parsed_opt['label_nc'])
    opt.no_instance = parsed_opt['no_instance'] == 'True'
    opt.preprocess_mode = parsed_opt['preprocess_mode']
    opt.contain_dontcare_label = parsed_opt['contain_dontcare_label'] == 'True'
    opt.gpu_ids = parsed_opt['gpu_ids']
    opt.netG = parsed_opt['netG']
    opt.ngf = int(parsed_opt['ngf'])
    opt.num_upsampling_layers = parsed_opt['num_upsampling_layers']
    opt.use_vae = parsed_opt['use_vae'] == 'True'
    opt.semantic_nc = (opt.label_nc +
                       (1 if opt.contain_dontcare_label else 0) +
                       (0 if opt.no_instance else 1))
    opt.norm_G = parsed_opt['norm_G']
    opt.init_type = parsed_opt['init_type']
    opt.init_variance = float(parsed_opt['init_variance'])
    opt.which_epoch = parsed_opt['which_epoch']

    model = Pix2PixModel(opt)
    model.eval()
Example #18
def main():
    opt = TestOptions().parse()
    opt.nThreads = 1  # test code only supports nThreads = 1
    opt.batchSize = 1  # test code only supports batchSize = 1
    opt.serial_batches = True  # no shuffle
    opt.no_flip = True  # no flip

    data_loader = CreateDataLoader(opt)
    dataset = data_loader.load_data()

    # read pix2pix/PAN model
    if opt.model == 'pix2pix':
        assert (opt.dataset_mode == 'aligned')
        from models.pix2pix_model import Pix2PixModel
        model = Pix2PixModel()
        model.initialize(opt)
    elif opt.model == 'pan':
        from models.pan_model import PanModel
        model = PanModel()
        model.initialize(opt)

    visualizer = Visualizer(opt)

    # create website
    web_dir = os.path.join(opt.results_dir, opt.name,
                           '%s_%s' % (opt.phase, opt.which_epoch))
    webpage = html.HTML(
        web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' %
        (opt.name, opt.phase, opt.which_epoch))

    # test
    for i, data in enumerate(dataset):
        if i >= opt.how_many:  # default 50 images
            break
        model.set_input(data)
        model.test()
        visuals = model.get_current_visuals()
        img_path = model.get_image_paths()
        print('process image... %s' % img_path)
        visualizer.save_images(webpage, visuals, img_path)

    webpage.save()
Example #19
    def __init__(self, opt):
        self.opt = opt
        self.pix2pix_model = Pix2PixModel(opt)
        if len(opt.gpu_ids) > 0:
            self.pix2pix_model = DataParallelWithCallback(self.pix2pix_model,
                                                          device_ids=opt.gpu_ids)
            self.pix2pix_model_on_one_gpu = self.pix2pix_model.module
        else:
            self.pix2pix_model_on_one_gpu = self.pix2pix_model

        self.generated = None
        if opt.isTrain:
            self.optimizer_G, self.optimizer_D = \
                self.pix2pix_model_on_one_gpu.create_optimizers(opt)
            self.old_lr = opt.lr

        self.amp = bool(AMP and opt.use_amp)
        
        if self.amp:
            self.scaler_G = GradScaler()
            self.scaler_D = GradScaler()
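
When self.amp is set, the two scalers would drive mixed-precision updates roughly as follows. A sketch of the generator step only, following the standard torch.cuda.amp pattern; the mode='generator' call and loss dict mirror the other trainer examples, and the loss reduction is schematic:

    # hedged sketch (assumes `from torch.cuda.amp import autocast` at module level)
    def run_generator_one_step(self, data):
        self.optimizer_G.zero_grad()
        with autocast():
            g_losses, generated = self.pix2pix_model(data, mode='generator')
            g_loss = sum(g_losses.values()).mean()
        self.scaler_G.scale(g_loss).backward()
        self.scaler_G.step(self.optimizer_G)
        self.scaler_G.update()
        self.generated = generated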
Example #20
def generate_one(label_img_path, image_path):
    opt = TestOptions().parse()

    label = Image.open(label_img_path)
    # label_np = np.array(label)
    params = get_params(opt, label.size)
    transform_label = get_transform(opt, params, method=Image.NEAREST, normalize=False)
    label_tensor = transform_label(label) * 255.0
    label_tensor[label_tensor == 255] = opt.label_nc
    label_tensor_batch = torch.unsqueeze(label_tensor,0)
    # SPADE expects 0 as the instance entry when --no_instance is set;
    # otherwise a real instance-map tensor would be needed here
    instance_tensor = 0

    image = Image.open(image_path)
    image = image.convert('RGB')
    transform_image = get_transform(opt, params)
    image_tensor = transform_image(image)

    input_dict = {'label': label_tensor_batch,
                  'instance': instance_tensor,
                  'image': image_tensor,
                  }

    # Load the model

    model = Pix2PixModel(opt)
    model.eval()
    # the model returns a 3x256x256 image tensor
    generated_image = model(input_dict, mode='inference')
    generated_image = torch.squeeze(generated_image)
    image_numpy = generated_image.detach().cpu().numpy()
    image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0
    image_numpy = np.clip(image_numpy, 0, 255)
    img = image_numpy.astype(np.uint8)

    # to save, given image_path:
    # image_pil.save(image_path.replace('.jpg', '.png'))
    return img
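
A hedged usage sketch for generate_one (file names are placeholders):

from PIL import Image

img = generate_one('label.png', 'photo.jpg')  # HxWx3 uint8 array
Image.fromarray(img).save('generated.png')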
Example #21
    def __init__(self, opt):
        self.opt = opt
        self.pix2pix_model = Pix2PixModel(opt)
        self.pix2pix_model.netG = torch.nn.SyncBatchNorm.convert_sync_batchnorm(
            self.pix2pix_model.netG)
        self.pix2pix_model.load_networks()
        if not opt.isTrain:
            self.pix2pix_model.eval()
        self.pix2pix_model = self.pix2pix_model.cuda()

        if opt.isTrain:
            self.optimizer_G, self.optimizer_D = self.pix2pix_model.create_optimizers(
                opt)
            self.old_lr = opt.lr
            self.scaler = amp.GradScaler()

        self.local_rank = torch.distributed.get_rank()
        self.pix2pix_model = torch.nn.parallel.DistributedDataParallel(
            self.pix2pix_model,
            device_ids=[self.local_rank],
            output_device=self.local_rank,
            find_unused_parameters=True)
        self.pix2pix_model_on_one_gpu = self.pix2pix_model.module
Example #22
    def run(self):
        # Setup

        CONFIG = self.deeplab_opt['CONFIG']
        self.CONFIG = CONFIG
        model_path = self.deeplab_opt['model_path']
        cuda = self.deeplab_opt['cuda']
        crf = self.deeplab_opt['crf']
        camera_id = self.deeplab_opt['camera_id']

        device = get_device(cuda)
        torch.set_grad_enabled(False)
        torch.backends.cudnn.benchmark = True

        classes = get_classtable(CONFIG)
        postprocessor = setup_postprocessor(CONFIG) if crf else None

        model = eval(CONFIG.MODEL.NAME)(n_classes=CONFIG.DATASET.N_CLASSES)
        state_dict = torch.load(model_path,
                                map_location=lambda storage, loc: storage)
        model.load_state_dict(state_dict)
        model.eval()
        model.to(device)
        print("Model:", CONFIG.MODEL.NAME)

        # SPADE model
        spade_model = Pix2PixModel(self.spade_opt)
        spade_model.eval()
        print("Spade!")
        print(spade_model)

        coco_dataset = CocoDataset()
        coco_dataset.initialize(self.spade_opt)
        print(coco_dataset)

        #cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)

        #np.set_printoptions(threshold=sys.maxsize)

        while True:
            if len(self.queue) > 0:
                frame = self.queue.pop()
                print("Original Image shape {}".format(frame.shape))
                image, raw_image = preprocessing(frame, device, CONFIG)
                print("Image shape {}".format(raw_image.shape))
                labelmap = inference(model, image, raw_image, postprocessor)

                labelmap[labelmap == 164] = 66
                labelmap[labelmap == 109] = 66

                #not_dining_mask = ((labelmap < 43) | (labelmap > 50)) & (labelmap != 66) & (labelmap != 0)
                #labelmap[not_dining_mask] = 156 # Sky
                # tables to dining table

                labelmap[labelmap == 66] = 154

                dining_objects_mask = (labelmap >= 43) & (labelmap <= 50) | (
                    labelmap == 138) | (labelmap == 142)
                appliances_mask = (labelmap >= 71) & (labelmap <= 79)
                labelmap[dining_objects_mask] = 149
                labelmap[appliances_mask] = 149

                not_sea_mask = (labelmap != 149) & (labelmap != 154)
                labelmap[not_sea_mask] = 156

                #colormap = self.colorize(labelmap)

                #labelmap[labelmap == 66] = 154

                # Frisby and more to sea?
                #labelmap[labelmap == 33] = 154
                #labelmap[labelmap == 66] = 154
                #labelmap[labelmap == 80] = 154
                #Bottle to flower?
                #labelmap[labelmap == 43] = 118
                # Person to rock?
                #labelmap[labelmap == 0] = 168
                #dog to person
                #labelmap[labelmap == 17] = 0

                # Sky grass and bottle flower
                #bottle_mask = (labelmap == 43)
                #labelmap[0:193,:] = 156
                #labelmap[:,:] = 123
                #labelmap[bottle_mask] = 118
                #print(labelmap.shape)
                #Bottle to potted plant
                #labelmap[labelmap == 43] = 63#
                #

                #dining_stuff = [43,44,45,46,47,48,49,50,66]
                #dining_mask = np.isin(labelmap, dining_stuff, invert=True)

                #not_dining_mask = (labelmap < 43) | (labelmap > 50) & (labelmap != 66)

                #labelmap[dining_objects_mask] = 63
                labelimg = Image.fromarray(np.uint8(labelmap), 'L')
                label_resized = np.array(
                    labelimg.resize((256, 256), Image.NEAREST))

                uniques = np.unique(labelmap)
                instance_counter = 0
                instancemap = np.zeros(labelmap.shape)
                print(uniques)

                for label_id in uniques:
                    mask = (labelmap == label_id)
                    instancemap[mask] = instance_counter
                    instance_counter += 1

                instanceimg = Image.fromarray(np.uint8(instancemap), 'L')

                #colormap[not_dining_mask] = [0, 0, 0];

                item = coco_dataset.get_item_from_images(labelimg, instanceimg)
                generated = spade_model(item, mode='inference')
                generated_np = util.tensor2im(generated[0])

                #color_resized = cv2.cvtColor(np.array(Image.fromarray(colormap).resize((256,256), Image.NEAREST)),cv2.COLOR_BGR2RGB)

                generated_np[label_resized == 156] = [0, 0, 0]

                # Masking
                #print("Generated image shape {} label resize shape {}".format(generated_np.shape, label_resized.shape))
                #generated_np[label_resized == 156, :] = [0, 0, 0];
                #generated_np[label_resized == 156, :] = [0, 0, 0];
                #not_dining_mask = (label_resized < 43) | (label_resized > 50) & (label_resized != 66)
                #generated_np[not_dining_mask] = [0, 0, 0];

                #only people
                #people_mask = np.isin(label_resized, [0,66], invert=True)
                #color_resized[label_resized != 0] = [0,0,0]

                #generated_rgb = cv2.cvtColor(generated_np, cv2.COLOR_BGR2RGB)
                #color_gray = cv2.cvtColor(color_resized, cv2.COLOR_BGR2GRAY)
                #color_gray_rgb = cv2.cvtColor(color_gray, cv2.COLOR_GRAY2RGB)
                #not_dining_resized = (label_resized < 43) | (label_resized > 50) & (label_resized != 66)
                #color_gray_rgb[label_resized != 154, :] = [0, 0, 0];

                #generated_np[label_resized == 154, :] = [0,0,0]

                #raw_image_resized = cv2.cvtColor(np.array(Image.fromarray(raw_image).resize((256,256), Image.NEAREST)),cv2.COLOR_BGR2RGB)
                #raw_image_resized[people_mask] = [0, 0, 0];

                #cv2.addWeighted(color_resized, 0.5, raw_image_resized, 0.5 , 0.0, raw_image_resized)

                #self.push_frame(raw_image_resized)
                #raw_rgb = cv2.cvtColor(raw_image, cv2.COLOR_BGR2RGB)

                #final = np.concatenate((generated_np, color_resized), axis=1)
                #print("Gans shape {}, colormap shape {}, Final shape {}".format(generated_np.shape, color_resized.shape, final.shape))
                #final[:,:256,:] = generated_np
                #final[:,256:,:] = color_resized

                #raw_rgb = cv2.cvtColor(raw_image, cv2.COLOR_BGR2RGB)
                #map_rgb = cv2.cvtColor(colormap, cv2.COLOR_BGR2RGB)
                #cv2.addWeighted(map_rgb, 0.5, raw_rgb, 0.5, 0.0, raw_rgb)
                #self.push_frame(raw_rgb)

                #print("raw image shape {}".format(raw_image.shape))
                #print("Generated image {}".format(generated_np))
                #print("raw image  {}".format(raw_image))

                # Register mouse callback function
                #cv2.setMouseCallback(window_name, self.mouse_event, labelmap)

                # Overlay prediction
                #cv2.addWeighted(colormap, 1.0, raw_image, 0.0, 0.0, raw_image)

                # Quit by pressing "q" key
                #cv2.imshow(window_name, raw_image)
                #cv2.resizeWindow(window_name, 1024,1024)
                #if cv2.waitKey(10) == ord("q"):
                #    break

                self.push_frame(generated_np)
Example #23
def live(config_path, model_path, cuda, crf, camera_id):
    """
    Inference from camera stream
    """

    # Setup
    CONFIG = Dict(yaml.safe_load(open(config_path)))  # open the file first; yaml.load on the path string was a bug
    device = get_device(cuda)
    torch.set_grad_enabled(False)
    torch.backends.cudnn.benchmark = True

    classes = get_classtable(CONFIG)
    postprocessor = setup_postprocessor(CONFIG) if crf else None

    model = eval(CONFIG.MODEL.NAME)(n_classes=CONFIG.DATASET.N_CLASSES)
    state_dict = torch.load(model_path,
                            map_location=lambda storage, loc: storage)
    model.load_state_dict(state_dict)
    model.eval()
    model.to(device)
    print("Model:", CONFIG.MODEL.NAME)

    # SPADE model
    opt = TestOptions().parse()
    opt.use_vae = False
    spade_model = Pix2PixModel(opt)
    spade_model.eval()
    print("Spade!")
    print(spade_model)

    coco_dataset = CocoDataset()
    coco_dataset.initialize(opt)
    print(coco_dataset)

    # UVC camera stream
    cap = cv2.VideoCapture(camera_id)
    cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*"YUYV"))

    def colorize(labelmap):
        print(labelmap.shape)
        # Assign a unique color to each label
        labelmap = labelmap.astype(np.float32) / CONFIG.DATASET.N_CLASSES
        colormap = cm.jet_r(labelmap)[..., :-1] * 255.0
        return np.uint8(colormap)

    def mouse_event(event, x, y, flags, labelmap):
        # Show a class name of a mouse-overed pixel
        label = labelmap[y, x]
        name = classes[label]
        print(name)

    window_name = "{} + {}".format(CONFIG.MODEL.NAME, CONFIG.DATASET.NAME)
    cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)

    np.set_printoptions(threshold=sys.maxsize)

    while True:
        _, frame = cap.read()
        image, raw_image = preprocessing(frame, device, CONFIG)
        #print("Image shape {}".format(image.shape))
        labelmap = inference(model, image, raw_image, postprocessor)

        # Frisby and more to sea?
        #labelmap[labelmap == 33] = 154
        #labelmap[labelmap == 66] = 154
        #labelmap[labelmap == 80] = 154
        #Bottle to flower?
        labelmap[labelmap == 43] = 118
        # Person to rock?
        #labelmap[labelmap == 0] = 168
        #dog to person
        #labelmap[labelmap == 17] = 0

        # Sky grass and bottle flower
        #bottle_mask = (labelmap == 43)
        #labelmap[0:193,:] = 156
        #labelmap[:,:] = 123
        #labelmap[bottle_mask] = 118

        #print(labelmap.shape)

        #colormap = colorize(labelmap)
        uniques = np.unique(labelmap)
        instance_counter = 0
        instancemap = np.zeros(labelmap.shape)
        print(uniques)
        for label_id in uniques:
            mask = (labelmap == label_id)
            instancemap[mask] = instance_counter
            instance_counter += 1

        labelimg = Image.fromarray(np.uint8(labelmap), 'L')
        instanceimg = Image.fromarray(np.uint8(instancemap), 'L')
        #labelimg.show()

        item = coco_dataset.get_item_from_images(labelimg, instanceimg)
        generated = spade_model(item, mode='inference')

        generated_np = util.tensor2im(generated[0])

        # Masking
        #print("Generated image shape {} label resize shape {}".format(generated_np.shape, label_resized.shape))
        #label_resized = np.array(labelimg.resize((256,256), Image.NEAREST))
        #generated_np[label_resized != 118, :] = [0, 0, 0];

        generated_rgb = cv2.cvtColor(generated_np, cv2.COLOR_BGR2RGB)

        #print("raw image shape {}".format(raw_image.shape))
        #print("Generated image {}".format(generated_np))
        #print("raw image  {}".format(raw_image))

        # Register mouse callback function
        cv2.setMouseCallback(window_name, mouse_event, labelmap)

        # Overlay prediction
        #cv2.addWeighted(colormap, 1.0, raw_image, 0.0, 0.0, raw_image)

        # Quit by pressing "q" key
        cv2.imshow(window_name, generated_rgb)
        cv2.resizeWindow(window_name, 1024, 1024)
        if cv2.waitKey(10) == ord("q"):
            break

Example #24

                    elif self.menu.doFill:
                        sub_pos = (event.pos[0] - self.MENU_WIDTH,
                                   event.pos[1])  # convert to a pos inside the subsurface
                        self.fill_by_cv2(sub_pos)
                    else:
                        sub_pos = (event.pos[0] - self.MENU_WIDTH,
                                   event.pos[1])  # convert to a pos inside the subsurface
                        self.brush.start_draw(sub_pos)  # adapted to the subsurface
                elif event.type == MOUSEMOTION:
                    sub_pos = (event.pos[0] - self.MENU_WIDTH,
                               event.pos[1])  # convert to a pos inside the subsurface
                    self.brush.draw(sub_pos)  # adapted to the subsurface
                elif event.type == MOUSEBUTTONUP:
                    self.brush.end_draw()

            self.menu.draw()
            pygame.display.update()


def main():
    app = Painter()
    app.run()


if __name__ == '__main__':

    # Load the model
    model = Pix2PixModel(TestOptions().parse())
    model.eval()
    main()
Example #25
from pytorch_lightning import Trainer
from pytorch_lightning.loggers import TensorBoardLogger
from models.pix2pix_model import Pix2PixModel

if __name__ == "__main__":

    logger = TensorBoardLogger('tb_logs', name='my_model')

    trainer = Trainer(logger=logger)
    net = Pix2PixModel(input_nc=3,
                       output_nc=3,
                       ngf=64,
                       ndf=64,
                       netG='unet_128',
                       gan_mode='vanilla',
                       norm='instance',
                       n_layers_D=3,
                       netD='n_layers',
                       lr=0.0002,
                       beta1=0.5,
                       device='cpu',
                       batch_size=3,
                       dataroot='/home/gujiaxin/桌面/AB',
                       load_size=(300, 300),
                       crop_size=(300, 300),
                       preprocess=['resize', 'crop', 'flip'],
                       lambda_L1=100.0)

    trainer.fit(net)
Example #26
from util import html

opt = TestOptions().parse()
opt.nThreads = 1  # test code only supports nThreads = 1
opt.batchSize = 1  # test code only supports batchSize = 1
opt.serial_batches = True  # no shuffle
opt.no_flip = True  # no flip

data_loader = CreateDataLoader(opt)
dataset = data_loader.load_data()

# read pix2pix/PAN model
if opt.model == 'pix2pix':
    assert (opt.dataset_mode == 'aligned')
    from models.pix2pix_model import Pix2PixModel
    model = Pix2PixModel()
    model.initialize(opt)
elif opt.model == 'pan':
    from models.pan_model import PanModel
    model = PanModel()
    model.initialize(opt)

visualizer = Visualizer(opt)

# create website
web_dir = os.path.join(opt.results_dir, opt.name,
                       '%s_%s' % (opt.phase, opt.which_epoch))
webpage = html.HTML(
    web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' %
    (opt.name, opt.phase, opt.which_epoch))
Example #27
    def run(self):
        # Setup

        CONFIG = self.deeplab_opt['CONFIG']
        self.CONFIG = CONFIG
        model_path = self.deeplab_opt['model_path']
        cuda = self.deeplab_opt['cuda']
        crf = self.deeplab_opt['crf']
        camera_id = self.deeplab_opt['camera_id']

        device = get_device(cuda)
        torch.set_grad_enabled(False)
        torch.backends.cudnn.benchmark = True

        classes = get_classtable(CONFIG)
        postprocessor = setup_postprocessor(CONFIG) if crf else None

        model = eval(CONFIG.MODEL.NAME)(n_classes=CONFIG.DATASET.N_CLASSES)
        state_dict = torch.load(model_path,
                                map_location=lambda storage, loc: storage)
        model.load_state_dict(state_dict)
        model.eval()
        model.to(device)
        print("Model:", CONFIG.MODEL.NAME)

        # SPADE model
        spade_model = Pix2PixModel(self.spade_opt)
        spade_model.eval()
        spade_model.to(device)
        print("Spade!")
        print(spade_model)

        coco_dataset = CocoDataset()
        coco_dataset.initialize(self.spade_opt)
        print(coco_dataset)

        while True:
            while not self.osc_queue.empty():
                item = self.osc_queue.get()
                self.process_queue(item)

            if len(self.queue) > 0:
                frame = self.queue.pop()
                #print("Original Image shape {}".format(frame.shape))
                image, raw_image = preprocessing(frame, device, CONFIG)
                raw_image = cv2.cvtColor(raw_image, cv2.COLOR_BGR2RGB)
                #print("Image shape {}".format(raw_image.shape))
                labelmap = inference(model, image, raw_image, postprocessor)

                if self.current_state == "test-bowl":
                    self.test_bowl(labelmap)

                uniques = np.unique(labelmap)
                print([ID_TO_LABEL[unique] for unique in uniques])

                if self.send_bowl and LABEL_TO_ID['bowl'] in uniques:
                    box = self.get_bounding_box_of(LABEL_TO_ID['bowl'],
                                                   labelmap)
                    (rmin, cmin, rmax, cmax) = box
                    coords = (int((cmin + cmax) / 2), int((rmin + rmax) / 2))
                    print("Bowl coords {} out of {} ".format(
                        coords, labelmap.shape))
                    self.t2i_client.send_message("/deeplab/bowl",
                                                 [coords[0], coords[1]])

                if not self.map_deeplab:
                    colormap = self.colorize(labelmap)
                    for masking in self.deeplab_masks:
                        mask = np.isin(labelmap,
                                       masking['items'],
                                       invert=masking['invert'])
                        colormap[mask, :] = [0, 0, 0]

                for mapping in self.maps:
                    mask = np.isin(labelmap,
                                   mapping['from'],
                                   invert=mapping['invert'])
                    if mapping['expand'] > 0:
                        mask = self.expand_mask(mask, mapping['expand'])
                    labelmap[mask] = mapping['to']

                if self.map_deeplab:
                    colormap = self.colorize(labelmap)
                    for masking in self.deeplab_masks:
                        mask = np.isin(labelmap,
                                       masking['items'],
                                       invert=masking['invert'])
                        colormap[mask, :] = [0, 0, 0]
                    if self.show_raw:
                        for masking in self.deeplab_masks:
                            mask = np.isin(labelmap,
                                           masking['items'],
                                           invert=masking['invert'])
                            raw_image[mask, :] = [0, 0, 0]

                if self.show_labels:
                    for unique in uniques:
                        box = self.get_bounding_box_of(unique, labelmap)
                        self.put_text_in_center(colormap, box,
                                                ID_TO_LABEL[unique])

                #color_resized = cv2.cvtColor(np.array(Image.fromarray(colormap).resize((256,256), Image.NEAREST)),cv2.COLOR_BGR2RGB)

                if self.show_gaugan:
                    uniques = np.unique(labelmap)
                    instance_counter = 0
                    instancemap = np.zeros(labelmap.shape)

                    for label_id in uniques:
                        mask = (labelmap == label_id)
                        instancemap[mask] = instance_counter
                        instance_counter += 1

                    instanceimg = Image.fromarray(np.uint8(instancemap), 'L')

                    labelimg = Image.fromarray(np.uint8(labelmap), 'L')
                    label_resized = np.array(
                        labelimg.resize((256, 256), Image.NEAREST))

                    item = coco_dataset.get_item_from_images(
                        labelimg, instanceimg)

                    generated = spade_model(item, mode='inference')
                    generated_np = util.tensor2im(generated[0])

                    for masking in self.gaugan_masks:
                        mask = np.isin(label_resized,
                                       masking['items'],
                                       invert=masking['invert'])
                        generated_np[mask, :] = [0, 0, 0]
                    print("SPADE Shape {}".format(generated_np.shape))
                else:
                    generated_np = np.uint8(np.zeros((256, 256, 3)))

                final = np.concatenate((generated_np, colormap, raw_image),
                                       axis=1)
                #final = np.concatenate((generated_np, colormap), axis=1)

                self.push_frame(final)
Example #28
 def __init__(self, opt):
     self.model = Pix2PixModel(opt)
     self.model.eval()
Example #29
import os
from collections import OrderedDict

import torch
import data
from options.test_options import TestOptions
from models.pix2pix_model import Pix2PixModel
from util.visualizer import Visualizer
from util import html

opt = TestOptions().parse()

dataloader = data.create_dataloader(opt)

model = Pix2PixModel(opt)
model.eval()

visualizer = Visualizer(opt)

# create a webpage that summarizes all the results
web_dir = os.path.join(opt.results_dir, opt.name,
                       '%s_%s' % (opt.phase, opt.which_epoch))
webpage = html.HTML(
    web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' %
    (opt.name, opt.phase, opt.which_epoch))

# test
for i, data_i in enumerate(dataloader):
    if i * opt.batchSize >= opt.how_many:
        break
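
The snippet is truncated at the break; the loop body typically continues as in Example #2. Reproduced here as a sketch, not as part of the original snippet:

    generated = model(data_i, mode='inference')
    img_path = data_i['path']
    for b in range(generated.shape[0]):
        visuals = OrderedDict([('input_label', data_i['label'][b]),
                               ('synthesized_image', generated[b])])
        visualizer.save_images(webpage, visuals, img_path[b:b + 1])

webpage.save()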
Example #30
 def __init__(self):
     self.model = Pix2PixModel(opt)  # opt assumed defined at module level
     self.model.eval()
     torch.cuda.set_device(3)
     self.cuda = torch.cuda.is_available()