Example #1
    def __init__(self, *args, **kwargs):
        self.G, self.D = kwargs.pop('models')
        self.args = kwargs.pop('args')
        self.args.content_layers = set(self.args.content_layers)
        self.args.style_layers = set(self.args.style_layers)
        self.layers = self.args.content_layers | self.args.style_layers

        print('Extracting style features from {} ...\n'.format(
            self.args.style_image_path))
        style_image = im_preprocess_vgg(imread(self.args.style_image_path),
                                        load_size=self.args.style_load_size,
                                        dtype=np.float32)
        style_image_var = Variable(chainer.dataset.concat_examples(
            [style_image], self.args.gpu),
                                   volatile='on')
        style_features = extract({'data': style_image_var}, self.D,
                                 self.args.style_layers)
        self.grams = {}
        for key, value in style_features.items():
            gram_feature = gram(value[0])
            _, w, h = gram_feature.shape
            gram_feature = F.broadcast_to(gram_feature,
                                          (self.args.batch_size, w, h))
            gram_feature.volatile = 'off'
            self.grams[key] = gram_feature

        super(StyleUpdater, self).__init__(*args, **kwargs)
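
Example #1 assumes a `gram` helper that is not shown. A minimal sketch under the usual Gram-matrix definition; the leading singleton axis and the size normalization are assumptions inferred from the `_, w, h = gram_feature.shape` unpacking and the broadcast at the call site:

import chainer.functions as F

# Hypothetical `gram`: Gram matrix of a (C, H, W) feature map, returned as
# (1, C, C) so it can be broadcast across the batch (assumption).
def gram(feature):
    C, H, W = feature.shape
    flat = F.reshape(feature, (C, H * W))
    g = F.matmul(flat, flat, transb=True) / (C * H * W)  # (C, C)
    return F.reshape(g, (1, C, C))
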
Example #2
def main():
    parser = argparse.ArgumentParser(
        description='Predict whether each image in a list is realistic or not')
    parser.add_argument('--gpu',
                        type=int,
                        default=0,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--model_path',
                        default='models/realismCNN_all_iter3.npz',
                        help='Path for pretrained model')
    parser.add_argument('--list_path', help='Path for file storing image list')
    parser.add_argument('--batch_size',
                        type=int,
                        default=10,
                        help='Batch size for one iteration')
    parser.add_argument('--load_size',
                        type=int,
                        default=256,
                        help='Scale image to load_size')
    parser.add_argument('--result_path',
                        default='result.txt',
                        help='Path for file storing results')
    args = parser.parse_args()

    model = RealismCNN()
    print('Load pretrained model from {} ...'.format(args.model_path))
    serializers.load_npz(args.model_path, model)
    if args.gpu >= 0:
        chainer.cuda.get_device(args.gpu).use()  # Make a specified GPU current
        model.to_gpu()  # Copy the model to the GPU

    print('Load images from {} ...'.format(args.list_path))
    dataset = chainer.datasets.ImageDataset(paths=args.list_path, root='')
    print('{} images in total loaded'.format(len(dataset)))
    data_iterator = chainer.iterators.SerialIterator(dataset,
                                                     args.batch_size,
                                                     repeat=False,
                                                     shuffle=False)

    scores = np.zeros((0, 2))
    for idx, batch in enumerate(data_iterator):
        print('Processing batch {}->{}/{} ...'.format(
            idx * args.batch_size + 1,
            min(len(dataset), (idx + 1) * args.batch_size), len(dataset)))
        batch = [
            im_preprocess_vgg(np.transpose(im, [1, 2, 0]), args.load_size)
            for im in batch
        ]
        batch = Variable(chainer.dataset.concat_examples(batch, args.gpu),
                         volatile='on')
        result = chainer.cuda.to_cpu(model(batch, dropout=False).data)
        scores = np.vstack((scores, np.mean(result, axis=(2, 3))))

    print('Processing DONE!')
    print('Saving result to {} ...'.format(args.result_path))
    with open(args.result_path, 'w') as f:
        for score in scores:
            f.write('{},{}\n'.format(score[0], score[1]))
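
These snippets use the pre-2.0 Chainer `volatile` flag, which later versions removed. On Chainer 2.x and newer, an equivalent inference-only forward pass for the loop above would look like this (a sketch reusing the names from Example #2):

import chainer

# Chainer >= 2.0: disable autograd bookkeeping and switch to test mode
# instead of constructing Variable(..., volatile='on').
with chainer.no_backprop_mode(), chainer.using_config('train', False):
    batch_array = chainer.dataset.concat_examples(batch, args.gpu)
    result = chainer.cuda.to_cpu(model(batch_array, dropout=False).data)
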
Example #3
    def get_example(self, i):
        img = im_preprocess_vgg(imread(os.path.join(self._root,
                                                    self._paths[i])),
                                load_size=self._load_size,
                                dtype=self._dtype)
        if self._crop_size:
            _, w, h = img.shape
            # Random top-left corner; the +1 keeps randint valid when the
            # image exactly matches the crop size.
            sx = numpy.random.randint(0, w - self._crop_size + 1)
            sy = numpy.random.randint(0, h - self._crop_size + 1)
            img = img[:, sx:sx + self._crop_size, sy:sy + self._crop_size]
        if self._flip and numpy.random.rand() > 0.5:
            img = img[:, :, ::-1]

        return img
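
`im_preprocess_vgg` is used throughout these examples but never shown. A minimal sketch of standard Caffe/VGG-style preprocessing consistent with the call sites; the signature is inferred from usage and the exact mean values are an assumption:

import numpy as np
from skimage.transform import resize

VGG_MEAN_BGR = np.array([103.939, 116.779, 123.68], dtype=np.float32)  # assumption

def im_preprocess_vgg(im, load_size=None, sub_mean=True, dtype=np.float32,
                      preserve_range=True):
    # Resize (optionally), RGB -> BGR, subtract the VGG mean, HWC -> CHW.
    if load_size:
        im = resize(im, (load_size, load_size), preserve_range=preserve_range)
    if im.ndim == 2:                        # grayscale mask: (H, W) -> (1, H, W)
        return np.asarray(im[np.newaxis], dtype=dtype)
    im = im[:, :, ::-1].astype(np.float32)  # RGB -> BGR, as the Caffe VGG expects
    if sub_mean:
        im -= VGG_MEAN_BGR
    return np.asarray(np.transpose(im, (2, 0, 1)), dtype=dtype)  # HWC -> CHW
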
Example #4
def main():
    parser = argparse.ArgumentParser(
        description='Modified Poisson image editing')
    parser.add_argument('--poisson_weight',
                        type=float,
                        default=1,
                        help='Weight for poisson loss')
    parser.add_argument('--content_weight',
                        type=float,
                        default=5e-4,
                        help='Weight for content loss')
    parser.add_argument('--tv_weight',
                        type=float,
                        default=1e-3,
                        help='Weight for tv loss')
    parser.add_argument('--n_iteration',
                        type=int,
                        default=3500,
                        help='# of iterations')
    parser.add_argument('--save_intervel',
                        type=int,
                        default=100,
                        help='Save result every # of iterations')
    parser.add_argument('--rand_init',
                        type=lambda x: x == 'True',
                        default=True,
                        help='Random init input if True')

    parser.add_argument('--gpu',
                        type=int,
                        default=0,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument(
        '--data_root',
        default='/data1/wuhuikai/benchmark/TransientAttributes/imageCropped',
        help='Root folder for cropped transient attributes dataset')
    parser.add_argument('--load_size',
                        type=int,
                        default=224,
                        help='Scale image to load_size')
    parser.add_argument('--total_instance',
                        type=int,
                        default=175,
                        help='# of instance to run test')
    parser.add_argument('--seed', type=int, default=5, help='Random seed')
    parser.add_argument('--min_ratio',
                        type=float,
                        default=0.2,
                        help='Min ratio for size of random rect')
    parser.add_argument('--max_ratio',
                        type=float,
                        default=0.6,
                        help='Max ratio for size of random rect')
    parser.add_argument('--result_folder',
                        default='transient_attributes_result/modified_result',
                        help='Name for folder storing results')
    parser.add_argument('--result_name',
                        default='loss.txt',
                        help='Name for file saving loss change')
    args = parser.parse_args()

    print('Input arguments:')
    for key, value in vars(args).items():
        print('\t{}: {}'.format(key, value))
    print('')

    args.prefix_name = '_'.join(
        sorted([
            '{}({})'.format(key, value) for key, value in vars(args).items()
            if key in set([
                'poisson_weight', 'realism_weight', 'content_weight',
                'tv_weight', 'n_iteration', 'rand_init'
            ])
        ]))

    # Init image list
    print('Load images from {} ...'.format(args.data_root))
    folders = [
        folder for folder in os.listdir(args.data_root)
        if os.path.isdir(os.path.join(args.data_root, folder))
    ]
    imgs_each_folder = {
        folder: glob.glob(os.path.join(args.data_root, folder, '*'))
        for folder in folders
    }
    print('\t {} images in {} folders in total ...\n'.format(
        np.sum([len(v) for k, v in imgs_each_folder.items()]), len(folders)))

    # Init result folder
    if not os.path.isdir(args.result_folder):
        os.makedirs(args.result_folder)
    print('Results will be saved to {} ...\n'.format(args.result_folder))

    # Init Constant Variable
    args.W_laplace = Variable(make_kernel(
        3, 3,
        np.asarray([[-1, -1, -1], [-1, 8, -1], [-1, -1, -1]],
                   dtype=np.float32)),
                              volatile='auto')
    args.W_laplace.to_gpu()

    loss_change = []
    np.random.seed(args.seed)
    for i in range(args.total_instance):
        folder = np.random.choice(folders)
        print('Processing {}/{}, select folder {} ...'.format(
            i + 1, args.total_instance, folder))
        obj_path, bg_path = np.random.choice(imgs_each_folder[folder],
                                             2,
                                             replace=False)
        print('\tObj: {}, Bg: {} ...'.format(os.path.basename(obj_path),
                                             os.path.basename(bg_path)))

        obj_img = imread(obj_path)
        args.orig_size = obj_img.shape[:2]
        obj_vgg = im_preprocess_vgg(obj_img, args.load_size, dtype=np.float32)
        bg_vgg = im_preprocess_vgg(imread(bg_path),
                                   args.load_size,
                                   dtype=np.float32)
        args.shape = obj_vgg.shape

        # random rect
        w, h = np.asarray(
            np.random.uniform(args.min_ratio, args.max_ratio, 2) *
            args.load_size,
            dtype=np.uint32)
        sx, sy = np.random.randint(0, args.load_size - w), np.random.randint(
            0, args.load_size - h)

        expand_mask = np.zeros((1, args.load_size, args.load_size),
                               dtype=np.float32)
        expand_mask[:, sx:sx + w, sy:sy + h] = 1
        mask = np.zeros_like(expand_mask)
        mask[:, sx + 1:sx + w - 1, sy + 1:sy + h - 1] = 1
        inverse_mask = 1 - mask
        ## vars
        obj_var = Variable(chainer.dataset.concat_examples([obj_vgg],
                                                           args.gpu),
                           volatile='on')
        bg_var = Variable(chainer.dataset.concat_examples([bg_vgg], args.gpu),
                          volatile='auto')
        mask_var = F.broadcast_to(
            Variable(chainer.dataset.concat_examples([mask], args.gpu),
                     volatile='auto'), obj_var.shape)
        inverse_mask_var = F.broadcast_to(
            Variable(chainer.dataset.concat_examples([inverse_mask], args.gpu),
                     volatile='auto'), obj_var.shape)
        inverse_border_mask_var = F.broadcast_to(
            Variable(chainer.dataset.concat_examples([1 - expand_mask + mask],
                                                     args.gpu),
                     volatile='auto'), obj_var.shape)
        ## Laplace
        content_laplace = F.convolution_2d(
            obj_var, W=args.W_laplace, pad=1) * mask_var + F.convolution_2d(
                bg_var, W=args.W_laplace, pad=1) * inverse_mask_var
        content_laplace.volatile = 'off'

        copy_paste_vgg = obj_vgg * mask + bg_vgg * inverse_mask
        ## args
        args.content_laplace = content_laplace
        args.mask = mask
        args.inverse_mask = inverse_mask
        args.inverse_mask_var = inverse_mask_var
        args.inverse_border_mask_var = inverse_border_mask_var
        args.bg_vgg = bg_vgg
        args.bg_var = bg_var
        args.copy_paste_vgg = copy_paste_vgg
        args.im_name = 'folder_{}_obj_{}_bg_{}'.format(
            folder,
            os.path.splitext(os.path.basename(obj_path))[0],
            os.path.splitext(os.path.basename(bg_path))[0])

        args.iter = 0
        x_init = np.asarray(
            np.random.randn(*args.shape) * 0.001,
            dtype=np.float32) if args.rand_init else np.copy(copy_paste_vgg)
        print('\tOptimize start ...')
        res = minimize(color_adjust,
                       x_init,
                       args=(args,),
                       method='L-BFGS-B',
                       jac=True,
                       options={
                           'maxiter': args.n_iteration,
                           'disp': True
                       })
        # Cut and paste loss
        args.iter = -1
        f0, _ = color_adjust(copy_paste_vgg, args)
        print('\tOptimize done, loss = {} from {}\n'.format(res.fun, f0))
        loss_change.append((args.im_name, f0, res.fun))

        args.iter = ''
        save_result(res.x, args)

    with open(os.path.join(args.result_folder, args.result_name), 'w') as f:
        for name, f0, fb in loss_change:
            f.write('{} {} {}\n'.format(name, f0, fb))
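
Examples #4 and #5 build fixed convolution weights with a `make_kernel` helper that is not shown; Example #4 uses the 8-neighbor Laplacian, Example #5 the 4-neighbor one. A plausible sketch follows, where the argument order (kernel height, kernel width) and the per-channel diagonal layout are guesses from the call sites:

import numpy as np

def make_kernel(kh, kw, kernel, channels=3):
    # Place the kh x kw kernel on the channel diagonal so each image channel
    # is filtered independently, with no cross-channel mixing (assumption).
    W = np.zeros((channels, channels, kh, kw), dtype=np.float32)
    for c in range(channels):
        W[c, c] = kernel
    return W
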
Example #5
def main():
    parser = argparse.ArgumentParser(
        description='Poisson image editing using RealismCNN')
    parser.add_argument('--poisson_weight',
                        type=float,
                        default=1,
                        help='Weight for poisson loss')
    parser.add_argument('--realism_weight',
                        type=float,
                        default=1e4,
                        help='Weight for realism loss')
    parser.add_argument('--content_weight',
                        type=float,
                        default=1,
                        help='Weight for content loss')
    parser.add_argument('--tv_weight',
                        type=float,
                        default=1e-1,
                        help='Weight for tv loss')
    parser.add_argument('--n_iteration',
                        type=int,
                        default=1000,
                        help='# of iterations')
    parser.add_argument('--save_intervel',
                        type=int,
                        default=100,
                        help='Save result every # of iterations')
    parser.add_argument('--rand_init',
                        type=lambda x: x == 'True',
                        default=True,
                        help='Random init input if True')
    parser.add_argument('--content_layers',
                        type=str2list,
                        default='conv4_1',
                        help='Layers for content loss, separated by ";"')

    parser.add_argument('--gpu',
                        type=int,
                        default=0,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--realism_model_path',
                        default='model/realismCNN_all_iter3.npz',
                        help='Path for pretrained Realism model')
    parser.add_argument('--content_model_path',
                        default='model/VGG_ILSVRC_19_layers.pkl',
                        help='Path for pretrained VGG model')
    parser.add_argument(
        '--data_root',
        default='/data1/wuhuikai/benchmark/Realistic/color_adjustment',
        help='Root folder for color adjustment dataset')
    parser.add_argument('--img_folder',
                        default='pngimages',
                        help='Folder storing images')
    parser.add_argument('--list_name',
                        default='list.txt',
                        help='Name for file storing image list')
    parser.add_argument('--load_size',
                        type=int,
                        default=224,
                        help='Scale image to load_size')
    parser.add_argument('--result_folder',
                        default='image_editing_result',
                        help='Name for folder storing results')
    parser.add_argument('--result_name',
                        default='loss.txt',
                        help='Name for file saving loss change')
    args = parser.parse_args()

    args.content_layers = set(args.content_layers)

    print('Input arguments:')
    for key, value in vars(args).items():
        print('\t{}: {}'.format(key, value))
    print('')

    args.prefix_name = '_'.join(
        sorted([
            '{}({})'.format(key, value) for key, value in vars(args).items()
            if key not in set([
                'realism_model_path', 'content_model_path', 'data_root',
                'img_folder', 'list_name', 'result_folder', 'result_name'
            ])
        ]))

    # Init CNN model
    realism_cnn = RealismCNN()
    print('Load pretrained Realism model from {} ...'.format(
        args.realism_model_path))
    serializers.load_npz(args.realism_model_path, realism_cnn)
    print('Load pretrained VGG model from {} ...\n'.format(
        args.content_model_path))
    with open(args.content_model_path, 'rb') as f:
        vgg = pickle.load(f)
    if args.gpu >= 0:
        chainer.cuda.get_device(args.gpu).use()  # Make a specified GPU current
        realism_cnn.to_gpu()  # Copy the model to the GPU
        vgg.to_gpu()

    # Init image list
    im_root = os.path.join(args.data_root, args.img_folder)
    print('Load images from {} according to list {} ...'.format(
        im_root, args.list_name))
    with open(os.path.join(args.data_root, args.list_name)) as f:
        im_list = f.read().strip().split('\n')
    total = len(im_list)
    print('{} images loaded!\n'.format(total))

    # Init result folder
    if not os.path.isdir(args.result_folder):
        os.makedirs(args.result_folder)
    print('Results will be saved to {} ...\n'.format(args.result_folder))

    # Init Constant Variable
    W_laplace = Variable(make_kernel(
        3, 3,
        np.asarray([[0, -1, 0], [-1, 4, -1], [0, -1, 0]], dtype=np.float32)),
                         volatile='auto')
    W_laplace.to_gpu()
    args.W_laplace = W_laplace
    W_sum = Variable(make_kernel(
        3, 3, np.asarray([[0, 1, 0], [1, 0, 1], [0, 1, 0]], dtype=np.float32)),
                     volatile='auto')
    W_sum.to_gpu()

    loss_change = []
    for idx, im_name in enumerate(im_list):
        print('Processing {}/{}, name = {} ...'.format(idx + 1, total,
                                                       im_name))
        obj_vgg = im_preprocess_vgg(imread(
            os.path.join(im_root, '{}_obj.png'.format(im_name))),
                                    args.load_size,
                                    dtype=np.float32)
        bg_vgg = im_preprocess_vgg(imread(
            os.path.join(im_root, '{}_bg.png'.format(im_name))),
                                   args.load_size,
                                   dtype=np.float32)
        expand_mask = im_preprocess_vgg(imread(
            os.path.join(im_root, '{}_softmask.png'.format(im_name))),
                                        args.load_size,
                                        sub_mean=False,
                                        dtype=np.uint8,
                                        preserve_range=False)

        args.orig_size = (args.load_size, args.load_size)
        args.shape = bg_vgg.shape
        ## mask
        mask = erosion(np.squeeze(expand_mask), np.ones((3, 3),
                                                        dtype=np.uint8))
        mask = np.asarray(mask[np.newaxis, :, :], dtype=np.float32)
        expand_mask = np.asarray(expand_mask, dtype=np.float32)
        inverse_mask = 1 - mask
        ## vars
        obj_var = Variable(chainer.dataset.concat_examples([obj_vgg],
                                                           args.gpu),
                           volatile='on')
        mask_var = F.broadcast_to(
            Variable(chainer.dataset.concat_examples([mask], args.gpu)),
            obj_var.shape)
        ## Laplace
        content_laplace = F.convolution_2d(obj_var, W=W_laplace, pad=1)
        content_laplace.volatile = 'off'
        # prefilled
        border = bg_vgg * expand_mask * inverse_mask
        border_var = Variable(chainer.dataset.concat_examples([border],
                                                              args.gpu),
                              volatile='on')
        border_sum = F.convolution_2d(border_var, W=W_sum, pad=1)
        border_sum.volatile = 'off'

        print('\tExtracting content image features ...')
        copy_paste_vgg = obj_vgg * mask + bg_vgg * inverse_mask
        copy_paste_var = Variable(chainer.dataset.concat_examples(
            [copy_paste_vgg], args.gpu),
                                  volatile='on')
        content_features = extract({'data': copy_paste_var}, vgg,
                                   args.content_layers)
        content_features = {
            key: value[0]
            for key, value in content_features.items()
        }
        for _, value in content_features.items():
            value.volatile = 'off'

        ## args
        args.vgg = vgg
        args.realism_cnn = realism_cnn
        args.border_sum = border_sum
        args.content_laplace = content_laplace
        args.content_features = content_features
        args.mask = mask
        args.mask_var = mask_var
        args.inverse_mask = inverse_mask
        args.bg_vgg = bg_vgg
        args.copy_paste_vgg = copy_paste_vgg
        args.im_name = im_name

        args.iter = 0
        x_init = np.asarray(
            np.random.randn(*args.shape) * 0.001,
            dtype=np.float32) if args.rand_init else np.copy(copy_paste_vgg)
        print('\tOptimize start ...')
        res = minimize(color_adjust,
                       x_init,
                       args=(args,),
                       method='L-BFGS-B',
                       jac=True,
                       options={
                           'maxiter': args.n_iteration,
                           'disp': False
                       })
        # Cut and paste loss
        args.iter = -1
        f0, _ = color_adjust(copy_paste_vgg, args)
        print('\tOptimize done, loss = {} from {}\n'.format(res.fun, f0))
        loss_change.append((im_name, f0, res.fun))

        args.iter = ''
        save_result(res.x, args)

    with open(os.path.join(args.result_folder, args.result_name), 'w') as f:
        for name, f0, fb in loss_change:
            f.write('{} {} {}\n'.format(name, f0, fb))
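
Several of the parsers above rely on `str2list`/`str2bool`, and the feature hooks go through `extract`; none of the three are shown. Minimal sketches consistent with the call sites follow. The `extract` call convention assumes a CaffeFunction-style interface (model(inputs={...}, outputs=[...])), which is a guess:

def str2list(s):
    # 'relu1_1;relu2_1' -> ['relu1_1', 'relu2_1']
    return s.split(';')

def str2bool(s):
    return s == 'True'

def extract(inputs, model, layers):
    # Forward pass returning the named intermediate activations, keyed by
    # layer name, assuming the chainer.links.caffe.CaffeFunction convention.
    # Call sites then take value[0] to pick the single image out of the batch.
    names = list(layers)
    return dict(zip(names, model(inputs=inputs, outputs=names)))
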
Example #6
def main():
    parser = argparse.ArgumentParser(
        description='Predict whether each image in a list is realistic or not')
    parser.add_argument('--gpu',
                        type=int,
                        default=0,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--model_path',
                        default='model/realismCNN_all_iter3.npz',
                        help='Path for pretrained model')
    parser.add_argument(
        '--data_root',
        default='/data1/wuhuikai/benchmark/Realistic/human_evaluation',
        help='Root folder for test dataset')
    parser.add_argument(
        '--sub_dataset',
        default='lalonde_and_efros_dataset',
        help='Folder name for the sub-dataset, containing an images '
             'subfolder and the image list')
    parser.add_argument('--img_folder',
                        default='images',
                        help='Folder storing images')
    parser.add_argument('--list_name',
                        default='list.txt',
                        help='Name for file storing image list')
    parser.add_argument('--label_name',
                        default='label.txt',
                        help='Name for file storing ground truth')
    parser.add_argument('--batch_size',
                        type=int,
                        default=10,
                        help='Batch size for one iteration')
    parser.add_argument('--load_size',
                        type=int,
                        default=256,
                        help='Scale image to load_size')
    parser.add_argument('--result_name',
                        default='result.txt',
                        help='Name for file storing result')
    args = parser.parse_args()

    data_root = os.path.join(args.data_root, args.sub_dataset)
    print('Predict realism for images in {} ...'.format(data_root))

    model = RealismCNN()
    print('Load pretrained model from {} ...'.format(args.model_path))
    serializers.load_npz(args.model_path, model)
    if args.gpu >= 0:
        chainer.cuda.get_device(args.gpu).use()  # Make a specified GPU current
        model.to_gpu()  # Copy the model to the GPU

    print('Load images from {} ...'.format(args.list_name))
    dataset = chainer.datasets.ImageDataset(
        paths=os.path.join(data_root, args.list_name),
        root=os.path.join(data_root, args.img_folder))
    print('{} images in total loaded'.format(len(dataset)))
    data_iterator = chainer.iterators.SerialIterator(dataset,
                                                     args.batch_size,
                                                     repeat=False,
                                                     shuffle=False)

    scores = np.zeros((0, 2))
    for idx, batch in enumerate(data_iterator):
        print('Processing batch {}->{}/{} ...'.format(
            idx * args.batch_size + 1,
            min(len(dataset), (idx + 1) * args.batch_size), len(dataset)))
        batch = [
            im_preprocess_vgg(np.transpose(im, [1, 2, 0]), args.load_size)
            for im in batch
        ]
        batch = Variable(chainer.dataset.concat_examples(batch, args.gpu),
                         volatile='on')
        result = chainer.cuda.to_cpu(model(batch, dropout=False).data)
        scores = np.vstack((scores, np.mean(result, axis=(2, 3))))

    print('Processing DONE!')
    print('Saving result to {} ...'.format(args.result_name))
    with open(os.path.join(data_root, args.result_name), 'w') as f:
        for score in scores:
            f.write('{}\t{}\t{}\n'.format(score[0], score[1],
                                          np.argmax(score)))

    if not args.label_name:
        return
    print('Load gt from {} ...'.format(args.label_name))
    with open(os.path.join(data_root, args.label_name)) as f:
        gts = np.asarray(f.readlines(), dtype=np.uint8)
    gts[gts > 0.5] = 1
    auc = roc_auc_score(gts, scores[:, 1])
    print('AUC score: {}'.format(auc))
Example #7
def main():
    parser = argparse.ArgumentParser(
        description='Transfer style from source image(s) to a target image')
    parser.add_argument('--gpu',
                        type=int,
                        default=0,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--content_image',
                        default='images/towernight.jpg',
                        help='Content target image')
    parser.add_argument('--style_images',
                        type=str2list,
                        default='images/Starry_Night.jpg',
                        help='Style source images, separated by ";"')
    parser.add_argument(
        '--blend_weights',
        type=lambda x: np.array([float(i) for i in x.split(';')]),
        default=None,
        help='Weight for each style image, separated by ";"')

    parser.add_argument('--content_weight',
                        type=float,
                        default=5,
                        help='Weight for content loss')
    parser.add_argument('--style_weight',
                        type=float,
                        default=100,
                        help='Weight for style loss')
    parser.add_argument('--tv_weight',
                        type=float,
                        default=1e-3,
                        help='Weight for tv loss')
    parser.add_argument('--n_iteration',
                        type=int,
                        default=1000,
                        help='# of iterations')
    parser.add_argument('--normalize_gradients',
                        type=str2bool,
                        default=False,
                        help='Normalize gradients if True')
    parser.add_argument('--rand_init',
                        type=str2bool,
                        default=True,
                        help='Random init input if True')
    parser.add_argument('--content_load_size',
                        type=int,
                        default=512,
                        help='Scale content image to load_size')
    parser.add_argument('--style_load_size',
                        type=int,
                        default=512,
                        help='Scale style image to load_size')
    parser.add_argument('--original_color',
                        type=str2bool,
                        default=False,
                        help='Same color with content image if True')
    parser.add_argument('--style_color',
                        type=str2bool,
                        default=False,
                        help='Same color with style image if True')

    parser.add_argument('--content_layers',
                        type=str2list,
                        default='relu4_2',
                        help='Layers for content loss, separated by ";"')
    parser.add_argument('--style_layers',
                        type=str2list,
                        default='relu1_1;relu2_1;relu3_1;relu4_1;relu5_1',
                        help='Layers for style loss, separated by ";"')

    parser.add_argument('--model_path',
                        default='models/VGG_ILSVRC_19_layers.pkl',
                        help='Path for pretrained model')
    parser.add_argument('--out_folder',
                        default='images/result',
                        help='Folder for storing output result')
    parser.add_argument('--prefix',
                        default='',
                        help='Prefix name for output image')
    args = parser.parse_args()

    print('Load pretrained model from {} ...'.format(args.model_path))
    with open(args.model_path, 'rb') as f:
        model = pickle.load(f)
    if args.gpu >= 0:
        chainer.cuda.get_device(args.gpu).use()  # Make a specified GPU current
        model.to_gpu()  # Copy the model to the GPU

    print('Load content image {} ...'.format(args.content_image))
    content_im_orig = imread(args.content_image)
    args.content_orig_size = (content_im_orig.shape[:2]
                              if args.content_load_size else None)
    content_im = im_preprocess_vgg(content_im_orig,
                                   load_size=args.content_load_size,
                                   dtype=np.float32)
    args.shape = content_im.shape
    print('Load style image(s) ...\n\t{}'.format('\t'.join(args.style_images)))
    style_images = [
        im_preprocess_vgg(imread(im_path),
                          load_size=args.style_load_size,
                          dtype=np.float32) for im_path in args.style_images
    ]

    if args.blend_weights is None:
        args.blend_weights = np.ones(len(style_images))
    args.blend_weights /= np.sum(args.blend_weights)
    print('Blending weight for each style image: {}'.format(
        args.blend_weights))

    # Init x
    x = np.asarray(np.random.randn(*content_im.shape) * 0.001,
                   dtype=np.float32) if args.rand_init else np.copy(content_im)

    print('Extracting content image features ...')
    args.content_layers = set(args.content_layers)
    content_im = Variable(chainer.dataset.concat_examples([content_im],
                                                          args.gpu),
                          volatile='on')
    content_features = extract({'data': content_im}, model,
                               args.content_layers)
    content_features = {
        key: value[0]
        for key, value in content_features.items()
    }
    for _, value in content_features.items():
        value.volatile = 'off'

    print('Extracting style image features ...')
    grams = {}
    args.style_layers = set(args.style_layers)
    for i, style_image in enumerate(style_images):
        style_image = Variable(chainer.dataset.concat_examples([style_image],
                                                               args.gpu),
                               volatile='on')
        style_features = extract({'data': style_image}, model,
                                 args.style_layers)
        for key, value in style_features.items():
            gram_feature = gram(value[0])
            if key in grams:
                grams[key] += args.blend_weights[i] * gram_feature
            else:
                grams[key] = args.blend_weights[i] * gram_feature
    for _, value in grams.items():
        value.volatile = 'off'

    print('Optimize start ...')
    res = minimize(neural_style,
                   x,
                   args=(model, content_features, grams, args),
                   method='L-BFGS-B',
                   jac=True,
                   options={
                       'maxiter': args.n_iteration,
                       'disp': True
                   })
    loss0, _ = neural_style(x, model, content_features, grams, args)

    print('Optimize done, loss = {}, with loss0 = {}'.format(res.fun, loss0))
    img = im_deprocess_vgg(np.reshape(res.x, args.shape),
                           orig_size=args.content_orig_size,
                           dtype=np.uint8)
    if args.original_color:
        img = original_colors(content_im_orig, img)
    if args.style_color:
        img = style_colors(content_im_orig, img)
    img = np.asarray(img, dtype=np.uint8)

    # Init result list
    if not os.path.isdir(args.out_folder):
        os.makedirs(args.out_folder)
    print('Results will be saved to {} ...\n'.format(args.out_folder))

    name = '{}_with_style(s)'.format(
        os.path.splitext(os.path.basename(args.content_image))[0])
    for path in args.style_images:
        name = '{}_{}'.format(name,
                              os.path.splitext(os.path.basename(path))[0])
    if args.prefix:
        name = '{}_{}'.format(args.prefix, name)
    imsave(os.path.join(args.out_folder, '{}.png'.format(name)), img)
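
`original_colors` and `style_colors` implement the usual luminance-only color-preservation trick from neural style transfer. A sketch of `original_colors` follows; the PIL-based implementation is an assumption, and `style_colors` would be its mirror image using the style image's chrominance:

import numpy as np
from PIL import Image

def original_colors(content, stylized):
    # Keep the stylized luminance (Y) but take the chrominance (Cb, Cr) from
    # the original content image, resizing the content to match first.
    h, w = stylized.shape[:2]
    y, _, _ = Image.fromarray(stylized).convert('YCbCr').split()
    _, cb, cr = (Image.fromarray(content).resize((w, h))
                 .convert('YCbCr').split())
    return np.asarray(Image.merge('YCbCr', (y, cb, cr)).convert('RGB'))
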
Example #8
def main():
    parser = argparse.ArgumentParser(
        description='Image editing using RealismCNN')
    parser.add_argument('--gpu',
                        type=int,
                        default=0,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--model_path',
                        default='model/realismCNN_all_iter3.npz',
                        help='Path for pretrained model')
    parser.add_argument(
        '--data_root',
        default='/data1/wuhuikai/benchmark/Realistic/color_adjustment',
        help='Root folder for color adjustment dataset')
    parser.add_argument('--img_folder',
                        default='pngimages',
                        help='Folder storing images')
    parser.add_argument('--list_name',
                        default='list.txt',
                        help='Name for file storing image list')
    parser.add_argument('--load_size',
                        type=int,
                        default=224,
                        help='Scale image to load_size')
    parser.add_argument('--result_folder',
                        default='result',
                        help='Name for folder storing results')
    parser.add_argument('--result_name',
                        default='loss.txt',
                        help='Name for file saving loss change')
    args = parser.parse_args()

    args.weight = 50  # regularization weight
    args.seeds = np.arange(0.6, 1.6, 0.2)  # multiple initializations
    # bounds for the search range
    args.bounds = [(0.4, 2.0), (0.4, 2.0), (0.4, 2.0), (-0.5, 0.5),
                   (-0.5, 0.5), (-0.5, 0.5)]

    # Init CNN model
    model = RealismCNN()
    print('Load pretrained model from {} ...\n'.format(args.model_path))
    serializers.load_npz(args.model_path, model)
    if args.gpu >= 0:
        chainer.cuda.get_device(args.gpu).use()  # Make a specified GPU current
        model.to_gpu()  # Copy the model to the GPU

    # Init image list
    im_root = os.path.join(args.data_root, args.img_folder)
    print('Load images from {} according to list {} ...'.format(
        im_root, args.list_name))
    with open(os.path.join(args.data_root, args.list_name)) as f:
        im_list = f.read().strip().split('\n')
    total = len(im_list)
    print('{} images loaded!\n'.format(total))

    # Init result list
    if not os.path.isdir(args.result_folder):
        os.makedirs(args.result_folder)
    print('Results will be saved to {} ...\n'.format(args.result_folder))

    loss_change = []
    for idx, im_name in enumerate(im_list):
        print('Processing {}/{} ...'.format(idx + 1, total))
        obj = im_preprocess_vgg(imread(
            os.path.join(im_root, '{}_obj.png'.format(im_name))),
                                args.load_size,
                                sub_mean=False,
                                dtype=np.float32)
        bg = im_preprocess_vgg(imread(
            os.path.join(im_root, '{}_bg.png'.format(im_name))),
                               args.load_size,
                               sub_mean=False,
                               dtype=np.float32)
        mask = im_preprocess_vgg(imread(
            os.path.join(im_root, '{}_softmask.png'.format(im_name))),
                                 args.load_size,
                                 sub_mean=False,
                                 dtype=np.float32,
                                 preserve_range=False)

        xs = []
        fvals = []
        for n in range(args.seeds.size):
            x0 = np.zeros(6)
            x0[:3] = args.seeds[n]
            print('\tOptimize start with seed {} ...'.format(args.seeds[n]))
            res = minimize(color_adjust,
                           x0,
                           args=(obj, bg, mask, model, args),
                           method='L-BFGS-B',
                           jac=True,
                           bounds=args.bounds,
                           tol=1e-3)
            xs.append(res.x)
            fvals.append(res.fun)
            print('\tOptimize done, loss = {}\n'.format(fvals[-1]))

        # Cut and paste loss
        x0 = np.array([1, 1, 1, 0, 0, 0], dtype=np.float32)
        f0, _ = color_adjust(x0, obj, bg, mask, model, args)

        # Best x
        best_idx = np.argmin(fvals)
        print(
            '\tBest seed = {}, loss = {}, cut_and_paste loss = {}, x = {}\n\n'.
            format(args.seeds[best_idx], fvals[best_idx], f0, xs[best_idx]))
        loss_change.append((im_name, f0, fvals[best_idx]))

        edited, _ = composite_img(xs[best_idx], obj, bg, mask)
        cut_and_paste, _ = composite_img(x0, obj, bg, mask)
        result = np.concatenate((cut_and_paste, edited), axis=2)
        result = np.asarray(np.transpose(result[::-1, :, :], (1, 2, 0)),
                            dtype=np.uint8)
        imsave(os.path.join(args.result_folder, '{}.png'.format(im_name)),
               result)

    with open(os.path.join(args.result_folder, args.result_name), 'w') as f:
        for name, f0, fb in loss_change:
            f.write('{} {} {}\n'.format(name, f0, fb))
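
Example #8 optimizes a 6-vector: given x0 = [1, 1, 1, 0, 0, 0] and the bounds above, the first three entries read as per-channel gains and the last three as per-channel offsets. A sketch of `composite_img` under that reading; the offset scaling to the [0, 255] pixel range and the second return value are assumptions:

import numpy as np

def composite_img(x, obj, bg, mask):
    # Per-channel color adjustment of the object, then soft-mask compositing
    # onto the background. Pixel range assumed [0, 255] (sub_mean=False above).
    gain = np.asarray(x[:3], dtype=np.float32).reshape(3, 1, 1)
    offset = np.asarray(x[3:], dtype=np.float32).reshape(3, 1, 1) * 255.0
    adjusted = np.clip(obj * gain + offset, 0.0, 255.0)
    composite = adjusted * mask + bg * (1.0 - mask)
    return composite, adjusted
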