Example #1
 def save(self):
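     # Compose a side-by-side visualization: the target image resized to the
     # search image size, the search image masked by the ground-truth mask, and
     # one masked copy per region mask; the result is saved under ./output/imgs/.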
     im = cv2.resize(self.target_image, (self.search_image.shape[1], self.search_image.shape[0]))
     image = mask_image_with_mean_background(self.gt_mask, self.search_image, [0, 255, 0])
     image = np.concatenate([im] + [image], axis=1)
     image = np.concatenate([image] + [mask_image_with_mean_background(region_mask, self.search_image, [255, 0, 0]) for region_mask in self.region_masks], axis=1)
     pid = self.random_index[self.now_index - 1]
     save_img('./output/imgs/%03d-%05d-%d-%.2f.jpg' % (self._epoch + 1, self.now_index, pid, self._last_iou), image)
Example #2
def ffwd(data_in, paths_out, checkpoint_dir, device_t='/gpu:0', batch_size=4):

    # is_paths = type(data_in[0]) == str
    # if is_paths:
    #     assert len(data_in) == len(paths_out)
    img_shape = get_img(data_in[0]).shape
    # else:
    #     assert data_in.size[0] == len(paths_out)
    #     img_shape = X[0].shape

    g = tf.Graph()
    batch_size = min(len(paths_out), batch_size)
    curr_num = 0
    soft_config = tf.ConfigProto(allow_soft_placement=True)
    soft_config.gpu_options.allow_growth = True
    with g.as_default(), g.device(device_t), tf.Session(
            config=soft_config) as sess:
        batch_shape = (batch_size, ) + img_shape
        img_placeholder = tf.placeholder(tf.float32,
                                         shape=batch_shape,
                                         name='img_placeholder')

        preds = transform.net(img_placeholder)
        saver = tf.train.Saver()
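        # Restore weights from the newest checkpoint in checkpoint_dir, or treat
        # checkpoint_dir itself as a single checkpoint file path.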
        if os.path.isdir(checkpoint_dir):
            ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
            else:
                raise Exception("No checkpoint found...")
        else:
            saver.restore(sess, checkpoint_dir)

        num_iters = int(len(paths_out) / batch_size)
        for i in range(num_iters):
            pos = i * batch_size
            curr_batch_out = paths_out[pos:pos + batch_size]
            # if is_paths:
            curr_batch_in = data_in[pos:pos + batch_size]
            X = np.zeros(batch_shape, dtype=np.float32)
            for j, path_in in enumerate(curr_batch_in):
                img = get_img(path_in)
                assert img.shape == img_shape
                X[j] = img
            # else:
            #     X = data_in[pos:pos+batch_size]

            _preds = sess.run(preds, feed_dict={img_placeholder: X})
            for j, path_out in enumerate(curr_batch_out):
                save_img(path_out, _preds[j])

        remaining_in = data_in[num_iters * batch_size:]
        remaining_out = paths_out[num_iters * batch_size:]
    if len(remaining_in) > 0:
        ffwd(remaining_in,
             remaining_out,
             checkpoint_dir,
             device_t=device_t,
             batch_size=1)
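A minimal, hypothetical driver for the ffwd() above; the directory names and the os.listdir()-based path handling are assumptions for illustration, not part of the example.

import os

content_dir = './input'   # assumed folder of content images
styled_dir = './output'   # assumed folder for the stylized results
os.makedirs(styled_dir, exist_ok=True)

names = sorted(os.listdir(content_dir))
data_in = [os.path.join(content_dir, n) for n in names]
paths_out = [os.path.join(styled_dir, n) for n in names]
ffwd(data_in, paths_out, checkpoint_dir='./ckpt', batch_size=4)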
Example #3
def main():
    parser = build_parser()
    options = parser.parse_args()
    check_opts(options)
    content_img = get_img(options.content, (256, 256, 3)).astype(np.float32)
    content_img = np.reshape(content_img, (1, ) + content_img.shape)
    prediction = ffwd(content_img, options.style)
    save_img(options.output_path, prediction)
    print('Image saved to {}'.format(options.output_path))
Example #4
def main():
    parser = build_parser()
    options = parser.parse_args()
    check_opts(options)

    style_target = get_img(options.style)
    if not options.slow:
        content_targets = _get_files(options.train_path)
    elif options.test:
        content_targets = [options.test]

    kwargs = {
        "slow": options.slow,
        "epochs": options.epochs,
        "print_iterations": options.checkpoint_iterations,
        "batch_size": options.batch_size,
        "save_path": os.path.join(options.checkpoint_dir, 'fns.ckpt'),
        "learning_rate": options.learning_rate,
        "checkpoint_restore": options.checkpoint_restore
    }

    if options.slow:
        if options.epochs < 10:
            kwargs['epochs'] = 1000
        if options.learning_rate < 1:
            kwargs['learning_rate'] = 1e1

    args = [
        content_targets,
        style_target,
        options.content_weight,
        options.style_weight,
        options.tv_weight,
        options.vgg_path
    ]

    for preds, losses, i, epoch in optimize(*args, **kwargs):
        style_loss, content_loss, tv_loss, loss = losses

        print('Epoch %d, Iteration: %d, Loss: %s' % (epoch, i, loss))
        to_print = (style_loss, content_loss, tv_loss)
        print('style: %s, content:%s, tv: %s' % to_print)
        if options.test:
            assert options.test_dir is not False
            preds_path = '%s/%s_%s.png' % (options.test_dir, epoch, i)
            if not options.slow:
                ckpt_dir = os.path.dirname(options.checkpoint_dir)
                evaluate.ffwd_to_img(options.test, preds_path,
                                     options.checkpoint_dir)
            else:
                # slow mode optimizes the image directly; save the current output
                save_img(preds_path, preds.reshape((512, 512, 3)))
    ckpt_dir = options.checkpoint_dir
    cmd_text = 'python evaluate.py --checkpoint %s ...' % ckpt_dir
    print("Training complete. For evaluation:\n    `%s`" % cmd_text)
Example #5
def main():
    parser = build_parser()
    options = parser.parse_args()  # parse the command-line arguments
    check_opts(options)  # validate the options

    style_target = get_img(options.style)  # 3-channel style image, arbitrary size
    if not options.slow:
        content_targets = _get_files(options.train_path)
    elif options.test:
        content_targets = [options.test]

    kwargs = {
        "slow": options.slow,
        "epochs": options.epochs,  # 训练多少轮,所有的训练数据集都训练过一次称为一个epoch,即一轮
        "print_iterations": options.checkpoint_iterations,
        "batch_size": options.
        batch_size,  # 批处理。一组多少张,itertions=epoch/batch即迭代一轮的迭代次数, epochs*(epoch/batch)即为总迭代次数
        "save_path": os.path.join(options.checkpoint_dir, 'fns.ckpt'),
        "learning_rate": options.learning_rate
    }  # !!!

    if options.slow:  # !!!
        if options.epochs < 10:
            kwargs['epochs'] = 1000
        if options.learning_rate < 1:
            kwargs['learning_rate'] = 1e1

    args = [
        content_targets, style_target, options.content_weight,
        options.style_weight, options.tv_weight, options.vgg_path
    ]  # arguments passed positionally to optimize()

    for preds, losses, i, epoch in optimize(*args, **kwargs):  # training loop
        style_loss, content_loss, tv_loss, loss = losses

        print('Epoch %d, Iteration: %d, Loss: %s' % (epoch, i, loss))
        to_print = (style_loss, content_loss, tv_loss)
        print('style: %s, content:%s, tv: %s' % to_print)
        if options.test:
            assert options.test_dir != False
            preds_path = '%s/%s_%s.png' % (options.test_dir, epoch, i)
            if not options.slow:
                ckpt_dir = os.path.dirname(options.checkpoint_dir)
                evaluate.ffwd_to_img(options.test, preds_path,
                                     options.checkpoint_dir)
            else:
                # fix for the original save_img(preds_path, img): 'img' was undefined,
                # so save preds reshaped to (512, 512, 3) instead
                save_img(preds_path, preds.reshape((512, 512, 3)))
    ckpt_dir = options.checkpoint_dir
    cmd_text = 'python evaluate.py --checkpoint %s ...' % ckpt_dir
    print("Training complete. For evaluation:\n    `%s`" % cmd_text)
Example #6
 def _get_image(self, img_path):
     raw_img = load_img(img_path,
                        to_rgb=True,
                        color_img=self.data_conf['color_image'])
     height, width = raw_img.shape[:2]
     if (height % 2**self.num_scales != 0
             or width % 2**self.num_scales != 0
             or self.data_conf['use_crop']):
         crop_size = (int(self.data_conf['crop_size'])
                      if self.data_conf['use_crop'] else min(height, width))
         # round down so both sides are multiples of 2**num_scales
         crop_size = crop_size // 2**self.num_scales * 2**self.num_scales
         raw_img = central_crop(raw_img, crop_size)
     img_name = os.path.basename(img_path)
     save_img(raw_img, os.path.join(self.originals_dir, img_name))
     return raw_img
Example #7
def _get_files(img_dir):
    files = utils.list_files(img_dir)
    return files


with tf.Session() as sess:
    batch_shape = (batch_size, 256, 256, 3)
    X_content_images = tf.placeholder(tf.float32,
                                      shape=batch_shape,
                                      name='X_content_images')
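    # scale pixel values from [0, 255] to [0, 1] before feeding the transform network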
    preds = style_transfer_conv_net.net(X_content_images / 255.)
    saver = tf.train.Saver()
    saver.restore(sess, model_checkpoint_path)
    content_images = _get_files(content_image_path)
    num_images = 50
    num_iter = int(num_images / batch_size)
    for i in range(num_iter):
        selected_files = content_images[i * batch_size:(i + 1) * batch_size]
        X = np.zeros(batch_shape, dtype=np.float32)
        output_image_paths = []
        index = 0
        for index, selected_file in enumerate(selected_files):
            full_image_path = os.path.join(content_image_path, selected_file)
            output_image_paths.append(
                os.path.join(generated_image_path, selected_file))
            X[index] = utils.get_img(full_image_path, img_size=(256, 256, 3))

        generated_images = sess.run(preds, feed_dict={'X_content_images:0': X})
        for index, output_image_path in enumerate(output_image_paths):
            utils.save_img(output_image_path, generated_images[index])
Example #8
    def _denoise_image(self, img_path):
        img_name = os.path.basename(img_path)
        logging.info(f"Denoising {img_name} ...")

        device = torch.device(self.training['device'])
        logging.info(f"Device set: {device}")

        model = UNet(self.in_channels,
                     self.out_channels,
                     self.n_filters,
                     self.k_d,
                     self.k_u,
                     self.n_skips,
                     self.k_s,
                     upsampling=self.upsampling)
        model = model.to(device)
        model.train()
        logging.info("Model built!")

        optim = torch.optim.Adam(model.parameters(),
                                 lr=float(self.training['lr']))
        loss_layer = nn.MSELoss()
        logging.info("Optimizer built!")

        img = self._get_image(img_path)
        img_tensor = numpy2tensor(img,
                                  in_range=(0, 255),
                                  out_range=(-1, 1),
                                  device=device)
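        # fixed noise input: uniform values in [0, 0.1) with the same spatial size as the image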
        z_in = torch.rand(1,
                          self.in_channels,
                          img_tensor.shape[2],
                          img_tensor.shape[3],
                          device=device) * 0.1

        results_dump = []
        gen_avg = None
        for i in tqdm(range(int(self.training['max_iter']))):
            optim.zero_grad()
            gen_img = model(z_in)
            loss = loss_layer(gen_img, img_tensor)
            loss.backward()
            optim.step()

            gen_img = gen_img.detach()
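            # keep an exponential moving average of the generated image, weighted by gamma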
            if gen_avg is None:
                gen_avg = gen_img.clone()
            else:
                gen_avg = gen_avg * float(self.training['gamma']) + (
                    1 - float(self.training['gamma'])) * gen_img

            if self.training['use_ema']:
                gen_img = torch.clamp(gen_avg, min=-1.0, max=1.0)
            gen_img = tensor2numpy(gen_img,
                                   in_range=(-1, 1),
                                   out_range=(0, 255))

            if (i + 1) % int(self.training['print_every']) == 0:
                save_img(gen_img,
                         os.path.join(
                             self.logs_dir,
                             img_name.split('.')[0] +
                             f"_{i+1:04}.{img_name.split('.')[1]}"),
                         to_bgr=True)

        save_img(gen_img,
                 os.path.join(self.results_dir, img_name),
                 to_bgr=True)
        logging.info(f"Denoising of {img_name} finished!")
Example #9
def ffwd(data_in, paths_out, checkpoint_dir, device_t='/gpu:0', batch_size=4):
    assert len(paths_out) > 0
    is_paths = type(data_in[0]) == str
    if is_paths:
        assert len(data_in) == len(paths_out)
        img_shape = get_img(data_in[0]).shape
    else:
        assert data_in.shape[0] == len(paths_out)
        img_shape = data_in[0].shape

    g = tf.Graph()
    batch_size = min(len(paths_out), batch_size)
    curr_num = 0
    soft_config = tf.ConfigProto(allow_soft_placement=True)
    soft_config.gpu_options.allow_growth = True
    with g.as_default(), g.device(device_t), tf.Session(
            config=soft_config) as sess:

        batch_shape = (batch_size, ) + img_shape
        img_placeholder = tf.placeholder(tf.float32,
                                         shape=batch_shape,
                                         name='img_placeholder')

        preds = transform.net(img_placeholder)
        saver = tf.train.Saver()
        if os.path.isdir(checkpoint_dir):
            ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
            else:
                os.makedirs("fst_checkpoints", exist_ok=True)
                ckpt = os.path.dirname("fst_checkpoints")
                print(ckpt, "variable ckpt status")
                print("...model checkpoints directory created...")

        else:
            saver.restore(sess, checkpoint_dir)

        num_iters = int(len(paths_out) / batch_size)
        for i in range(num_iters):
            pos = i * batch_size
            curr_batch_out = paths_out[pos:pos + batch_size]
            if is_paths:
                curr_batch_in = data_in[pos:pos + batch_size]
                X = np.zeros(batch_shape, dtype=np.float32)
                for j, path_in in enumerate(curr_batch_in):
                    img = get_img(path_in)
                    assert img.shape == img_shape, \
                        'Images have different dimensions. ' +  \
                        'Resize images or use --allow-different-dimensions.'
                    X[j] = img
            else:
                X = data_in[pos:pos + batch_size]

            # to fix error 'tensorflow.python.framework.errors_impl.FailedPreconditionError: Attempting to use uninitialized
            # value Variable_47'
            sess.run(tf.compat.v1.global_variables_initializer())
            sess.run(tf.local_variables_initializer())

            _preds = sess.run(preds, feed_dict={img_placeholder: X})
            for j, path_out in enumerate(curr_batch_out):
                save_img(path_out, _preds[j])

        remaining_in = data_in[num_iters * batch_size:]
        remaining_out = paths_out[num_iters * batch_size:]
    if len(remaining_in) > 0:
        ffwd(remaining_in,
             remaining_out,
             checkpoint_dir,
             device_t=device_t,
             batch_size=1)
Example #10
def main():
    parser = build_parser()
    options = parser.parse_args()
    check_opts(options)

    if not options.slow:
        content_targets = _get_files(options.train_path)
    elif options.test:
        content_targets = [options.test]
    style_files = []
    if os.path.isfile(options.style):
        style_files.append(options.style)  # a single style image path
    else:
        style_files = _get_files(options.style)
    for style_file in style_files:
        print("-------------Started to train2014 model for style '%s'" %
              os.path.basename(style_file))
        style_target = get_img(style_file)
        checkpoint_dir = "checkpoint_" + os.path.splitext(
            os.path.basename(style_file))[0]
        if not os.path.exists(checkpoint_dir):
            os.makedirs(checkpoint_dir)
        test_dir = "test_" + os.path.splitext(os.path.basename(style_file))[0]
        if options.test:
            if not os.path.exists(test_dir):
                os.makedirs(test_dir)
        kwargs = {
            "slow": options.slow,
            "epochs": options.epochs,
            "print_iterations": options.checkpoint_iterations,
            "batch_size": options.batch_size,
            "save_path": checkpoint_dir,
            "learning_rate": options.learning_rate
        }

        if options.slow:
            if options.epochs < 10:
                kwargs['epochs'] = 1000
            if options.learning_rate < 1:
                kwargs['learning_rate'] = 1e1

        args = [
            content_targets, style_target, options.content_weight,
            options.style_weight, options.tv_weight, options.vgg_path
        ]

        for preds, losses, i, epoch in optimize(*args, **kwargs):
            style_loss, content_loss, tv_loss, loss = losses

            print('Epoch %d, Iteration: %d, Loss: %s' % (epoch, i, loss))
            to_print = (style_loss, content_loss, tv_loss)
            print('style: %s, content:%s, tv: %s' % to_print)
            if options.test:
                # assert options.test_dir != False
                preds_path = '%s/%s_%s.png' % (test_dir, epoch, i)
                if not options.slow:
                    ckpt_dir = os.path.dirname(checkpoint_dir)
                    evaluate.ffwd_to_img(options.test, preds_path,
                                         checkpoint_dir)
                else:
                    save_img(preds_path, preds)
        ckpt_dir = checkpoint_dir
        cmd_text = 'python evaluate.py --checkpoint %s ...' % ckpt_dir
        print("Training complete. For evaluation:\n    `%s`" % cmd_text)
Example #11
import src.utils as utils
import os

content_image_path = 'data/content_images/'
resized_content_image_path = 'data/resized_content_images/'


def _get_files(img_dir):
    files = utils.list_files(img_dir)
    return files

content_image_paths = _get_files(content_image_path)

for filename in content_image_paths:
    fullpath = os.path.join(content_image_path, filename)
    print(fullpath)
    image = utils.get_img(fullpath, img_size=(256, 256, 3))
    output_path = os.path.join(resized_content_image_path, filename)
    print(output_path)
    utils.save_img(output_path, image)
Example #12
def main():
    parser = build_parser()
    options = parser.parse_args()
    check_opts(options)

    # Load style and content images
    style_target = get_img(options.style)
    style_seg = get_img(options.style_seg)
    if not options.slow:
        content_targets = _get_files(options.train_path)
    elif options.test:
        content_targets = [options.test]

    kwargs = {
        "slow": options.slow,
        "epochs": options.epochs,
        "print_iterations": options.checkpoint_iterations,
        "batch_size": options.batch_size,
        "save_path": os.path.join(options.checkpoint_dir, 'fns.ckpt'),
        "learning_rate": options.learning_rate
    }

    if options.slow:
        if options.epochs < 10:
            kwargs['epochs'] = 1000
        if options.learning_rate < 1:
            kwargs['learning_rate'] = 1e1

    args = [
        content_targets,  # Batch image paths
        style_target,  # Reference style *image*
        style_seg,  # Reference style segmentation map *image*
        options.content_weight,
        options.style_weight,
        options.tv_weight,
        options.photo_weight,
        options.vgg_path,
        options.deeplab_path,
        options.resized_dir,  # Batch image resized folder (intermediate from deeplab)
        options.seg_dir,  # Batch image segmentation folder (intermediate from deeplab)
        options.matting_dir
    ]

    for preds, losses, i, epoch in optimize(*args, **kwargs):
        style_loss, content_loss, tv_loss, photo_loss, loss = losses

        print('Epoch %d, Iteration: %d, Loss: %s' % (epoch, i, loss))
        to_print = (style_loss, content_loss, tv_loss, photo_loss)
        print('style: %s, content:%s, tv: %s, photo: %s' % to_print)
        if options.test:
            assert options.test_dir != False
            preds_path = '%s/%s_%s.png' % (options.test_dir, epoch, i)
            if not options.slow:
                ckpt_dir = os.path.dirname(options.checkpoint_dir)
                evaluate.ffwd_to_img(options.test, preds_path,
                                     options.checkpoint_dir)
            else:
                # slow mode optimizes the image directly; save the current output
                save_img(preds_path, preds.reshape((512, 512, 3)))
    ckpt_dir = options.checkpoint_dir
    cmd_text = 'python evaluate.py --checkpoint %s ...' % ckpt_dir
    print("Training complete. For evaluation:\n    `%s`" % cmd_text)