Example #1
    num_steps = 7
    patch_size = 1
    patch_stride = 1

    feat_chans = 512
    feat_style_shape = (feat_chans, 12, 18)
    feat_style = np.random.uniform(0.0, 1.0, feat_style_shape)
    feat_in_shape = (feat_chans, 17, 10)
    feat_in = np.random.uniform(0.0, 1.0, feat_in_shape)
    matcher = PatchMatcher(feat_in_shape[::-1],
                           feat_style,
                           patch_size=patch_size)
    feat_in_normed = matcher.normalize_patches(
        matcher.get_patches_for(feat_in))
    for i in range(num_steps):
        matcher.update_with_patches(feat_in_normed)
    r = matcher.get_reconstruction()

    content_img_img = load_image(content_image_path)
    content_n_channels, content_n_rows, content_n_cols = content_img_img.shape[::-1]
    content_img = preprocess_image(content_img_img, content_n_cols,
                                   content_n_rows)[0]  #.transpose((2,1,0))
    style_img = load_image(style_image_path)
    style_n_channels, style_n_rows, style_n_cols = style_img.shape[::-1]
    style_img = preprocess_image(style_img, style_n_cols,
                                 style_n_rows)[0]  #.transpose((2,1,0))
    pg = make_patch_grid(content_img, patch_size)
    result = combine_patches_grid(pg, content_img.shape[::-1])
    outimg = deprocess_image(result, contrast_percent=0)
    imsave(output_prefix + '_bestre.png', outimg)

    # # #
    matcher = PatchMatcher(
        (content_n_cols, content_n_rows, content_n_channels),
        style_img,
        patch_size=patch_size)
    for i in range(num_steps):
        start = time.time()
Example #2
    num_steps = 7
    patch_size = 1
    patch_stride = 1

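    # Exercise the matcher on random feature maps first; the reconstruction r is not used below.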
    feat_chans = 512
    feat_style_shape = (feat_chans, 12, 18)
    feat_style = np.random.uniform(0.0, 1.0, feat_style_shape)
    feat_in_shape = (feat_chans, 17, 10)
    feat_in = np.random.uniform(0.0, 1.0, feat_in_shape)
    matcher = PatchMatcher(feat_in_shape[::-1], feat_style, patch_size=patch_size)
    feat_in_normed = matcher.normalize_patches(matcher.get_patches_for(feat_in))
    for i in range(num_steps):
        matcher.update_with_patches(feat_in_normed)
    r = matcher.get_reconstruction()

    content_img_img = load_image(content_image_path)
    content_n_channels, content_n_rows, content_n_cols = content_img_img.shape[::-1]
    content_img = preprocess_image(content_img_img, content_n_cols, content_n_rows)[0]#.transpose((2,1,0))
    style_img = load_image(style_image_path)
    style_n_channels, style_n_rows, style_n_cols = style_img.shape[::-1]
    style_img = preprocess_image(
        style_img, style_n_cols, style_n_rows)[0]  #.transpose((2,1,0))
    pg = make_patch_grid(content_img, patch_size)
    result = combine_patches_grid(pg, content_img.shape[::-1])
    outimg = deprocess_image(result, contrast_percent=0)
    imsave(output_prefix + '_bestre.png', outimg)

    # # #
    matcher = PatchMatcher((content_n_cols, content_n_rows, content_n_channels), style_img, patch_size=patch_size)
    for i in range(num_steps):
        start = time.time()
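For reference, here is the PatchMatcher cycle from the example above repackaged as a small helper. This is a sketch, not part of the project: it only reuses the calls already visible in the snippet (get_patches_for, normalize_patches, update_with_patches, get_reconstruction), and the import path is an assumption to adjust to your own checkout.

import numpy as np

from image_analogy.losses.patch_matcher import PatchMatcher  # assumed module path

def run_matcher(feat_in, feat_style, patch_size=1, num_steps=7):
    '''Match (channels, rows, cols) input features against style features.'''
    matcher = PatchMatcher(feat_in.shape[::-1], feat_style, patch_size=patch_size)
    feat_in_normed = matcher.normalize_patches(matcher.get_patches_for(feat_in))
    for _ in range(num_steps):  # each step refines the patch correspondences
        matcher.update_with_patches(feat_in_normed)
    return matcher.get_reconstruction()

# Mirrors the random feature maps used in the example above:
recon = run_matcher(np.random.uniform(0.0, 1.0, (512, 17, 10)),
                    np.random.uniform(0.0, 1.0, (512, 12, 18)))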
Example #3
def main(args, model_class):
    '''The main loop which does the things.'''
    K.set_image_dim_ordering('th')
    # calculate scales
    if args.num_scales > 1:
        step_scale_factor = (1 - args.min_scale) / (args.num_scales - 1)
    else:
        step_scale_factor = 0.0
        args.min_scale = 1.0
    # prepare the input images
    full_asem_image = img_utils.load_image(
        args.asem_image_path)  #style_semlight image
    full_a_image = img_utils.load_image(args.a_image_path)  #style image
    full_b_image = img_utils.load_image(args.b_image_path)  #content image
    full_bsem_image = img_utils.load_image(
        args.bsem_image_path)  #content_semlight image
    # calculate the output size
    full_img_width, full_img_height = calculate_image_dims(
        args, full_bsem_image)
    img_num_channels = 3  # TODO: allow alpha
    b_scale_ratio_width = float(full_bsem_image.shape[1]) / full_img_width
    b_scale_ratio_height = float(full_bsem_image.shape[0]) / full_img_height
    # ensure the output dir exists
    output_dir = os.path.dirname(args.result_prefix)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    # multi-scale loop: render coarse to fine, reusing each scale's result as the next scale's starting point
    x = None  # this is going to hold our output image
    optimizer = Optimizer()
    for scale_i in range(args.num_scales):
        scale_factor = (scale_i * step_scale_factor) + args.min_scale
        # scale our inputs
        img_width = int(round(full_img_width * scale_factor))
        img_height = int(round(full_img_height * scale_factor))
        # prepare the current optimizer state
        if x is None:  # we need to create an initial state
            x = np.random.uniform(0, 255, (img_height, img_width, 3)).astype(
                np.float32)
            x = vgg16.img_to_vgg(x)
        else:  # resize the last state
            zoom_ratio = img_width / float(x.shape[-1])
            x = scipy.ndimage.zoom(x, (1, zoom_ratio, zoom_ratio), order=1)
            img_height, img_width = x.shape[-2:]
        # determine scaling of "A" images
        if args.a_scale_mode == 'match':
            a_img_width = img_width
            a_img_height = img_height
        elif args.a_scale_mode == 'none':
            a_img_width = full_asem_image.shape[1] * scale_factor
            a_img_height = full_asem_image.shape[0] * scale_factor
        else:  # should just be 'ratio'
            a_img_width = (full_asem_image.shape[1] * scale_factor *
                           b_scale_ratio_width)
            a_img_height = (full_asem_image.shape[0] * scale_factor *
                            b_scale_ratio_height)
        a_img_width = int(round(args.a_scale * a_img_width))
        a_img_height = int(round(args.a_scale * a_img_height))
        # prepare images for use
        a_sem_image = img_utils.preprocess_image(full_asem_image, a_img_width,
                                                 a_img_height)
        a_image = img_utils.preprocess_image(full_a_image, a_img_width,
                                             a_img_height)
        b_sem_image = img_utils.preprocess_image(full_bsem_image, img_width,
                                                 img_height)
        b_image = img_utils.preprocess_image(full_b_image, img_width,
                                             img_height)
        print('Scale factor {} "A" shape {} "B" shape {}'.format(
            scale_factor, a_image.shape, b_image.shape))
        # load up the net and create the model
        net = vgg16.get_model(img_width,
                              img_height,
                              weights_path=args.vgg_weights,
                              pool_mode=args.pool_mode)
        model = model_class(net, args)
        model.build(a_sem_image, a_image, b_sem_image, b_image,
                    (1, img_num_channels, img_height, img_width))

        for i in range(args.num_iterations_per_scale):
            print('Start of iteration {} x {}'.format(scale_i, i))
            start_time = time.time()
            if args.color_jitter:
                color_jitter = (args.color_jitter * 2) * (np.random.random(
                    (3, img_height, img_width)) - 0.5)
                x += color_jitter
            if args.jitter:
                jitter = args.jitter * scale_factor
                ox, oy = np.random.randint(-jitter, jitter + 1, 2)
                x = np.roll(np.roll(x, ox, -1), oy, -2)  # apply jitter shift

            x, min_val, info = optimizer.optimize(
                x, model)  # one optimizer step: evaluate loss and gradients via the model
            print('Current loss value: {}'.format(min_val))

            x = x.reshape((3, img_height, img_width))
            if args.jitter:
                x = np.roll(np.roll(x, -ox, -1), -oy, -2)  # unshift image
            if args.color_jitter:
                x -= color_jitter
            # save the image
            if args.output_full_size:
                out_resize_shape = (full_img_height, full_img_width)
            else:
                out_resize_shape = None
            img = img_utils.deprocess_image(
                np.copy(x),
                contrast_percent=args.contrast_percent,
                resize=out_resize_shape)
            fname = args.result_prefix + '_at_iteration_{}_{}.png'.format(
                scale_i, i)
            imsave(fname, img)
            end_time = time.time()
            print('Image saved as {}'.format(fname))
            print('Iteration completed in {:.2f} seconds'.format(
                end_time - start_time, ))
Example #4
def main(args, model_class):
    '''The main loop which does the things.'''
    K.set_image_dim_ordering('th')
    # calculate scales
    if args.num_scales > 1:
        step_scale_factor = (1 - args.min_scale) / (args.num_scales - 1)
    else:
        step_scale_factor = 0.0
        args.min_scale = 1.0
    # prepare the input images
    full_ap_image = img_utils.load_image(args.ap_image_path)
    full_a_image = img_utils.load_image(args.a_image_path)
    full_b_image = img_utils.load_image(args.b_image_path)
    # calculate the output size
    full_img_width, full_img_height = calculate_image_dims(args, full_b_image)
    img_num_channels = 3  # TODO: allow alpha
    b_scale_ratio_width = float(full_b_image.shape[1]) / full_img_width
    b_scale_ratio_height = float(full_b_image.shape[0]) / full_img_height
    # ensure the output dir exists
    output_dir = os.path.dirname(args.result_prefix)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    # multi-scale loop: render coarse to fine, reusing each scale's result as the next scale's starting point
    x = None  # this is going to hold our output image
    optimizer = Optimizer()
    for scale_i in range(args.num_scales):
        scale_factor = (scale_i * step_scale_factor) + args.min_scale
        # scale our inputs
        img_width = int(round(full_img_width * scale_factor))
        img_height = int(round(full_img_height * scale_factor))
        # prepare the current optimizer state
        if x is None:  # we need to create an initial state
            x = np.random.uniform(0, 255, (img_height, img_width, 3)).astype(np.float32)
            x = vgg16.img_to_vgg(x)
        else:  # resize the last state
            zoom_ratio = img_width / float(x.shape[-1])
            x = scipy.ndimage.zoom(x, (1, zoom_ratio, zoom_ratio), order=1)
            img_height, img_width = x.shape[-2:]
        # determine scaling of "A" images
        if args.a_scale_mode == 'match':
            a_img_width = img_width
            a_img_height = img_height
        elif args.a_scale_mode == 'none':
            a_img_width = full_a_image.shape[1] * scale_factor
            a_img_height = full_a_image.shape[0] * scale_factor
        else:  # should just be 'ratio'
            a_img_width = full_a_image.shape[1] * scale_factor * b_scale_ratio_width
            a_img_height = full_a_image.shape[0] * scale_factor * b_scale_ratio_height
        a_img_width = int(round(args.a_scale * a_img_width))
        a_img_height = int(round(args.a_scale * a_img_height))
        # prepare images for use
        a_image = img_utils.preprocess_image(full_a_image, a_img_width, a_img_height)
        ap_image = img_utils.preprocess_image(full_ap_image, a_img_width, a_img_height)
        b_image = img_utils.preprocess_image(full_b_image, img_width, img_height)
        print('Scale factor {} "A" shape {} "B" shape {}'.format(scale_factor, a_image.shape, b_image.shape))
        # load up the net and create the model
        net = vgg16.get_model(img_width, img_height, weights_path=args.vgg_weights, pool_mode=args.pool_mode)
        model = model_class(net, args)
        model.build(a_image, ap_image, b_image, (1, img_num_channels, img_height, img_width))

        for i in range(args.num_iterations_per_scale):
            print('Start of iteration {} x {}'.format(scale_i, i))
            start_time = time.time()
            if args.color_jitter:
                color_jitter = (args.color_jitter * 2) * (np.random.random((3, img_height, img_width)) - 0.5)
                x += color_jitter
            if args.jitter:
                jitter = args.jitter * scale_factor
                ox, oy = np.random.randint(-jitter, jitter+1, 2)
                x = np.roll(np.roll(x, ox, -1), oy, -2) # apply jitter shift
            # actually run the optimizer
            x, min_val, info = optimizer.optimize(x, model)
            print('Current loss value: {}'.format(min_val))
            # unjitter the image
            x = x.reshape((3, img_height, img_width))
            if args.jitter:
                x = np.roll(np.roll(x, -ox, -1), -oy, -2) # unshift image
            if args.color_jitter:
                x -= color_jitter
            # save the image
            if args.output_full_size:
                out_resize_shape = (full_img_height, full_img_width)
            else:
                out_resize_shape = None
            img = img_utils.deprocess_image(np.copy(x), contrast_percent=args.contrast_percent, resize=out_resize_shape)
            fname = args.result_prefix + '_at_iteration_{}_{}.png'.format(scale_i, i)
            imsave(fname, img)
            end_time = time.time()
            print('Image saved as {}'.format(fname))
            print('Iteration completed in {:.2f} seconds'.format(end_time - start_time,))
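A hypothetical way to invoke main() from the last example is sketched below. None of this comes from the project's CLI; it simply collects the argument names that appear in the function body into an argparse.Namespace with placeholder values, and leaves the actual call commented out because model_class and calculate_image_dims are defined elsewhere.

import argparse

# Placeholder values only; calculate_image_dims() also reads size-related
# attributes that are not shown in this snippet.
args = argparse.Namespace(
    a_image_path='a.png',            # source "A" image
    ap_image_path='a_prime.png',     # transformed "A'" image
    b_image_path='b.png',            # content "B" image
    result_prefix='out/result',
    vgg_weights='vgg16_weights.h5',
    pool_mode='max',
    num_scales=3,
    min_scale=0.25,
    a_scale_mode='ratio',
    a_scale=1.0,
    num_iterations_per_scale=5,
    jitter=0,
    color_jitter=0,
    contrast_percent=0.02,
    output_full_size=False,
)
# main(args, model_class)  # supply the project's model class here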