def experiment_example():
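    # mix two texture images 50/50 and run layer separation on the result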
    im1 = prepare_image('data/experiments/texture3.jpg')
    im2 = prepare_image('data/experiments/texture1.jpg')
    mixed = (im1 + im2) / 2
    # mixed = prepare_gray_image('data/experiments/97033.jpg')
    # mixed = prepare_gray_image('data/separation/c.jpg')
    s = Separation("mixed", mixed, num_iter=8000)
    s.optimize()
    s.finalize()
def watermark_example():
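    # Each example pairs a watermarked photo ('<name>.jpg') with a hint image of
    # the watermark ('<name>_watermark.png'); uncomment names below to run more cases.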

    example_names = [
        # "fotolia", "copyright", "small_portubation",
        # "cvpr1", "cvpr2", "coco", "coco2", "cvpr3", "cvpr4",
        # "AdobeStock1", "AdobeStock2", "AdobeStock3", "AdobeStock4",
        "AdobeStock5",
    ]
    for name in example_names:
        im = prepare_image('data/watermark/{}.jpg'.format(name))
        fg = prepare_image('data/watermark/{}_watermark.png'.format(name))
        remove_watermark(name, im, fg)
def separate_example():
    """
    runs a separation on specific two images
    :return:
    """
    # im1 = prepare_image('data/kate.png')
    # im2 = prepare_image('data/f16.png')
    # mixed = mix_images(im1, im2)

    # im1 = prepare_image('data/bear.jpg')
    # im2 = prepare_image('data/players.jpg')
    # mixed = prepare_image("data/separation/difference.bmp")

    # mixed, kernel, ratio = realistic_mix_images(im1, im2)

    # mixed = prepare_image('data/separation/postcard/ae-5-m-11.png')
    # mixed = prepare_image('data/separation/solid/m.jpg')

    # im1 = prepare_image('data/separation/g.jpg')
    # ----
    # im2 = prepare_image('data/separation/dorm1_input.png')
    #
    # s = Separation("dorm", im2, num_iter=4000)
    # s.optimize()
    # s.finalize()
    #
    # im2 = prepare_image('data/separation/dusk2_input.png')
    # im2 = np_imresize(im2, 0.5)
    #
    #
    # s = Separation("dusk", im2, num_iter=4000)
    # s.optimize()
    # s.finalize()
    im2 = prepare_image('data/separation/bus_station_input.png')

    s = Separation("bus_station", im2, num_iter=4000)
    s.optimize()
    s.finalize()

    im2 = prepare_image('data/separation/night3_input.png')
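    # downscale this input to half resolution before separating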
    im2 = np_imresize(im2, 0.5)

    s = Separation("night", im2, num_iter=4000)
    s.optimize()
    s.finalize()

    im2 = prepare_image('data/separation/dusk_input.png')

    s = Separation("dusj1", im2, num_iter=4000)
    s.optimize()
    s.finalize()
def watermark2_example():
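    # two photos carrying the same watermark, plus a hint image of the watermark;
    # the optimization is repeated several times (the median step is still a TODO)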
    im1 = prepare_image('data/watermark/fotolia1.jpg')
    im2 = prepare_image('data/watermark/fotolia2.jpg')
    fg = prepare_image('data/watermark/fotolia_many_watermark.png')
    results = []
    for i in range(7):
        # TODO: make it median
        s = TwoImagesWatermark("fotolia_example_{}".format(i),
                               im1,
                               im2,
                               step_num=2,
                               watermark_hint=fg)
        s.optimize()
        s.finalize()
def run_on_benchmark1(benchmark_path, ambient_path):
    """

    :param benchmark_path:
    :param ambient_path:
    :return:
    """
    ambients = get_ambients_dict(ambient_path)
    for image_path in glob(os.path.join(benchmark_path, "*")):
        image_name = full_path_to_name(image_path)
        print("Processing", image_name)
        image = prepare_image(image_path)

        if image_name in ambients:
            print("Found ambient!")
            num_iter, gt_ambient = 8000, ambients[image_name]
        else:
            num_iter, gt_ambient = 4000, None
        dehaze(image_name,
               image,
               num_iter,
               plot_during_training=False,
               show_every=40001,
               use_deep_channel_prior=False,
               gt_ambient=gt_ambient)
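# Example invocation (paths are hypothetical and depend on the local data layout):
# run_on_benchmark1('data/dehazing/benchmark', 'data/dehazing/ambient')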
def segment_example():
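    # segment the zebra image using rough foreground/background hint masks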
    # for i in range(1, 10):
    im = prepare_image('data/segmentation/zebra.png')
    fg = prepare_image('data/segmentation/zebra_fg - Copy.png')
    bg = prepare_image('data/segmentation/zebra_bg - Copy.png')
    # fg = prepare_image('data/segmentation/zebra_5_mask.bmp')
    # bg = 1 - prepare_image('data/segmentation/zebra_5_mask.bmp')
    # fg = prepare_image('data/segmentation/zebra_saliency.bmp')
    # fg[fg > 0.9] = 1
    # fg[fg <= 0.9] = 0
    # bg = 1 - prepare_image('data/segmentation/zebra_saliency.bmp')
    # bg[bg > 0.9] = 1
    # bg[bg <= 0.9] = 0
    s = Segmentation("zebra_{}".format(1), im, bg_hint=bg, fg_hint=fg)
    s.optimize()
    s.finalize()
def separate_image_video_example():
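    # mix a still image 50/50 with every frame of a short video (the image is
    # resized to the video's frame size), then separate the two layers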
    # vid = prepare_video('data/separation/vid.avi')
    # vid = prepare_video('data/separation/half_horses.mp4')
    vid = prepare_video('data/separation/fountain_short.mp4')
    im = prepare_image('data/separation/d.jpg')
    im = np_imresize(im, output_shape=vid.shape[2:])
    mix = 0.5 * im + 0.5 * vid
    image_video_separation("tiger", mix)

    vid = prepare_video('data/separation/fountain_short.mp4')
    im = prepare_image('data/separation/f.jpg')
    im = np_imresize(im, output_shape=vid.shape[2:])
    mix = 0.5 * im + 0.5 * vid
    image_video_separation("misg", mix)

    vid = prepare_video('data/separation/horses_short.mp4')
    im = prepare_image('data/separation/g.jpg')
    im = np_imresize(im, output_shape=vid.shape[2:])
    mix = 0.5 * im + 0.5 * vid
    image_video_separation("cow", mix)
def ambiguity_experiment_example():
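    # compose two inputs whose halves come from different textures and save their
    # 50/50 mixture; the loop below repeats separation on this ambiguous mixture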
    im1 = prepare_image('data/experiments/texture3.jpg')
    im2 = prepare_image('data/experiments/texture1.jpg')
    im3 = prepare_image('data/experiments/texture4.jpg')
    im4 = prepare_image('data/experiments/texture6.jpg')
    im1_new = im1.copy()
    im1_new[:, :, :im1.shape[2] // 2] = im4[:, :, :im1.shape[2] // 2]
    im2_new = im2.copy()
    # im4 = np_imresize(im4, output_shape=im2.shape)
    im2_new[:, :, :im2.shape[2] // 2] = im3[:, :, :im2.shape[2] // 2]
    save_image("input1", im1_new)
    save_image("input2", im2_new)
    mixed = (im1_new + im2_new) / 2
    save_image("mixed", mixed)
    exit()  # stops here so only the inputs are generated; remove to run the separations below
    for i in range(10):
        # mixed = prepare_gray_image('data/experiments/97033.jpg')
        # mixed = prepare_gray_image('data/separation/c.jpg')
        s = Separation("mixed_{}".format(i), mixed, num_iter=8000)
        s.optimize()
        s.finalize()
def watermarks2_example_no_hint():
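    # remove a watermark shared by several images without a watermark hint:
    # run the multi-image optimization several times, take pixelwise medians of
    # the recovered layers, and keep original pixels where no watermark was found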
    # im1 = prepare_image('data/watermark/123RF_1.jpg')
    # im2 = prepare_image('data/watermark/123RF_2.jpg')
    # im3 = prepare_image('data/watermark/123RF_3.jpg')
    # im4 = prepare_image('data/watermark/123RF_4.jpg')
    # results = []
    # for i in range(7):
    #     # TODO: make it median
    #     s = ManyImagesWatermarkNoHint(["123rf_example_{}".format(i) for i in range(4)], [im1, im2, im3, im4])
    #     s.optimize()
    #     s.finalize()

    im1 = prepare_image('data/watermark/fotolia1.jpg')
    im2 = prepare_image('data/watermark/fotolia2.jpg')
    im3 = prepare_image('data/watermark/fotolia3.jpg')
    results = []
    for i in range(5):
        # TODO: make it median
        s = ManyImagesWatermarkNoHint(
            ["fotolia_example_{}".format(i) for i in range(3)],
            [im1, im2, im3])
        s.optimize()
        results.append(s.best_result)
    # namedtuple("ManyImageWatermarkResult", ['cleans', 'mask', 'watermark', 'psnr'])
    obtained_watermark = median(
        [result.mask * result.watermark for result in results])
    obtained_im1 = median([result.cleans[0] for result in results])
    obtained_im2 = median([result.cleans[1] for result in results])
    obtained_im3 = median([result.cleans[2] for result in results])
    # obtained_mask = median([result.mask for result in results])
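    # v is 1 wherever the recovered watermark is negligible; keep the original
    # pixels there and use the cleaned estimates elsewhere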
    v = np.zeros_like(obtained_watermark)
    v[obtained_watermark < 0.03] = 1
    final_im1 = v * im1 + (1 - v) * obtained_im1
    final_im2 = v * im2 + (1 - v) * obtained_im2
    final_im3 = v * im3 + (1 - v) * obtained_im3
    save_image("fotolia1_final", final_im1)
    save_image("fotolia2_final", final_im2)
    save_image("fotolia3_final", final_im3)
    save_image("fotolia_final_watermark", obtained_watermark)
def segment_all(one_obj_path, output_path):
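    # run the segmentation several times per image and save the median of the
    # resulting masks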
    for image in glob.glob(one_obj_path + "input/*"):
        name = image[len(one_obj_path + "input/"):-4]
        print("processing {}".format(name))
        #fg = image.replace("input/", "output_fg/").replace(".jpg", ".png")
        #bg = image.replace("input/", "output_bg/").replace(".jpg", ".png")
        masks = []
        im = prepare_image(image)
        #fg = prepare_image(fg)
        #bg = prepare_image(bg)
        fg = None
        bg = None
        for i in range(5):
            s = Segmentation("1obj_{}".format(i) + name,
                             im,
                             bg_hint=bg,
                             fg_hint=fg,
                             plot_during_training=True,
                             output_path=output_path)
            s.optimize()
            masks.append(s.best_result.mask)
        save_image("1obj_" + name + "_final_mask", median(masks), output_path)
def two_extending_experiment():
    im1 = prepare_image('data/kate.png')
    im2 = prepare_image('data/f16.png')
    t = SeparationExtendingExperiment("kate_f16", im1, im2, 2000, True)
    t.optimize()
    t.finalize()
def dehazing_example():
    # im = prepare_image('data/dehazing/forest.png')
    # im = prepare_image('data/dehazing/tiananmen.png')
    im = prepare_image('data/dehazing/cityscape.png')
    # assumed completion: run the optimization on the loaded image, mirroring the
    # dehaze() call used in run_on_benchmark1() above (num_iter here is a guess)
    dehaze("cityscape", im, 8000)

_clear_output()    
for input_path in glob.glob(image_dir + '/*'):

    image_name = os.path.basename(input_path).split('.')[0]
    print('----> started training on image << {} >>'.format(image_name))
    
    if image_name == 'eagle':
        continue ###!!!
        
    _make_dir(image_name)
    
    im = prepare_image(os.path.join(image_dir, image_name + '.jpg'))

    orig_fg = prepare_image(os.path.join(saliency_dir_fg, image_name + '.jpg'))
    orig_bg = prepare_image(os.path.join(saliency_dir_bg, image_name + '.jpg'))


    prior_hint_name = image_name.split('.')[0] + '_cluster_hint' + '.jpg' 
    prior_fg = prepare_image(os.path.join(prior_hint_dir_fg, prior_hint_name))
    prior_bg = prepare_image(os.path.join(prior_hint_dir_bg, prior_hint_name))


    # Configs 
    stage_1_iter = 500
    stage_2_iter = 500
    show_every = 200


run_dir = os.path.join(args.output_dir, "runs")
run_dir = os.path.join(run_dir, time.strftime("%Y-%m-%d__%H-%M-%S"))
os.makedirs(run_dir)
tensorboard_dir = os.path.join(run_dir, "tensorboard")
os.makedirs(tensorboard_dir)
writer = SummaryWriter(tensorboard_dir)

# load image
im = prepare_image(args.input)
im_torch = torch.from_numpy(im).unsqueeze(0).to(device)
im = im.transpose(1, 2, 0)
im_shape = im.shape

# noise
noise = get_noise(2, 'noise', (im_shape[0], im_shape[1])).to(device)

# slic
if args.load_prior is None:
    # train prior model - average over samples
    rec_samples = []
    for ae_sample in range(1, args.aeSamples + 1):
        ae = skip(noise.size(1),
                  im_torch.size(1),
                  num_channels_down=[8, 16, 32],
parser.add_argument("--fg_hint", type=str, required=True)
parser.add_argument("--bg_hint", type=str, required=True)
parser.add_argument("--prior_samples", type=int, default=20)
parser.add_argument("--prior_iters", type=int, default=100)
parser.add_argument("--dip_iters", type=int, default=1000)
parser.add_argument("--output_path", type=str, default="./output/")

args = parser.parse_args()

if not os.path.exists(args.output_path):
    os.makedirs(args.output_path)

device = "cuda" if torch.cuda.is_available() else "cpu"

# train prior
img = prepare_image(args.img)

prior_input = torch.from_numpy(img).unsqueeze(0).to(device)
rec_samples = []
for ae_sample in range(1, args.prior_samples + 1):
    ae = skip(prior_input.size(1),
              prior_input.size(1),
              num_channels_down=[8, 16, 32],
              num_channels_up=[8, 16, 32],
              num_channels_skip=[0, 0, 0],
              upsample_mode='bilinear',
              filter_size_down=3,
              filter_size_up=3,
              need_sigmoid=True,
              need_bias=True,
              pad='reflection',