Example 1
import os

import cv2
import numpy as np

# Project helpers (load_pretrained, preprocess, create_name, cv2torch,
# torch2cv, map_range, tone_map, create_tmo_param_from_args) are assumed to
# be importable from the surrounding module.


def create_images(opt):
    # Expand every LDR image listed in opt.ldr to an HDR prediction.
    net = load_pretrained(opt)
    if (len(opt.ldr) == 1) and os.path.isdir(opt.ldr[0]):
        # Treat this as a directory of ldr images
        opt.ldr = [
            os.path.join(opt.ldr[0], f)
            for f in os.listdir(opt.ldr[0])
            if any(f.lower().endswith(x) for x in opt.ldr_extensions)
        ]
    for ldr_file in opt.ldr:
        # Read with the stored bit depth, forcing a 3-channel (BGR) image.
        loaded = cv2.imread(
            ldr_file, flags=cv2.IMREAD_ANYDEPTH | cv2.IMREAD_COLOR
        )
        if loaded is None:
            print('Could not load {0}'.format(ldr_file))
            continue
        # Convert the frame into the network's expected input (see preprocess).
        ldr_input = preprocess(loaded, opt)
        if opt.resize:
            out_name = create_name(
                ldr_file, 'resized', 'jpg', opt.out, opt.tag
            )
            # Cast to 8-bit for JPEG output (cv2.imwrite rejects int64 data).
            cv2.imwrite(out_name, (ldr_input * 255).astype(np.uint8))

        t_input = cv2torch(ldr_input)
        if opt.use_gpu:
            net.cuda()
            t_input = t_input.cuda()
        # Run the network (optionally tile-wise via patch_size) and map the
        # HDR prediction into [0, 1].
        prediction = map_range(
            torch2cv(net.predict(t_input, opt.patch_size).cpu()), 0, 1
        )

        # Write the HDR prediction as OpenEXR (.exr) or Radiance (.hdr).
        extension = 'exr' if opt.use_exr else 'hdr'
        out_name = create_name(
            ldr_file, 'prediction', extension, opt.out, opt.tag
        )
        print(f'Writing {out_name}')
        cv2.imwrite(out_name, prediction)
        if opt.tone_map is not None:
            # Optionally also write a tone-mapped 8-bit preview.
            tmo_img = tone_map(
                prediction, opt.tone_map, **create_tmo_param_from_args(opt)
            )
            out_name = create_name(
                ldr_file,
                'prediction_{0}'.format(opt.tone_map),
                'jpg',
                opt.out,
                opt.tag,
            )
            cv2.imwrite(out_name, (tmo_img * 255).astype(np.uint8))
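For reference, a minimal driver for create_images could look like the sketch below. The build_opt helper, its flag names, and its defaults are hypothetical; they only mirror the opt attributes the function above reads (ldr, ldr_extensions, resize, out, tag, use_gpu, patch_size, use_exr, tone_map), and load_pretrained will typically need further options (for example a weights path) that are not shown here.

import argparse


def build_opt():
    # Hypothetical CLI; flag names mirror the attributes read above,
    # defaults are illustrative assumptions.
    parser = argparse.ArgumentParser()
    parser.add_argument('--ldr', nargs='+', required=True,
                        help='LDR image files, or a single directory')
    parser.add_argument('--ldr_extensions', nargs='+',
                        default=['.jpg', '.jpeg', '.png'])
    parser.add_argument('--out', default='.')
    parser.add_argument('--tag', default=None)
    parser.add_argument('--patch_size', type=int, default=256)
    parser.add_argument('--use_gpu', action='store_true')
    parser.add_argument('--use_exr', action='store_true')
    parser.add_argument('--resize', action='store_true')
    parser.add_argument('--tone_map', default=None)
    return parser.parse_args()


if __name__ == '__main__':
    create_images(build_opt())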
Example 2
import cv2
import numpy as np

# Project helpers (load_pretrained, preprocess, create_name, cv2torch,
# torch2cv, tone_map, create_tmo_param_from_args, smoothen_luminance) are
# assumed to be importable from the surrounding module.


def create_video(opt):
    # Video output always needs a displayable tone mapping; default to Reinhard.
    if opt.tone_map is None:
        opt.tone_map = 'reinhard'
    net = load_pretrained(opt)
    video_file = opt.ldr[0]
    cap_in = cv2.VideoCapture(video_file)
    fps = cap_in.get(cv2.CAP_PROP_FPS)
    width = int(cap_in.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap_in.get(cv2.CAP_PROP_FRAME_HEIGHT))
    # Total frame count, used only for progress reporting.
    n_frames = cap_in.get(cv2.CAP_PROP_FRAME_COUNT)
    predictions = []
    lum_percs = []
    while cap_in.isOpened():
        perc = cap_in.get(cv2.CAP_PROP_POS_FRAMES) * 100 / n_frames
        print('\rConverting video: {0:.2f}%'.format(perc), end='')
        ret, loaded = cap_in.read()
        if not ret:
            # End of stream (or a failed read).
            break
        ldr_input = preprocess(loaded, opt)
        t_input = cv2torch(ldr_input)
        if opt.use_gpu:
            net.cuda()
            t_input = t_input.cuda()
        predictions.append(
            torch2cv(net.predict(t_input, opt.patch_size).cpu())
        )
        # Record per-frame luminance percentiles for temporal smoothing below.
        percs = np.percentile(predictions[-1], (1, 25, 50, 75, 99))
        lum_percs.append(percs)
    print()
    cap_in.release()

    # Smooth the per-frame luminance to reduce flicker, then encode with H.264.
    smooth_predictions = smoothen_luminance(predictions, lum_percs)
    fourcc = cv2.VideoWriter_fourcc(*'X264')
    out_vid_name = create_name(
        video_file, 'prediction', 'avi', opt.out, opt.tag
    )
    out_vid = cv2.VideoWriter(out_vid_name, fourcc, fps, (width, height))
    for i, pred in enumerate(smooth_predictions):
        perc = (i + 1) * 100 / n_frames
        print('\rWriting video: {0:.2f}%'.format(perc), end='')
        tmo_img = tone_map(
            pred, opt.tone_map, **create_tmo_param_from_args(opt)
        )
        tmo_img = (tmo_img * 255).astype(np.uint8)
        out_vid.write(tmo_img)
    print()
    out_vid.release()
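A sketch of how create_video might be invoked, assuming the attributes read above are sufficient; in practice load_pretrained and create_tmo_param_from_args may require additional options not listed here, and the file name is hypothetical.

from types import SimpleNamespace

# Hypothetical options object; attribute names mirror what create_video reads.
opt = SimpleNamespace(
    ldr=['input_video.mp4'],  # the first entry is used as the input video path
    out='.',
    tag=None,
    patch_size=256,
    use_gpu=True,
    tone_map=None,            # None falls back to 'reinhard' inside create_video
)
create_video(opt)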
Example 3
import cv2


def transform(hdr):
    # Random 384x384 crop (Gaussian-weighted position), resized to the
    # 256x256 network input.
    hdr = slice_gauss(hdr, crop_size=(384, 384), precision=(0.1, 1))
    hdr = cv2.resize(hdr, (256, 256))
    # Normalize the HDR target, then derive the LDR input from it with a
    # randomly chosen tone-mapping operator; return both as torch tensors.
    hdr = map_range(hdr)
    ldr = random_tone_map(hdr)
    return cv2torch(ldr), cv2torch(hdr)
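transform is a typical training-time pairing step: it turns one HDR frame into an (LDR input, HDR target) tensor pair. slice_gauss, map_range, random_tone_map and cv2torch are assumed to be project helpers (random crop, range normalization, random tone mapping, OpenCV-to-torch conversion). A usage sketch with a hypothetical file name:

import cv2

# Hypothetical file; the frame must be at least 384x384 for the crop to work.
hdr = cv2.imread('scene.hdr', flags=cv2.IMREAD_ANYDEPTH | cv2.IMREAD_COLOR)
ldr_tensor, hdr_tensor = transform(hdr)
# If cv2torch permutes HWC -> CHW, both tensors have shape (3, 256, 256).
print(ldr_tensor.shape, hdr_tensor.shape)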