コード例 #1
0
 def __init__(self, ip_pi):
     """Set up the image-processing thread and its OpenCV HDR helpers.

     ip_pi -- address of the camera host (presumably the Pi's IP; TODO
     confirm against the caller).
     """
     QThread.__init__(self)
     self.threadID = 1
     self.name = "ImgThread"
     self.window = None  # display window/widget; attached later by the owner
     self.saveOn = False  # toggled externally to enable saving of frames
     # Mertens exposure fusion with equal contrast/saturation/exposure weights.
     self.mergeMertens = cv2.createMergeMertens(1., 1., 1.)
     self.mergeDebevec = cv2.createMergeDebevec()
     self.toneMap = cv2.createTonemapReinhard()
     # Alternative processing options, kept disabled for reference:
     #        self.claheProc = cv2.createCLAHE(clipLimit=1, tileGridSize=(8,8))
     #        self.simpleWB = cv2.xphoto.createSimpleWB()
     #        self.simpleWB = cv2.xphoto.createGrayworldWB()
     #        self.wb= False
     #         self.equalize = False
     #         self.clahe = False
     #        self.clipLimit = 1.
     self.reduceFactor = 1  # image reduction factor; presumably 1 = no reduction — confirm
     self.ip_pi = ip_pi
     self.hflip = False  # horizontal flip flag
     self.vflip = False  # vertical flip flag
     self.table = None  # calibration table loaded from calibrate.npz, if present
     self.doCalibrate = False
     # Best-effort load of a previously saved calibration table; a missing
     # calibrate.npz (or missing 'table' key) is deliberately tolerated.
     try:
         npz = np.load("calibrate.npz")
         self.table = npz['table']
     except Exception as e:
         pass
コード例 #2
0
def HDR(_imgs_nx1, _times_nx1, method=Debevec):
    """Merge a bracketed exposure stack into an HDR result.

    _imgs_nx1  -- stack of uint8 exposures.
    _times_nx1 -- float32 exposure times, one per image.
    method     -- Debevec, Robertson or Mertens (module-level constants).

    Returns (crf, hdr, tonemapped) for Debevec/Robertson; just the fused
    image for Mertens, which needs no exposure times.
    """
    assert _imgs_nx1.dtype == np.uint8 and _times_nx1.dtype == np.float32, "Type Error"
    assert len(_imgs_nx1) == len(_times_nx1) and len(_times_nx1) > 0, "Len Error"

    if method == Debevec:
        # Recover the camera response curve, then merge and tonemap.
        crf = cv2.createCalibrateDebevec(samples=70, random=True).process(
            src=_imgs_nx1, times=_times_nx1)
        hdr_img = cv2.createMergeDebevec().process(
            src=_imgs_nx1, times=_times_nx1, response=crf)
        res_img = cv2.createTonemapDurand(gamma=1.4).process(hdr_img.copy())
        return crf, hdr_img, res_img

    if method == Robertson:
        crf = cv2.createCalibrateRobertson().process(
            src=_imgs_nx1, times=_times_nx1)
        hdr_img = cv2.createMergeRobertson().process(
            src=_imgs_nx1, times=_times_nx1, response=crf)
        # Local tonemap, same operator as the Debevec branch.
        res_img = cv2.createTonemapDurand(gamma=1.4).process(hdr_img.copy())
        return crf, hdr_img, res_img

    if method == Mertens:
        # Exposure fusion works directly on the LDR stack.
        return cv2.createMergeMertens().process(_imgs_nx1)
コード例 #3
0
 def generate_training_data(self, imageDirPathList):
     """For each scene directory, dump per-exposure Y-channel PNGs and a
     tonemapped ground-truth pair derived from GT(clamp).hdr."""
     # Kept from the original; only consumed by a disabled RGB-GT path.
     merge_mertens = cv2.createMergeMertens()
     for scene_path in imageDirPathList:
         exposure_paths = sorted(glob.glob(scene_path + '/input*.ppm'))
         scene_images = []
         for idx, image_path in enumerate(exposure_paths):
             scene_images.append(
                 cv2.imread(image_path, flags=cv2.IMREAD_ANYDEPTH))
             y_channel = self.__getYChannel(image_path)
             out_name = scene_path + '/exposure' + str(idx) + '.png'
             cv2.imwrite(out_name, y_channel)
             print(out_name + ' has been generated!')
         # Tonemap the HDR ground truth (Drago, then a fixed 3x boost)
         # and save both its RGB rendering and its Y channel.
         gt = cv2.imread(scene_path + '/GT(clamp).hdr',
                         flags=cv2.IMREAD_ANYDEPTH)
         ldr = cv2.createTonemapDrago(1.0, 0.7).process(gt) * 3
         cv2.imwrite(scene_path + "/rgb_gt.png", ldr * 255)
         y_of_gt = self.__getYChannel(scene_path + '/rgb_gt.png')
         gt_path = scene_path + '/gt.png'
         cv2.imwrite(gt_path, y_of_gt)
         print(gt_path + ' has been generated!')
コード例 #4
0
def fuse_multi_exposure_images(im: np.ndarray,
                               under_ex: np.ndarray,
                               over_ex: np.ndarray,
                               bc: float = 1,
                               bs: float = 1,
                               be: float = 1):
    """Exposure-fuse the input with its under/over-exposure corrections,
    as in the DUAL paper.

    Arguments:
        im {np.ndarray} -- input image to be enhanced.
        under_ex {np.ndarray} -- under-exposure corrected image. same dimension as `im`.
        over_ex {np.ndarray} -- over-exposure corrected image. same dimension as `im`.

    Keyword Arguments:
        bc {float} -- weight of Mertens's contrast measure. (default: {1})
        bs {float} -- weight of Mertens's saturation measure. (default: {1})
        be {float} -- weight of Mertens's well-exposedness measure. (default: {1})

    Returns:
        np.ndarray -- the fused image. same dimension as `im`.
    """
    fuser = cv2.createMergeMertens(bc, bs, be)
    # Inputs are float images; convert each to uint8 before fusing.
    stack = []
    for layer in (im, under_ex, over_ex):
        stack.append(np.clip(layer * 255, 0, 255).astype("uint8"))
    return fuser.process(stack)
コード例 #5
0
ファイル: HDRrendering.py プロジェクト: YoshiRi/HDRpython
def Rendering(img_list, exposure_times):
    """Run three HDR pipelines over the exposure stack and save each as a JPEG."""
    # Debevec and Robertson merges both need the exposure times.
    hdr_debvec = cv2.createMergeDebevec().process(
        img_list, times=exposure_times.copy())
    hdr_robertson = cv2.createMergeRobertson().process(
        img_list, times=exposure_times.copy())

    # Tonemap each radiance map down to displayable range.
    res_debvec = cv2.createTonemapDurand(gamma=2.2).process(hdr_debvec.copy())
    res_robertson = cv2.createTonemapDurand(gamma=1.3).process(
        hdr_robertson.copy())

    # Mertens exposure fusion works directly on the LDR stack.
    res_mertens = cv2.createMergeMertens().process(img_list)

    def to_8bit(img):
        # Scale [0, 1] float output to uint8.
        return np.clip(img * 255, 0, 255).astype('uint8')

    cv2.imwrite("ldr_debvec.jpg", to_8bit(res_debvec))
    cv2.imwrite("ldr_robertson.jpg", to_8bit(res_robertson))
    cv2.imwrite("fusion_mertens.jpg", to_8bit(res_mertens))
コード例 #6
0
    def tonemap(self):
        """Tonemap the undistorted planes via Mertens fusion plus CLAHE.

        :func:`~reconstruction.Reconstruction.undistort` must be called
        before; results are stored per imager/plane in ``self.tonemapped``.
        NOTE: this file is Python 2 (print statements).
        """

        start = timeit.default_timer()
        print "Tonemapping undistortion planes",

        if not self.undistorted:
            raise RuntimeError("undistort() must be called first")

        # HDR fusion using Mertens method
        merge_mertens = cv2.createMergeMertens()

        # Iterate over both imagers
        for imager in [self.imager1, self.imager2]:
            self.tonemapped[imager] = {}

            # Iterate over all set of undistortion parameters
            for (altitude, pixelWidth, tilt) in self.iter_planes():
                # Key mirrors the scheme used by self.undistorted.
                key = str(altitude) + '-' + str(pixelWidth) + '-' + str(
                    tilt[0]) + '-' + str(tilt[1])

                # HDR fusion, then rescale the [0, 1] float result to uint16
                # so equalize_adapthist gets a full-range integer image.
                tm = merge_mertens.process(self.undistorted[imager][key])
                tm = skimage.exposure.rescale_intensity(
                    tm, in_range=(0, 1), out_range='uint16').astype('uint16')

                # Tonemapping using CLAHE (Contrast Limited Adaptive Histogram Equalization)
                tm = skimage.exposure.equalize_adapthist(tm)
                # equalize_adapthist returns floats in [0, 1]; store as uint8.
                self.tonemapped[imager][key] = np.clip(255 * tm, 0,
                                                       255).astype('uint8')

        stop = timeit.default_timer()
        print ': ' + str(stop - start)
コード例 #7
0
def HDR(img1, img2, img3, img4, *extra_imgs):
    """Fuse differently exposed images into one 8-bit image (Mertens fusion).

    Originally hard-coded to exactly four exposures; now accepts any
    additional images positionally while remaining call-compatible.

    Returns a uint8 image (fusion output clipped to [0, 255]).
    """
    img_list = [img1, img2, img3, img4, *extra_imgs]
    # Mertens exposure fusion needs no exposure times.
    merge_mertens = cv2.createMergeMertens()
    res_mertens = merge_mertens.process(img_list)
    return np.clip(res_mertens * 255, 0, 255).astype('uint8')
コード例 #8
0
ファイル: effect_hdr.py プロジェクト: Hoangthang017/CS231.L11
def effect_hdr(imgs):
    """Blend an exposure stack with Mertens fusion and return an 8-bit image."""
    fused = cv2.createMergeMertens().process(imgs)
    # Fusion output is float in [0, 1]; scale and clip to uint8.
    return np.clip(fused * 255, 0, 255).astype('uint8')
コード例 #9
0
def exposure_fusion(exposures):
    """MTB-align the exposure stack in place, fuse it (Mertens), return uint8."""
    cv2.createAlignMTB().process(exposures, exposures)
    fused = cv2.createMergeMertens().process(exposures)
    return np.clip(fused * 255, 0, 255).astype('uint8')
コード例 #10
0
ファイル: Main.py プロジェクト: Scott-Rubey/HDRPhotoMerge
def mergeSrcImages(images, times, response):
    """Merge the exposure stack and write it to <savePath>/HDR.hdr.

    NOTE(review): this uses MergeMertens, whose algorithm does not use
    exposure times or a response curve — `times` and `response` are
    accepted but effectively ignored. createMergeDebevec was probably
    intended here; confirm before relying on radiometric accuracy.
    `savePath` is a module-level global defined elsewhere in the file.
    """
    print("Creating HDR Image...")
    merge = cv2.createMergeMertens()
    hdr = merge.process(images, times, response)

    # Save HDR image.
    hdrCompleteName = os.path.join(savePath, "HDR.hdr")
    cv2.imwrite(hdrCompleteName, hdr)

    return hdr
コード例 #11
0
ファイル: process_images.py プロジェクト: geddes88/CamPy4Pi
def merge_image(images):
    """Align a list of already-loaded images (MTB, in place), fuse them
    with Mertens, and return the 8-bit result."""
    cv2.createAlignMTB().process(images, images)
    fused = cv2.createMergeMertens().process(images)
    return np.clip(fused * 255, 0, 255).astype('uint8')
コード例 #12
0
def LDR_fusion_Mertens(paths):
    """Load the exposure images named by *paths*, fuse them with Mertens,
    and save the 8-bit result to img/fusion_mertens.jpg."""
    stack = []
    for fn in paths:
        stack.append(cv.imread(str(fn)))

    fused = cv.createMergeMertens().process(stack)

    eight_bit = np.clip(fused * 255, 0, 255).astype('uint8')
    cv.imwrite("img/fusion_mertens.jpg", eight_bit)
コード例 #13
0
ファイル: hdr_fusion.py プロジェクト: dbdl32/HDR
def combine(img_stack):
    """MTB-align the stack in place, Mertens-fuse it, and return uint8."""
    cv2.createAlignMTB().process(img_stack, img_stack)
    # Mertens fusion needs no exposure times.
    fused = cv2.createMergeMertens().process(img_stack)
    return np.clip(fused * 255, 0, 255).astype('uint8')
コード例 #14
0
def createExposureFusion(input_path, filenames):
    """Read the named images from *input_path* and return their Mertens
    exposure fusion scaled to the 0-255 range (float)."""
    stack = [
        cv2.imread(os.path.join(input_path, name), cv2.IMREAD_ANYCOLOR)
        for name in filenames
    ]
    fused = cv2.createMergeMertens().process(stack)
    return fused * 255
コード例 #15
0
def main():
    """Register the right image onto the left one and preview several
    blends until 'q' is pressed.

    Python 2 code (bare print). Reads the module-level `imgL`/`imgR`
    frames (presumably filled by ROS subscribers; confirm against the
    rest of the file).
    """
    global imgL, imgR
    while not rospy.is_shutdown():
        # Wait until both camera frames have arrived.
        if imgL is None or imgR is None:
            print('img is none')
            continue
        lena_org = imgL
        lena_move = imgR
        rw, cl, nb = lena_org.shape
        cv2.imshow("lena", lena_org)
        cv2.imshow("lena move", lena_move)
        # Simple 50/50 blend of the unregistered pair, for comparison.
        over_lay_image = (lena_org.astype('float32') +
                          lena_move.astype('float32')) / 2
        over_lay_image = over_lay_image.astype('uint8')
        cv2.imshow("overlay image", over_lay_image)
        # cv2.waitKey(0)
        b, g, r = cv2.split(lena_org)
        bb, gg, rr = cv2.split(lena_move)
        lena_org_gray = cv2.cvtColor(lena_org, cv2.COLOR_BGR2GRAY)
        lena_move_gray = cv2.cvtColor(lena_move, cv2.COLOR_BGR2GRAY)
        # Hard-coded grid for an 874x1152 source at half resolution —
        # TODO confirm this matches the actual camera frame size.
        y, x = np.mgrid[:int(874 / 2), :int(1152 / 2)]
        x_m, y_m = findAdjustRemapPanPoints(lena_org_gray, lena_move, x, y)
        # print (x_m, y_m)
        print len(x_m)
        x_m = x_m.astype('float32')
        y_m = y_m.astype('float32')
        # Warp the moving image (gray and per-channel) onto the reference.
        lena_register = cv2.remap(lena_move_gray, x_m, y_m, cv2.INTER_CUBIC, borderMode=cv2.BORDER_CONSTANT,
                                  borderValue=0)
        bb = cv2.remap(bb, x_m, y_m, cv2.INTER_CUBIC, borderMode=cv2.BORDER_CONSTANT,
                       borderValue=0)
        gg = cv2.remap(gg, x_m, y_m, cv2.INTER_CUBIC, borderMode=cv2.BORDER_CONSTANT,
                       borderValue=0)
        rr = cv2.remap(rr, x_m, y_m, cv2.INTER_CUBIC, borderMode=cv2.BORDER_CONSTANT,
                       borderValue=0)
        bgr = cv2.merge((bb, gg, rr))
        # Blend the registered gray pair for a visual alignment check.
        over_lay_image = (lena_org_gray.astype('float32') +
                          lena_register.astype('float32')) / 2
        over_lay_image = over_lay_image.astype('uint8')
        # Mertens fusion of the registered color image with the reference.
        images = [bgr, lena_org]
        merge_mertens = cv2.createMergeMertens()
        res_mertens = merge_mertens.process(images)
        cv2.imshow('mertens', res_mertens)
        cv2.imshow("overlay image2", over_lay_image)
        bgr = cv2.cvtColor(over_lay_image, cv2.COLOR_GRAY2BGR)
        cv2.imshow('bgr', bgr)
        k = cv2.waitKey(1) & 0xff
        if k == ord('q'):
            break
        rospy.sleep(0.1)
    cv2.destroyAllWindows()
コード例 #16
0
def main():
    """Capture a two-shot exposure bracket around the camera's auto
    exposure, then display Debevec/Robertson/Mertens merges until 'q'.

    Relies on module-level `img` (latest camera frame) and the
    `set_param`/`get_param` helpers defined elsewhere in the file.
    """
    global client, img
    images = []
    delta = 25  # EV offset (camera units) around the auto value
    set_param('auto_exposure', True)
    set_param('auto_frame_rate', True)
    ev_auto = get_param('exposure')
    print("EV_auto: {0}".format(ev_auto))
    set_param('auto_exposure', False)
    # exposure = [ev_auto - delta, ev_auto, ev_auto + delta]
    exposure = [ev_auto - delta, ev_auto + delta]
    for ev in exposure:
        t = time.time()
        set_param('exposure', int(ev))
        delta_t = time.time() - t
        print("time: {0}".format(delta_t))
        # Give the camera a moment to apply the new exposure setting.
        time.sleep(1)
        name = 'image exposure :' + str(ev)
        images.append(img.copy())
        # EV = log2(f^2 / t)
        # et = math.pow(f, 2.0) / math.pow(2.0, ev)
        cv2.imshow(name, img.copy())

    # NOTE(review): raw exposure *settings* are passed as exposure *times*;
    # see the commented EV formula above — confirm the conversion is
    # intentionally skipped.
    exposure_times = np.array(exposure, dtype=np.float32)
    # debvec
    merge_debvec = cv2.createMergeDebevec()
    hdr_debvec = merge_debvec.process(images, times=exposure_times.copy())
    # robertson
    merge_robertson = cv2.createMergeRobertson()
    hdr_robertson = merge_robertson.process(images,
                                            times=exposure_times.copy())

    tonemap1 = cv2.createTonemapDurand(gamma=2.2)
    res_debvec = tonemap1.process(hdr_debvec.copy())
    tonemap2 = cv2.createTonemapDurand(gamma=1.3)
    res_robertson = tonemap2.process(hdr_robertson.copy())

    # Mertens fusion: no exposure times needed.
    merge_mertens = cv2.createMergeMertens()
    res_mertens = merge_mertens.process(images)

    cv2.imshow('debvec', res_debvec)
    cv2.imshow('robertson', res_robertson)
    cv2.imshow('mertens', res_mertens)

    while True:
        key = cv2.waitKey(1) & 0xff
        if key == ord('q'):
            break
コード例 #17
0
    def test_umat_merge_mertens(self):
        """Mertens fusion must give the same answer for UMat and Mat inputs."""
        if self.extraTestDataPath is None:
            self.fail('Test data is not available')

        hdr_dir = os.path.join(self.extraTestDataPath, 'cv', 'hdr')
        images, _ = load_exposure_seq(os.path.join(hdr_dir, 'exposures'))

        merger = cv.createMergeMertens()
        expected = merger.process(images)
        actual = merger.process([cv.UMat(img) for img in images])

        self.assertTrue(np.allclose(actual.get(), expected))
コード例 #18
0
def fusion_Image(dir):
    """Read the exposures under *dir*, MTB-align them in place, fuse with
    Mertens, and display the result until Esc is pressed.

    (The parameter name shadows the builtin `dir` but is kept for
    call compatibility.)
    """
    images = read_image(dir)
    cv2.createAlignMTB().process(images, images)

    fusion = cv2.createMergeMertens().process(images)

    print(type(fusion))
    cv2.imshow('Fusion', fusion)
    # 27 == Esc
    while cv2.waitKey(33) != 27:
        pass
    cv2.destroyAllWindows()
コード例 #19
0
ファイル: demoExposureFusion.py プロジェクト: inuyosi/study
def getFusedImage(imgs, param):
    """Exposure-fuse *imgs* and return a Tk PhotoImage resized to (w2, h2).

    `param` holds the three Mertens weights (contrast, saturation,
    exposure). `w2`/`h2` and the `ef` module are globals defined
    elsewhere in the file.
    """
    # The `if 0:` branch is a disabled toggle — only the ef.exposureFusion
    # path below ever runs.
    if 0:
        # Exposure Fusion using OpenCV
        ims = imgs * 255
        merge_mertens = cv2.createMergeMertens(param[0], param[1], param[2])
        tmp = merge_mertens.process(ims)
        tmp = (tmp - tmp.min()) / (tmp.max() - tmp.min())
    else:
        tmp = ef.exposureFusion(imgs, param)
    # Clamp to [0, 1] before converting to 8-bit RGB.
    tmp[tmp < 0] = 0
    tmp[tmp > 1] = 1
    # NOTE(review): Image.ANTIALIAS was removed in Pillow 10 (use
    # Image.LANCZOS there) — confirm the pinned Pillow version.
    rImg = ImageTk.PhotoImage(
        Image.fromarray(cv2.cvtColor(np.uint8(tmp * 255),
                                     cv2.COLOR_BGR2RGB)).resize(
                                         (w2, h2), Image.ANTIALIAS))
    return rImg
コード例 #20
0
def hdr(low, ref, high):
    """Warp the low/high exposures onto *ref*, then Mertens-fuse the trio
    and return the 8-bit result."""
    aligned = [warp(low, ref), ref, warp(high, ref)]

    fused = cv2.createMergeMertens().process(aligned)

    return np.clip(fused * 255, 0, 255).astype('uint8')
コード例 #21
0
ファイル: mergeImages.py プロジェクト: liuguoyou/bLUe_PYSIDE2
def expFusion(imList):
    """
    Exposure fusion of a list of same-size images, after in-place MTB
    alignment. Cf. Mertens, Kautz, Van Reeth, "Exposure Fusion",
    Computer Graphics Forum 28 (1) 161-171, 2009.
    @param imList:
    @type imList: list of ndarray
    @return: fused image scaled to the 0-255 range
    @rtype: ndarray
    """
    cv2.createAlignMTB().process(imList, imList)

    fusion = cv2.createMergeMertens().process(imList)
    # Clamp in place to [0, 1] before scaling.
    np.clip(fusion, 0.0, 1.0, out=fusion)
    return fusion * 255
コード例 #22
0
def run():
    """Load the exposure sequence, run the Debevec HDR pipeline and a
    Mertens fusion, and save the fusion as a dated PNG under HDR_ROOT."""
    images, times = loadExposureSeq(settings.BASE_DIR)

    # Debevec: estimate the response curve, merge, then tonemap.
    response = cv.createCalibrateDebevec().process(images, times)
    hdr = cv.createMergeDebevec().process(images, times, response)
    ldr = cv.createTonemap(2.2).process(hdr)  # computed but not written out

    fusion = cv.createMergeMertens().process(images)

    out_path = os.path.join(settings.HDR_ROOT, 'fusion' + date_time + '.png')
    cv.imwrite(out_path, fusion * 255)
コード例 #23
0
ファイル: ImageThread.py プロジェクト: jokubasver/yart
    def __init__(self, ip_pi):
        """Set up the image-processing thread, its OpenCV HDR helpers and
        gamma lookup tables.

        ip_pi -- address of the camera host (presumably the Pi's IP; TODO
        confirm against the caller).
        """
        QThread.__init__(self)
        self.threadID = 1
        self.name = "ImgThread"
        self.window = None  # display window/widget; attached later by the owner
        self.saveOn = False  # toggled externally to enable saving of frames
        self.mergeMertens = cv2.createMergeMertens(
            0, 1, 1)  # weights: contrast=0, saturation=1, exposure=1
        #         self.mergeMertens = cv2.createMergeMertens()
        #         print("Contrast:",self.mergeMertens.getContrastWeight())
        #         print("Saturation:",self.mergeMertens.getSaturationWeight())
        #         print("Exposure:",self.mergeMertens.getExposureWeight())
        self.mergeDebevec = cv2.createMergeDebevec()
        self.calibrateDebevec = cv2.createCalibrateDebevec()
        #        self.toneMap = cv2.createTonemapReinhard(gamma=1.)
        self.toneMap = cv2.createTonemapDrago()
        #        self.linearTonemap = cv2.createTonemap(1.)  #Normalize with Gamma 1.2

        # Alternative processing options, kept disabled for reference:
        #        self.toneMap = cv2.createTonemapMantiuk()
        #        self.claheProc = cv2.createCLAHE(clipLimit=1, tileGridSize=(8,8))
        #        self.simpleWB = cv2.xphoto.createSimpleWB()
        #        self.simpleWB = cv2.xphoto.createGrayworldWB()
        #        self.wb= False
        #         self.equalize = False
        #         self.clahe = False
        #        self.clipLimit = 1.
        #        self.alignMTB = cv2.createAlignMTB()

        # 256-entry LUTs: invgamma applies exponent 0.45 (~1/2.2),
        # gamma applies exponent 2.2 — inverse mappings of each other.
        self.invgamma = np.empty((1, 256), np.uint8)
        for i in range(256):
            self.invgamma[0, i] = np.clip(pow(i / 255.0, 0.45) * 255.0, 0, 255)
        self.gamma = np.empty((1, 256), np.uint8)
        for i in range(256):
            self.gamma[0, i] = np.clip(pow(i / 255.0, 2.2) * 255.0, 0, 255)
        self.reduceFactor = 1  # image reduction factor; presumably 1 = no reduction — confirm
        self.ip_pi = ip_pi
        self.hflip = False  # horizontal flip flag
        self.vflip = False  # vertical flip flag
        self.table = None  # calibration table loaded from calibrate.npz, if present
        self.doCalibrate = False
        # Best-effort load of a previously saved calibration table; a missing
        # calibrate.npz (or missing 'table' key) is deliberately tolerated.
        try:
            npz = np.load("calibrate.npz")
            self.table = npz['table']
        except Exception as e:
            pass
コード例 #24
0
def main(argv):
    """Fuse the four bracketed 'house' shots with Mertens fusion and show
    the result until a key is pressed. (*argv* is accepted but unused,
    matching the original entry point.)"""
    folder = './img/house/'
    names = ["A.jpg", "B.jpg", 'C.jpg', 'D.jpg']
    imgs = [cv2.imread(folder + name) for name in names]

    # Mertens weights: contrast, saturation, exposure.
    fused = cv2.createMergeMertens(1.0, 1.0, 1.0).process(imgs)

    cv2.imshow('results', fused)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
コード例 #25
0
def createHDR(images, timeDifference):
    """Align two exposures and return their Mertens fusion (float result).

    `timeDifference` is only consumed by the disabled Debevec path below;
    the active Mertens path ignores exposure times entirely.
    """
    # Unused by the active path — kept for the commented-out Debevec code.
    times = np.array([1.0, timeDifference], dtype=np.float32)
    #try:
    alignMTB = cv2.createAlignMTB()
    alignMTB.process(images, images)

    # Mertens weights: contrast=10, saturation=4, exposure=8.
    mergeMertens = cv2.createMergeMertens(10, 4, 8)
    hdrs = mergeMertens.process(images)

    #calibrateDebevec = cv2.createCalibrateDebevec()
    #responseDebevec = calibrateDebevec.process(images, times)

    #mergeDebevec = cv2.createMergeDebevec()
    #hdrDebevec = mergeDebevec.process(images, times, responseDebevec)

    #tonemapReinhard = cv2.createTonemapReinhard(1.5, 2.0,0,0)
    #ldrReinhard = tonemapReinhard.process(hdrDebevec)

    #return ldrReinhard
    return hdrs
コード例 #26
0
def ExposureFusion(filenames):
    """Fuse the named exposures (Mertens) and save a 16-bit result.png.

    The fused float image is min-max normalized to the full uint16 range
    and written as a color PNG via pypng.
    """
    # Read example images
    images = readImagesAndTimes(filenames)

    mergeMertens = cv2.createMergeMertens()
    exposureFusion = mergeMertens.process(images)

    # Normalize to [0, 1] and scale to 16-bit. np.ptp() replaces the
    # ndarray.ptp() method, which was removed in NumPy 2.0 (works on
    # NumPy 1.x as well).
    z = (65535 * ((exposureFusion - exposureFusion.min()) /
                  np.ptp(exposureFusion))).astype(np.uint16)

    with open('result.png', 'wb') as f:
        writer = png.Writer(width=z.shape[1],
                            height=z.shape[0],
                            bitdepth=16,
                            greyscale=False)

        # Convert z to the Python list of lists expected by
        # the png writer (rows of interleaved channel values).
        z2list = z.reshape(-1, z.shape[1] * z.shape[2]).tolist()
        writer.write(f, z2list)
コード例 #27
0
def ExposureFusion(filenames, i):
    """Fuse group *i*'s exposures (Mertens) and save a 16-bit PNG under
    result/, named after the group index."""
    # Read images
    print('Group number %d' % i)
    print("    Reading images ... ")
    # Read example images
    images = readImagesAndTimes(filenames)
    # Can't Align input images, so skip that step

    # Merge using Exposure Fusion
    print("    Merging using Exposure Fusion ... ")
    mergeMertens = cv2.createMergeMertens()
    exposureFusion = mergeMertens.process(images)

    # Normalize to [0, 1] and scale to 16-bit. np.ptp() replaces the
    # ndarray.ptp() method, which was removed in NumPy 2.0 (works on
    # NumPy 1.x as well).
    z = (65535 * ((exposureFusion - exposureFusion.min()) /
                  np.ptp(exposureFusion))).astype(np.uint16)
    # Save output image
    print("    Saving output...")
    with open('result/00%d_00_16s.png' % (i + 219), 'wb') as f:
        writer = png.Writer(width=z.shape[1], height=z.shape[0], bitdepth=16)
        # Convert z to the Python list of lists expected by
        # the png writer.
        z2list = z.reshape(-1, z.shape[1] * z.shape[2]).tolist()
        writer.write(f, z2list)
コード例 #28
0
# Exposure times (seconds) for the bracketed stack; `img_list` is
# presumably loaded earlier in this file in the same order — confirm.
exposure_times = np.array([15.0, 2.5, 0.25, 0.0333], dtype=np.float32)

# Merge exposures to HDR image
merge_debevec = cv.createMergeDebevec()
hdr_debevec = merge_debevec.process(img_list, times=exposure_times.copy())
merge_robertson = cv.createMergeRobertson()
hdr_robertson = merge_robertson.process(img_list, times=exposure_times.copy())

# Tonemap HDR image (simple gamma tonemappers)
tonemap1 = cv.createTonemap(gamma=2.2)
res_debevec = tonemap1.process(hdr_debevec.copy())
tonemap2 = cv.createTonemap(gamma=1.3)
res_robertson = tonemap2.process(hdr_robertson)

# Exposure fusion using Mertens (no exposure times needed)
merge_mertens = cv.createMergeMertens()
res_mertens = merge_mertens.process(img_list)

# Convert datatype to 8-bit and save
res_debevec_8bit = np.clip(res_debevec * 255, 0, 255).astype('uint8')
res_robertson_8bit = np.clip(res_robertson * 255, 0, 255).astype('uint8')
res_mertens_8bit = np.clip(res_mertens * 255, 0, 255).astype('uint8')
cv.imwrite("hdr_debevec.jpg", res_debevec_8bit)
cv.imwrite("hdr_robertson.jpg", res_robertson_8bit)
cv.imwrite("fusion_mertens.jpg", res_mertens_8bit)

# Estimate camera response function (CRF)
cal_debevec = cv.createCalibrateDebevec()
crf_debevec = cal_debevec.process(img_list, times=exposure_times)
hdr_debevec = merge_debevec.process(img_list,
                                    times=exposure_times.copy(),
コード例 #29
0
# Build a synthetic exposure ladder: each frame doubles the exposure
# every two steps (factor sqrt(2) per step) starting from 1/1024 s.
exposure_times = list()
lowest_exp_time = 1 / 1024.
for i in range(len(out_img_list)):
    exposure_times.append(lowest_exp_time * math.pow(math.sqrt(2.), i))
exposure_times = np.array(exposure_times).astype(np.float32)

# Save each frame as JPEG with an EXIF ExposureTime rational.
for i, out_img in enumerate(out_img_list):
    numer, denom = float(exposure_times[i]).as_integer_ratio()
    # NOTE(review): these blocks try to shrink numerator/denominator to at
    # most 9 digits, but `x / 10 * (digits - 9)` is not `x // 10**(digits-9)`
    # (and the second line of the first block re-reads the already-updated
    # numerator) — the scaling math looks wrong; confirm intent.
    if int(math.log10(numer) + 1) > 9:
        numer = int(numer / 10 * (int(math.log10(numer) + 1) - 9))
        denom = int(denom / 10 * (int(math.log10(numer) + 1) - 9))
    if int(math.log10(denom) + 1) > 9:
        numer = int(numer / 10 * (int(math.log10(denom) + 1) - 9))
        denom = int(denom / 10 * (int(math.log10(denom) + 1) - 9))
    exif_ifd = {piexif.ExifIFD.ExposureTime: (numer, denom)}
    exif_dict = {"Exif": exif_ifd}
    exif_bytes = piexif.dump(exif_dict)
    # PIL expects RGB order.
    out_img_ = cv2.cvtColor(out_img, cv2.COLOR_BGR2RGB)
    out_img_pil = Image.fromarray(out_img_)
    out_img_pil.save(outdir_path + "/exposure_" + str(i) + ".jpg",
                     exif=exif_bytes)

# Debevec merge using the synthetic times.
merge_debvec = cv2.createMergeDebevec()
hdr_debvec = merge_debvec.process(out_img_list, times=exposure_times.copy())
cv2.imwrite(outdir_path + '/MergeDebevec.hdr', hdr_debvec)

# Mertens fusion with an extreme exposure weight (1e38).
merge_mertens = cv2.createMergeMertens(1., 1., 1.e+38)
res_mertens = merge_mertens.process(out_img_list)
cv2.imwrite(outdir_path + '/MergeMertens.hdr', res_mertens)
コード例 #30
0
ファイル: exposureFusion.py プロジェクト: FA78DWA/learnopencv
  # Collect input images either from the command line or from the
  # built-in example set.
  if len(sys.argv) > 1:
    # Read images from the command line
    images = []
    for filename in sys.argv[1:]:
      im = cv2.imread(filename)
      images.append(im)
    needsAlignment = False
  else :
    # Read example images
    images = readImagesAndTimes()
    needsAlignment = False
  
  # Align input images. Note: needsAlignment is False on both paths
  # above, so the AlignMTB branch is currently dead code.
  if needsAlignment:
    print("Aligning images ... ")
    alignMTB = cv2.createAlignMTB()
    alignMTB.process(images, images)
  else :
    print("Skipping alignment ... ")
  
  # Merge using Exposure Fusion
  print("Merging using Exposure Fusion ... ");
  mergeMertens = cv2.createMergeMertens()
  exposureFusion = mergeMertens.process(images)

  # Save output image (fusion output is [0, 1] float, hence the *255)
  print("Saving output ... exposure-fusion.jpg")
  cv2.imwrite("exposure-fusion.jpg", exposureFusion * 255)
コード例 #31
0
ファイル: hdr_imaging.py プロジェクト: ArkaJU/opencv
## [Load images and exposure times]
images, times = loadExposureSeq(args.input)
## [Load images and exposure times]

## [Estimate camera response]
calibrate = cv.createCalibrateDebevec()
response = calibrate.process(images, times)
## [Estimate camera response]

## [Make HDR image]
merge_debevec = cv.createMergeDebevec()
hdr = merge_debevec.process(images, times, response)
## [Make HDR image]

## [Tonemap HDR image]
# NOTE(review): createTonemapDurand was moved to the opencv_contrib
# xphoto module in OpenCV 4 — confirm the OpenCV version in use.
tonemap = cv.createTonemapDurand(2.2)
ldr = tonemap.process(hdr)
## [Tonemap HDR image]

## [Perform exposure fusion]
merge_mertens = cv.createMergeMertens()
fusion = merge_mertens.process(images)
## [Perform exposure fusion]

## [Write results]
# fusion/ldr are [0, 1] floats, hence the *255; hdr is saved as radiance.
cv.imwrite('fusion.png', fusion * 255)
cv.imwrite('ldr.png', ldr * 255)
cv.imwrite('hdr.hdr', hdr)
## [Write results]
コード例 #32
0
        images.append(im)
    return images


if __name__ == '__main__':

    # Read images
    print("Reading images ... ")

    # Read example images
    images = readImagesAndTimes()
    # Can't Align input images, so skip that step

    # Merge using Exposure Fusion
    print("Merging using Exposure Fusion ... ")
    mergeMertens = cv2.createMergeMertens()
    exposureFusion = mergeMertens.process(images)

    # Normalize (min-max over the value range) and scale to 16-bit.
    # np.ptp() replaces the ndarray.ptp() method, which was removed in
    # NumPy 2.0 (works on NumPy 1.x as well).
    z = (65535 * ((exposureFusion - exposureFusion.min()) /
                  np.ptp(exposureFusion))).astype(np.uint16)

    # Save output image
    print("Saving output...")
    with open('exposure-fusion.png', 'wb') as f:
        writer = png.Writer(width=z.shape[1], height=z.shape[0], bitdepth=16)
        # Convert z to the Python list of lists expected by
        # the png writer.
        z2list = z.reshape(-1, z.shape[1] * z.shape[2]).tolist()
        writer.write(f, z2list)
コード例 #33
0
import cv2            #needed for histogram plotting and preview window display
                #need to build and install opencv version 3 to support frame blending
import threading
import struct
import logging
import config
import numpy as np
import io
from time import sleep
from fractions import Fraction
from PyQt5 import QtCore as qtcore
from PyQt5 import QtGui

# Center fraction of the frame used for histogram statistics (avoids
# counting black borders).
mask_pct = .8            #this determines what (center) portion of the image is used for histogram calculations. (Avoid using black borders)
# Shared Mertens fusion object for bracketed frame blending.
blender=cv2.createMergeMertens()   #COMMENT OUT IF NOT USING opencv version 3+ and bracketing

def thefilename(i, suffix=""):
    """Build (and debug-log) the numbered jpeg path for frame *i* under the
    configured capture folder."""
    name = "%s/img%.5d%s.jpg" % (str(config.folder), i, suffix)
    logging.debug(name)
    return name
    
def subDims(amt, fraction):
    """Return the (start, stop) bounds of the centered *fraction* of a
    width or height *amt*."""
    lo = int(amt * (1 - fraction) / 2)
    hi = int(amt * (1 + fraction) / 2)
    return (lo, hi)
def getMask(img, fraction):
    """Return a uint8 mask, 255 on the centered *fraction* of *img*'s
    rows and columns and 0 elsewhere."""
    rows, cols = img.shape[0], img.shape[1]
    # Centered sub-range bounds (same arithmetic as subDims).
    r0, r1 = int(rows * (1 - fraction) / 2), int(rows * (1 + fraction) / 2)
    c0, c1 = int(cols * (1 - fraction) / 2), int(cols * (1 + fraction) / 2)
    mask = np.zeros(img.shape[:2], np.uint8)
    mask[r0:r1, c0:c1] = 255
    return mask

def saveable_img(img):
    """Scale a float image (presumably in [0, 1] — confirm with callers)
    by 255 so it can be written to disk; returns a float64 array."""
    return np.asarray(img, dtype=float) * 255.0