# Script fragment: build an HDR image from an exposure sequence and write an
# exposure-fusion result (OpenCV HDR tutorial flow).
if not args.input:
    parser.print_help()
    exit(0)

## [Load images and exposure times]
images, times = loadExposureSeq(args.input)
## [Load images and exposure times]

## [Estimate camera response]
calibrate = cv.createCalibrateDebevec()
response = calibrate.process(images, times)
## [Estimate camera response]

## [Make HDR image]
merge_debevec = cv.createMergeDebevec()
hdr = merge_debevec.process(images, times, response)
## [Make HDR image]

## [Tonemap HDR image]
# NOTE(review): createTonemapDurand moved to opencv-contrib (cv.xphoto) in
# OpenCV 4 -- confirm the installed build still exposes it here.
tonemap = cv.createTonemapDurand(2.2)
ldr = tonemap.process(hdr)
## [Tonemap HDR image]

## [Perform exposure fusion]
merge_mertens = cv.createMergeMertens()
fusion = merge_mertens.process(images)
## [Perform exposure fusion]

## [Write results]
# NOTE(review): `ldr` is computed above but never written; only the Mertens
# fusion (range ~[0, 1], scaled to 8-bit, not clipped) is saved.
cv.imwrite('fusion.png', fusion * 255)
def __init__(self, ip_pi):
    """Image-processing thread: sets up OpenCV HDR/fusion operators and
    loads an optional calibration lookup table from calibrate.npz.

    :param ip_pi: address of the Raspberry Pi camera source (stored as-is).
    """
    QThread.__init__(self)
    self.threadID = 1
    self.name = "ImgThread"
    self.window = None      # UI window, attached later by the owner
    self.saveOn = False     # when True, processed frames are saved
    # Exposure fusion with equal contrast/saturation/exposure weights.
    self.mergeMertens = cv2.createMergeMertens(1., 1., 1.)
    self.mergeDebevec = cv2.createMergeDebevec()
    self.toneMap = cv2.createTonemapReinhard()
    # self.claheProc = cv2.createCLAHE(clipLimit=1, tileGridSize=(8,8))
    # self.simpleWB = cv2.xphoto.createSimpleWB()
    # self.simpleWB = cv2.xphoto.createGrayworldWB()
    # self.wb= False
    # self.equalize = False
    # self.clahe = False
    # self.clipLimit = 1.
    self.reduceFactor = 1   # image downscale factor
    self.ip_pi = ip_pi
    self.hflip = False
    self.vflip = False
    self.table = None       # calibration lookup table, if available
    self.doCalibrate = False
    # Best-effort load of a previously saved calibration table; a missing or
    # malformed file is deliberately ignored (thread works without it).
    try:
        npz = np.load("calibrate.npz")
        self.table = npz['table']
    except Exception as e:
        pass
def process_photos(folders):
    """Merge a folder of bracketed exposures into an HDR image plus several
    tone-mapped LDR previews.

    :param folders: dict with keys "psave" (archive root), "ptmp" (incoming
        images), "pgal" (gallery output root) and "foldername" (shot name).
    """
    psave = folders["psave"]
    ptmp = folders["ptmp"]
    pgal = folders["pgal"]
    foldername = folders["foldername"]
    save_folder = psave + "/" + foldername
    makedirs(save_folder)

    onlyfiles = [f for f in listdir(ptmp) if isfile(join(ptmp, f))]
    images = []
    times = np.array([], dtype=np.float32)

    logging.info("Loading images for HDR")
    for filename in onlyfiles:
        filesrc = ptmp + "/" + filename
        filedest = save_folder + "/" + filename
        shutil.move(filesrc, filedest)
        # Fix: the file handle was opened and never closed (resource leak);
        # use a context manager so it is released before imread.
        with open(filedest, 'rb') as file_data:
            tags = exifread.process_file(file_data)
        exposure = float(tags['EXIF ExposureTime'].values[0])
        im = cv2.imread(filedest)
        images.append(im)
        times = np.append(times, np.float32(exposure))

    logging.info("Align input images")
    alignMTB = cv2.createAlignMTB()
    alignMTB.process(images, images)

    logging.info('Obtain Camera Response Function (CRF)')
    calibrate_debevec = cv2.createCalibrateDebevec()
    response_debevec = calibrate_debevec.process(images, times)

    logging.info('Merge images into an HDR linear image')
    merge_debevec = cv2.createMergeDebevec()
    hdr_debevec = merge_debevec.process(images, times, response_debevec)

    logging.info('Save HDR image')
    save_file = pgal + "/" + foldername
    cv2.imwrite(save_file + ".hdr", hdr_debevec)

    # NOTE(review): the tone-mapped floats below are scaled to 8-bit range
    # without clipping -- values outside [0, 1] will wrap when saved.
    logging.info("Tonemaping using Drago's method ... ")
    tonemap_drago = cv2.createTonemapDrago(1.0, 0.7)
    ldr_drago = tonemap_drago.process(hdr_debevec)
    ldr_drago = 3 * ldr_drago  # hand-tuned brightness boost
    cv2.imwrite(save_file + "_drago.jpg", ldr_drago * 255)

    logging.info("Tonemaping using Reinhard's method ... ")
    tonemap_reinhard = cv2.createTonemapReinhard(1.5, 0, 0, 0)
    ldr_reinhard = tonemap_reinhard.process(hdr_debevec)
    cv2.imwrite(save_file + "_reinhard.jpg", ldr_reinhard * 255)

    logging.info("Tonemaping using Mantiuk's method ... ")
    tonemap_mantiuk = cv2.createTonemapMantiuk(2.2, 0.85, 1.2)
    ldr_mantiuk = tonemap_mantiuk.process(hdr_debevec)
    ldr_mantiuk = 3 * ldr_mantiuk  # hand-tuned brightness boost
    cv2.imwrite(save_file + "_mantiuk.jpg", ldr_mantiuk * 255)
def combine_hdr(images, exposures, response=None):
    """Merge an exposure bracket into a single HDR image (Debevec).

    expects a dict of color images of shape (height, width, channels), and a
    list of exposures for those respective images.
    https://docs.opencv.org/master/d2/df0/tutorial_py_hdr.html
    expects float32 exposures, in seconds

    :param response: optional precomputed camera response curve; when None,
        MergeDebevec estimates it internally.
    :return: float32 HDR image from cv.createMergeDebevec().
    """
    # Validate that the bracket is non-empty and the counts line up.
    # NOTE(review): asserts are stripped under `python -O`; callers relying on
    # this check should not run optimized.
    try:
        assert (len(exposures) == len(images) and len(exposures) != 0)
    except AssertionError as a:
        print("len(exposures)= {}\tlen(images)= {}".format(
            len(exposures), len(images)))
        raise a
    #print("images: type={}, len={}, shape={}".format(type(images), len(images), np.shape(images)))
    #print("exposures: type={}, len={}, shape={}".format(type(exposures), len(exposures), np.shape(exposures)))
    #print("response: type={}, len={}, shape={}".format(type(response), len(response), np.shape(response)))
    # NOTE(review): if `images` really is a dict, list(images) yields its
    # *keys*, not the image arrays -- confirm callers pass a sequence.
    images = list(images)
    merge_devebec = cv.createMergeDebevec()
    if response is not None:
        hdr_devebec = merge_devebec.process(images, exposures, response)
    else:
        hdr_devebec = merge_devebec.process(images, exposures)
    #tonemap = cv.createTonemap(gamma=1.0)
    #res_devebec = tonemap.process(hdr_devebec.copy())
    return hdr_devebec
def main(folder):
    """
    Reads and process the images given by the read_and_store_data function
    and creates a new hdr file
    """
    images, times = read_and_store_data(folder)
    """ Align the pictures using the brightest spots, it doesn't matter the exposures are different """
    logging.debug("Aligning pictures")
    alignMTB = cv2.createAlignMTB()
    alignMTB.process(images, images)
    """ Finds the camera response function """
    logging.debug("Calculating Camera response function")
    calibrate = cv2.createCalibrateDebevec()
    response = calibrate.process(images, times)
    """ Putting everything together into a single hdr image """
    logging.debug("Merging images...")
    merge = cv2.createMergeDebevec()
    hdr = merge.process(images, times, response)
    """ Saving the hdr file into the specified folder """
    # NOTE(review): `cwd` is presumably a module-level path -- confirm the
    # "HDR" directory exists before writing.
    hdr_folder = os.path.join(cwd, "HDR")
    logging.debug("Saving HDR at {}....".format(hdr_folder))
    cv2.imwrite(hdr_folder + "/file2.hdr", hdr)
    logging.debug("Saved file")
def HDR(_imgs_nx1, _times_nx1, method=Debevec):
    """Build an HDR image from n exposures using the selected algorithm.

    :param _imgs_nx1: stack of uint8 LDR images.
    :param _times_nx1: float32 exposure times, same length as the stack.
    :param method: one of the module-level Debevec / Robertson / Mertens flags.
    :return: (crf, hdr, tonemapped) for Debevec/Robertson; the fused image
        alone for Mertens. Falls through (returns None) for unknown methods.
    """
    assert _imgs_nx1.dtype == np.uint8 and _times_nx1.dtype == np.float32, "Type Error"
    assert len(_imgs_nx1) == len(
        _times_nx1) and len(_times_nx1) > 0, "Len Error"
    if method == Debevec:
        # Estimate the camera response from 70 randomly placed samples.
        CalibrateDebevec = cv2.createCalibrateDebevec(samples=70, random=True)
        crf = CalibrateDebevec.process(src=_imgs_nx1, times=_times_nx1)
        merge_debvec = cv2.createMergeDebevec()
        hdr_img = merge_debvec.process(src=_imgs_nx1, times=_times_nx1, response=crf)
        # NOTE(review): createTonemapDurand moved to opencv-contrib (xphoto)
        # in OpenCV 4 -- confirm the installed build still exposes it here.
        tonemap = cv2.createTonemapDurand(gamma=1.4)
        res_img = tonemap.process(hdr_img.copy())
        return crf, hdr_img, res_img
    if method == Robertson:
        CalibrateRobertson = cv2.createCalibrateRobertson()
        crf = CalibrateRobertson.process(src=_imgs_nx1, times=_times_nx1)
        merge_robertson = cv2.createMergeRobertson()
        hdr_img = merge_robertson.process(src=_imgs_nx1, times=_times_nx1, response=crf)
        #local tonermap
        tonemap = cv2.createTonemapDurand(gamma=1.4)
        res_img = tonemap.process(hdr_img.copy())
        return crf, hdr_img, res_img
    if method == Mertens:
        # Mertens fusion needs no exposure times or response curve.
        merge_mertens = cv2.createMergeMertens()
        res_img = merge_mertens.process(_imgs_nx1)
        # cv2.imshow("ss", res_img)
        # cv2.waitKey(0)
        # cv2.destroyAllWindows()
        # res_mertens_8bit = np.clip(res_img*255, 0, 255).astype('uint8')
        # cv2.imwrite("PyFusion.png", res_mertens_8bit)
        return res_img
def Rendering(img_list, exposure_times):
    """Merge an exposure bracket with Debevec and Robertson, tone map both,
    run Mertens exposure fusion, and save the three 8-bit results to disk.

    :param img_list: list of LDR images, ordered to match exposure_times.
    :param exposure_times: float32 array of exposure times in seconds.
    """
    # Merge exposures to HDR image
    merge_debvec = cv2.createMergeDebevec()
    hdr_debvec = merge_debvec.process(img_list, times=exposure_times.copy())
    merge_robertson = cv2.createMergeRobertson()
    hdr_robertson = merge_robertson.process(img_list, times=exposure_times.copy())
    # Tonemap HDR image
    # NOTE(review): createTonemapDurand requires opencv-contrib on OpenCV 4+.
    tonemap1 = cv2.createTonemapDurand(gamma=2.2)
    res_debvec = tonemap1.process(hdr_debvec.copy())
    tonemap2 = cv2.createTonemapDurand(gamma=1.3)
    res_robertson = tonemap2.process(hdr_robertson.copy())
    # Exposure fusion using Mertens
    merge_mertens = cv2.createMergeMertens()
    res_mertens = merge_mertens.process(img_list)
    # Convert datatype to 8-bit and save
    res_debvec_8bit = np.clip(res_debvec*255, 0, 255).astype('uint8')
    res_robertson_8bit = np.clip(res_robertson*255, 0, 255).astype('uint8')
    res_mertens_8bit = np.clip(res_mertens*255, 0, 255).astype('uint8')
    cv2.imwrite("ldr_debvec.jpg", res_debvec_8bit)
    cv2.imwrite("ldr_robertson.jpg", res_robertson_8bit)
    cv2.imwrite("fusion_mertens.jpg", res_mertens_8bit)
def main():
    """Composite two quantized overlay objects onto a background, then merge
    background + composite as a two-exposure HDR and tone map it (Reinhard)."""
    scene = "./images/scene1/"
    bgImage = cv2.imread(scene + "bg.png", cv2.IMREAD_COLOR)
    obj = cv2.imread(scene + "obj1.png", cv2.IMREAD_UNCHANGED)
    # Quantize the object's color channels, then re-attach its alpha channel.
    objQuant = quantize(obj[:, :, 0:3], 4)
    objQuant = cv2.cvtColor(objQuant, cv2.COLOR_RGB2RGBA)
    objQuant[:, :, 3] = obj[:, :, 3]
    out = mergeImages(bgImage, objQuant, 0.75)
    # Same treatment for the second overlay object.
    obj = cv2.imread(scene + "obj2.png", cv2.IMREAD_UNCHANGED)
    objQuant = quantize(obj[:, :, 0:3], 4)
    objQuant = cv2.cvtColor(objQuant, cv2.COLOR_RGB2RGBA)
    objQuant[:, :, 3] = obj[:, :, 3]
    out = mergeImages(out, objQuant, 0.75)
    cv2.imshow('Merge quantized', out)
    cv2.imwrite(scene + 'scene1answer.png', out)
    # Treat background and composite as two equal (0.15 s) exposures.
    debevec = cv2.createMergeDebevec()
    merged = debevec.process([bgImage, out],
                             np.array([0.15, 0.15], dtype=np.float32))
    tonemapper = cv2.createTonemapReinhard(
        0.5, 0, 0, 0)  #Gamma, intensity, light_adapt, color_adapt
    tonemapped = tonemapper.process(merged)
    cv2.imshow("Merge and mapped reinhard quantized", tonemapped)
    # Scale the float result to 8-bit for saving.
    out = cv2.convertScaleAbs(tonemapped, alpha=(255.0))
    out = out.astype('uint8')
    cv2.imwrite(scene + 'scene1.png', out)
def mergeImgs(imgs, expos):
    """Merge exposures into an HDR image and return an 8-bit tone-mapped LDR.

    :param imgs: list of LDR images.
    :param expos: exposure times in seconds (converted to float32 here).
    :return: uint8 tone-mapped image.
    """
    # Debevec = name of the HDR algorithm used for merging
    merge_debvec = cv2.createMergeDebevec()
    hdr_debvec = merge_debvec.process(imgs, times=np.array(expos, dtype=np.float32))
    # NOTE(review): createTonemapDurand requires opencv-contrib on OpenCV 4+.
    tonemap1 = cv2.createTonemapDurand(gamma=2.2)
    res_debvec = tonemap1.process(hdr_debvec.copy())
    res_debvec_8bit = np.clip(res_debvec*255, 0, 255).astype('uint8')
    return res_debvec_8bit
def generate_hdr_image():
    """Merge exposures into one HDR image with Debevec's algorithm.

    NOTE(review): `images`, `times` and `responseDebevec` are not defined in
    this scope -- presumably module-level globals; verify they exist before
    calling (compare the parameterized variant of this function).

    :return: float32 HDR image.
    """
    mergeDebevec = cv2.createMergeDebevec()
    hdrDebevec = mergeDebevec.process(images, times, responseDebevec)
    return hdrDebevec
def generate_hdr_image(images, times, responseDebevec):
    """Combine a bracketed exposure stack into a single HDR image.

    Uses Debevec's merging algorithm with a precomputed camera response
    function.

    :param images: list of LDR images, one per exposure.
    :param times: float32 exposure times matching `images`.
    :param responseDebevec: camera response curve from CalibrateDebevec.
    :return: merged float32 HDR image.
    """
    merger = cv2.createMergeDebevec()
    return merger.process(images, times, responseDebevec)
def calc_scene_hdr_psnr():
    """For each scene in the scene map, build (or load a cached) OpenCV HDR
    render, compare it against the externally rendered HDR image, and return
    a dict of PSNR values keyed by the rendered file's basename."""
    scenes_obj = parse_json_file(args.scene_map_fp)
    scenes_psnr = {}
    for i, scene in enumerate(scenes_obj):
        scene_fd = os.path.dirname(scene['ldr_fps'][0])
        scene_rend_fp = scene['rend_fp']
        scene_cv_fp = os.path.join(scene_fd,
                                   '{}_cv-hdr.jpg'.format(scene['ldr_fd']))
        # scene_ldr_fps = scene['ldr_fps']
        scene_exptime_fp = get_scene_exptime_fp(scene_fd)
        print('Processing: ' + scene_fd)
        print(' scene_rend_fp (r): ' + scene_rend_fp)
        rend_hdr_img = cv2.imread(scene_rend_fp)
        # Render HDR image using OpenCV
        if args.force or not os.path.isfile(scene_cv_fp):
            print(' scene_cv_fp (w): ' + scene_cv_fp)
            ldr_imgs = []
            ldr_exptimes = []
            # Exposure-time file: one "<path>,<seconds>" entry per line.
            with open(scene_exptime_fp, 'r') as exptimes_file:
                for line in exptimes_file:
                    vals = line.split(',')
                    ldr_imgs.append(
                        cv2.imread(
                            os.path.join(scene_fd, vals[0].split('/')[-1])))
                    ldr_exptimes.append(float(vals[1]))
            ldr_exptimes = np.array(ldr_exptimes, dtype=np.float32)
            # Estimate Camera response function, merge exposures
            calib_debevec = cv2.createCalibrateDebevec()
            resp_debevec = calib_debevec.process(ldr_imgs, ldr_exptimes)
            merge_debevec = cv2.createMergeDebevec()
            hdr_debevec = merge_debevec.process(ldr_imgs, ldr_exptimes,
                                                resp_debevec)
            # Tone mapping
            tone_map = cv2.createTonemapReinhard(1.5, 0, 0, 0)
            cv_hdr_img = tone_map.process(hdr_debevec)
            cv2.imwrite(scene_cv_fp, cv_hdr_img * 255.0)
        else:
            print(' scene_cv_fp (r): ' + scene_cv_fp)
            # NOTE(review): the cached branch yields a uint8 image while the
            # fresh branch yields floats scaled by 255 -- confirm img_psnr
            # handles both ranges consistently.
            cv_hdr_img = cv2.imread(scene_cv_fp)
        # Calculate PSNR
        cv_hdr_img = cv2.resize(cv_hdr_img,
                                dsize=(rend_hdr_img.shape[1],
                                       rend_hdr_img.shape[0]),
                                interpolation=cv2.INTER_CUBIC)
        scene_rend_fn = scene_rend_fp.split('/')[-1]
        scenes_psnr[scene_rend_fn] = img_psnr(cv_hdr_img, rend_hdr_img)
    return scenes_psnr
def merge2HDR(self, input_path, output_path, exp_times, verbosity):
    """
    ***************************************************************
    Function to merge Debayered images into HDR images
    :param input_path: the debayered images path
    :param output_path: the output HDR path
    :param exp_times: the input exposure times in seconds
    :param verbosity: show individual file progress
    :return: <none>
    NOTE: Function uses Debevec's merging algorithm to merge
          exposures to HDR file.
    ***************************************************************
    """
    # global starting number of the HDR frames
    hdrnum = 0

    # list all files in the debayer folder
    filelist = [
        filename for dirpath, dirnames, filenames in os.walk(input_path)
        for filename in filenames if filename.endswith('.jpg')
    ]

    # check whether directory exists else create a directory
    if not os.path.exists(output_path):
        os.makedirs(output_path)

    # process debayer images to HDR images, one bracket per iteration
    for i in range(0, len(filelist), len(exp_times)):
        exposures = []

        # read the exposures and append a list of exposures to be merged.
        # Fix: the bracket size was hard-coded as `i + 4`, which breaks (or
        # silently mis-groups) whenever len(exp_times) != 4; use the same
        # bracket size as the outer loop's stride.
        for j in range(i, i + len(exp_times)):
            filename = os.path.join(input_path, filelist[j])
            ldr_image = cv2.imread(filename, cv2.IMREAD_COLOR)
            ldr_image = cv2.cvtColor(ldr_image, cv2.COLOR_BGR2RGB)
            exposures.append(ldr_image)

        # align the exposure list
        alignMTB = cv2.createAlignMTB()
        alignMTB.process(exposures, exposures)

        # obtain camera response function
        calibrateDebevec = cv2.createCalibrateDebevec()
        responseDebevec = calibrateDebevec.process(exposures, exp_times)

        # create HDR from camera response
        mergeDebevec = cv2.createMergeDebevec()
        hdr = mergeDebevec.process(exposures, exp_times, responseDebevec)

        # set output file name and write exr (we use a separate exr because OpenCV EXR is not compressed)
        outfilename = os.path.join(output_path, '{0:05d}.exr'.format(hdrnum))
        self.writeEXR(hdr, outfilename)

        if verbosity == 1:
            print('HDR file: {0} merged..'.format(outfilename))

        hdrnum += 1

    return
def process_debevec(images, exposures):
    """Estimate the camera response curve and merge exposures into one HDR
    image using Debevec's algorithm.

    :param images: list of LDR images.
    :param exposures: float32 exposure times, one per image.
    :return: float32 HDR image.
    """
    calibrate_debevec = cv2.createCalibrateDebevec()
    # Fix: this previously called `calibrateDebevec.process(...)`, a NameError
    # -- the local variable is `calibrate_debevec`.
    response_debevec = calibrate_debevec.process(images, times=exposures)
    merge_debevec = cv2.createMergeDebevec()
    return merge_debevec.process(
        images,
        times=exposures,
        response=response_debevec
    )
def main():
    """Capture a two-exposure bracket from the live camera stream, merge it
    with Debevec / Robertson / Mertens, and display the results until 'q'."""
    global client, img
    images = []
    delta = 25  # EV offset applied below/above the auto exposure value
    set_param('auto_exposure', True)
    set_param('auto_frame_rate', True)
    ev_auto = get_param('exposure')
    print("EV_auto: {0}".format(ev_auto))
    set_param('auto_exposure', False)
    # exposure = [ev_auto - delta, ev_auto, ev_auto + delta]
    exposure = [ev_auto - delta, ev_auto + delta]
    for ev in exposure:
        t = time.time()
        set_param('exposure', int(ev))
        delta_t = time.time() - t
        print("time: {0}".format(delta_t))
        time.sleep(1)  # give the camera time to apply the new exposure
        name = 'image exposure :' + str(ev)
        images.append(img.copy())
        # EV = log2(f^2 / t)
        # et = math.pow(f, 2.0) / math.pow(2.0, ev)
        cv2.imshow(name, img.copy())
    # NOTE(review): the raw EV values are passed as exposure *times*; true
    # times would come from the EV formula above -- confirm intended.
    exposure_times = np.array(exposure, dtype=np.float32)
    # debvec
    merge_debvec = cv2.createMergeDebevec()
    hdr_debvec = merge_debvec.process(images, times=exposure_times.copy())
    # robertson
    merge_robertson = cv2.createMergeRobertson()
    hdr_robertson = merge_robertson.process(images, times=exposure_times.copy())
    # NOTE(review): createTonemapDurand requires opencv-contrib on OpenCV 4+.
    tonemap1 = cv2.createTonemapDurand(gamma=2.2)
    res_debvec = tonemap1.process(hdr_debvec.copy())
    tonemap2 = cv2.createTonemapDurand(gamma=1.3)
    res_robertson = tonemap2.process(hdr_robertson.copy())
    # mertens not
    merge_mertens = cv2.createMergeMertens()
    res_mertens = merge_mertens.process(images)
    cv2.imshow('debvec', res_debvec)
    cv2.imshow('robertson', res_robertson)
    cv2.imshow('mertens', res_mertens)
    # Keep the windows open until 'q' is pressed.
    while True:
        key = cv2.waitKey(1) & 0xff
        if key == ord('q'):
            break
def doneByOpenCV():
    """Reference HDR + tone-mapping pipeline implemented purely with OpenCV,
    used for comparison against the project's own implementation. Writes the
    HDR and a Drago tone-mapped JPEG into RESULT_DIR."""
    print('doing HDR and TM by openCV')
    images, shutters, max_shift = readInfo()
    images = readImg(images)
    calibrateDebevec = cv2.createCalibrateDebevec()
    responseDebevec = calibrateDebevec.process(images, shutters)
    mergeDebevec = cv2.createMergeDebevec()
    hdrDebevec = mergeDebevec.process(images, shutters, responseDebevec)
    print('HDR max:%.2f, min:%.2f' % (hdrDebevec.max(), hdrDebevec.min()))
    cv2.imwrite('%s/hdr_%d_OpenCV.hdr' % (RESULT_DIR, IMG_NUM), hdrDebevec)
    tonemapDrago = cv2.createTonemapDrago(1, 0.8)  # hand set params
    ldrDrago = tonemapDrago.process(hdrDebevec)
    ldrDrago = 255 * ldrDrago * 1.5  # hand set params
    print('LDR max:%.2f, min:%.2f' % (ldrDrago.max(), ldrDrago.min()))
    cv2.imwrite('%s/ldr_%d_OpenCV.jpg' % (RESULT_DIR, IMG_NUM), ldrDrago)
def CRF_merge_Debevec(paths, exposures):
    """Load images from `paths`, merge them into an HDR image (Debevec), tone
    map with Mantiuk's operator, and save an 8-bit JPEG.

    :param paths: iterable of image paths (converted to str for imread).
    :param exposures: exposure times in seconds (converted to float32).
    """
    # Loading exposure images into a list
    img_fn = paths
    img_list = [cv.imread(str(fn)) for fn in img_fn]
    exposure_times = np.array(exposures, dtype=np.float32)
    # Merge exposures to HDR image
    merge_debevec = cv.createMergeDebevec()
    hdr_debevec = merge_debevec.process(img_list, times=exposure_times.copy())
    # Tonemap HDR image
    tonemap = cv.createTonemapMantiuk()
    res_debevec = tonemap.process(hdr_debevec.copy())
    # Convert datatype to 8-bit and save
    res_debevec_8bit = np.clip(res_debevec * 255, 0, 255).astype('uint8')
    cv.imwrite("img/LDR_debevec.jpg", res_debevec_8bit)
def run():
    """HDR pipeline over settings.BASE_DIR: calibrate, merge, tone map, and
    save a Mertens exposure-fusion PNG into settings.HDR_ROOT."""
    images, times = loadExposureSeq(settings.BASE_DIR)
    calibrate = cv.createCalibrateDebevec()
    response = calibrate.process(images, times)
    merge_debevec = cv.createMergeDebevec()
    hdr = merge_debevec.process(images, times, response)
    # NOTE(review): `ldr` is computed but never saved -- only the Mertens
    # fusion result is written below.
    tonemap = cv.createTonemap(2.2)
    ldr = tonemap.process(hdr)
    merge_mertens = cv.createMergeMertens()
    fusion = merge_mertens.process(images)
    # `date_time` is presumably a module-level timestamp string -- confirm.
    out_file_name = 'fusion' + date_time + '.png'
    OUT_FILE = os.path.join(settings.HDR_ROOT, out_file_name)
    cv.imwrite(OUT_FILE, fusion * 255)
def __init__(self, ip_pi):
    """Image-processing thread setup: exposure-fusion/HDR operators, gamma
    lookup tables, and an optional calibration table from calibrate.npz.

    :param ip_pi: address of the Raspberry Pi camera source (stored as-is).
    """
    QThread.__init__(self)
    self.threadID = 1
    self.name = "ImgThread"
    self.window = None
    self.saveOn = False
    self.mergeMertens = cv2.createMergeMertens(
        0, 1, 1)  #contrast saturation exposure
    # self.mergeMertens = cv2.createMergeMertens()
    # print("Contrast:",self.mergeMertens.getContrastWeight())
    # print("Saturation:",self.mergeMertens.getSaturationWeight())
    # print("Exposure:",self.mergeMertens.getExposureWeight())
    self.mergeDebevec = cv2.createMergeDebevec()
    self.calibrateDebevec = cv2.createCalibrateDebevec()
    # self.toneMap = cv2.createTonemapReinhard(gamma=1.)
    self.toneMap = cv2.createTonemapDrago()
    # self.linearTonemap = cv2.createTonemap(1.)  #Normalize with Gamma 1.2
    # self.toneMap = cv2.createTonemapMantiuk()
    # self.claheProc = cv2.createCLAHE(clipLimit=1, tileGridSize=(8,8))
    # self.simpleWB = cv2.xphoto.createSimpleWB()
    # self.simpleWB = cv2.xphoto.createGrayworldWB()
    # self.wb= False
    # self.equalize = False
    # self.clahe = False
    # self.clipLimit = 1.
    # self.alignMTB = cv2.createAlignMTB()
    # 8-bit LUT applying inverse gamma (exponent 0.45 ~ 1/2.2).
    self.invgamma = np.empty((1, 256), np.uint8)
    for i in range(256):
        self.invgamma[0, i] = np.clip(pow(i / 255.0, 0.45) * 255.0, 0, 255)
    # 8-bit LUT applying forward gamma (exponent 2.2).
    self.gamma = np.empty((1, 256), np.uint8)
    for i in range(256):
        self.gamma[0, i] = np.clip(pow(i / 255.0, 2.2) * 255.0, 0, 255)
    self.reduceFactor = 1
    self.ip_pi = ip_pi
    self.hflip = False
    self.vflip = False
    self.table = None
    self.doCalibrate = False
    # Best-effort load of a saved calibration table; absence is not an error.
    try:
        npz = np.load("calibrate.npz")
        self.table = npz['table']
    except Exception as e:
        pass
def test_merge(self):
    """Compare the project's Debevec merge (`_merge`) against OpenCV's
    MergeDebevec: times both, then asserts the outputs are numerically
    close. Inputs come from module-level globals prepared elsewhere."""
    global images, times, resDebevec, hdrDebevec
    print("Merging images into one HDR image ... ")
    mergeDebevec = cv2.createMergeDebevec()
    tStart = time.time()
    hdrDebevec = mergeDebevec.process(images, times, resDebevec)
    tEnd = time.time()
    tOril = tEnd - tStart  # OpenCV reference runtime
    tStart = time.time()
    _merge.process(images, times, resDebevec)
    tEnd = time.time()
    tOur = tEnd - tStart   # project implementation runtime (compute only)
    # The project implementation writes its result to "result.ext"; read it
    # back for comparison (read time is tracked separately in tOurdat).
    cv_file = cv2.FileStorage("result.ext", cv2.FILE_STORAGE_READ)
    hdrDebevec_our = cv_file.getNode("result").mat()
    tdatEnd = time.time()
    tOurdat = tdatEnd - tStart
    print("tOril:", tOril)
    print("tOur:", tOur)
    print("tOurdata:", tOurdat)
    self.assertEqual(np.allclose(hdrDebevec_our, hdrDebevec), True)
def main():
    """Align a bracketed exposure stack, recover the CRF, merge to HDR, and
    write tone-mapped previews using four different operators."""
    # Read the multiple-exposure input images
    images, times = readImagesAndTimes()
    # Align the images
    alignMTB = cv2.createAlignMTB()
    alignMTB.process(images, images)
    # Recover the camera response function
    calibrateDebevec = cv2.createCalibrateDebevec()
    responseDebevec = calibrateDebevec.process(images, times)
    # Merge the exposures into a single HDR image
    mergeDebevec = cv2.createMergeDebevec()
    hdrDebevec = mergeDebevec.process(images, times, responseDebevec)
    # Save the merged result (it can be opened with e.g. Photoshop)
    cv2.imwrite("hdrDebevec.hdr", hdrDebevec)
    # Tonemap using Drago's method to obtain 24-bit color image
    tonemapDrago = cv2.createTonemapDrago(1.0, 0.7)
    ldrDrago = tonemapDrago.process(hdrDebevec)
    ldrDrago = 3 * ldrDrago  # hand-tuned brightness boost
    cv2.imwrite("ldr-Drago.jpg", ldrDrago * 255)
    # Tonemap using Durand's method obtain 24-bit color image
    # NOTE(review): createTonemapDurand requires opencv-contrib on OpenCV 4+.
    tonemapDurand = cv2.createTonemapDurand(1.5, 4, 1.0, 1, 1)
    ldrDurand = tonemapDurand.process(hdrDebevec)
    ldrDurand = 3 * ldrDurand
    cv2.imwrite("ldr-Durand.jpg", ldrDurand * 255)
    # Tonemap using Reinhard's method to obtain 24-bit color image
    tonemapReinhard = cv2.createTonemapReinhard(1.5, 0, 0, 0)
    ldrReinhard = tonemapReinhard.process(hdrDebevec)
    cv2.imwrite("ldr-Reinhard.jpg", ldrReinhard * 255)
    # Tonemap using Mantiuk's method to obtain 24-bit color image
    tonemapMantiuk = cv2.createTonemapMantiuk(2.2, 0.85, 1.2)
    ldrMantiuk = tonemapMantiuk.process(hdrDebevec)
    ldrMantiuk = 3 * ldrMantiuk
    cv2.imwrite("ldr-Mantiuk.jpg", ldrMantiuk * 255)
def main():
    """HDR pipeline: align, calibrate, merge, save, and tone map (Drago)."""
    frames, shutter_times = readImagesAndTimes()

    # Register the bracket in place using median-threshold bitmaps.
    aligner = cv2.createAlignMTB()
    aligner.process(frames, frames)

    # Recover the camera response function from the aligned bracket.
    calibrator = cv2.createCalibrateDebevec()
    crf = calibrator.process(frames, shutter_times)

    # Combine exposures into a linear HDR radiance map and persist it.
    hdr = cv2.createMergeDebevec().process(frames, shutter_times, crf)
    cv2.imwrite("hdrDebevec.hdr", hdr)

    # Tone map with Drago's operator; the 3x gain reproduces the original's
    # hand-tuned brightness boost before writing the 8-bit preview.
    ldr = 3 * cv2.createTonemapDrago(1.0, 0.7).process(hdr)
    cv2.imwrite("ldr-Drago.jpg", ldr * 255)
def hdr(imgNames, exposures, writeName, user_id):
    """Merge uploaded exposure-bracketed images into one tone-mapped HDR
    photo, save it in the upload folder, register it in the DB, kick off
    tagging, and delete the source images.

    :param imgNames: filenames of the uploaded exposures (in UPLOAD_FOLDER).
    :param exposures: exposure times, converted to float32 below.
    :param writeName: filename for the merged result.
    :param user_id: owner recorded in the database.
    """
    writePath = os.path.join(app.config['UPLOAD_FOLDER'], writeName)
    imgPaths = [os.path.join(app.config['UPLOAD_FOLDER'], imgName) for imgName in imgNames]
    imgs = [cv2.imread(imgPath) for imgPath in imgPaths]
    # imgs = imageAlignment(images)
    merge_debvec = cv2.createMergeDebevec()
    exposures = np.array(exposures, dtype=np.float32)
    hdr_debvec = merge_debvec.process(imgs, times=exposures.copy())
    # NOTE(review): createTonemapDurand requires opencv-contrib on OpenCV 4+,
    # and the *255 result is written without clipping to [0, 255].
    tonemap = cv2.createTonemapDurand(gamma=2)
    res = tonemap.process(hdr_debvec)
    cv2.imwrite(writePath, res * 255)
    photo_id = addToDB(writeName, 'HDR', user_id)
    createThumbnail(writeName, photo_id)
    # .delay suggests an async task queue (presumably Celery) -- confirm.
    autoTag.delay(writePath, photo_id)
    # Remove the source exposures now that the merged photo is stored.
    for path in imgPaths:
        os.remove(path)
def main():
    """Load a three-exposure bracket, merge it to HDR (Debevec), and compare
    linear / gamma / Reinhard tone mappings on screen; saves the Reinhard
    result."""
    # input multiple exposure images
    folderNM = './img/europe/'
    fileNM = ["low.jpg", "middle.jpg", "high.jpg"]
    imgs = [cv2.imread(folderNM + fn) for fn in fileNM]
    # auto discover exposure time from Exif information of input image file
    exposureTimes = np.array([getExifInfo(folderNM + fn) for fn in fileNM],
                             dtype=np.float32)
    # if Exif information is None, you should set exposure time manually
    # exposureTimes = np.array([15.0, 2.5, 0.25, 0.0333], dtype=np.float32)
    # Estimate camera response function (CRF)
    mergeDebevec = cv2.createMergeDebevec()
    hdr = mergeDebevec.process(imgs, times=exposureTimes.copy())
    hdr[hdr < 0] = 0  # clamp negative radiance values before saving
    cv2.imwrite("res/hdrDebevec.hdr", hdr)
    # linear
    res1 = 255 * hdr / hdr.max()
    # gamma
    res2 = 255 * (hdr / hdr.max())**0.2
    # reinhardTM_global
    tonemap = cv2.createTonemapReinhard(gamma=1.5)
    res3 = tonemap.process(hdr.copy())
    res3 = percentileCut(res3, 0.5, 99.9) * 255
    cv2.imshow('result_linear', res1.astype(np.uint8))
    cv2.imshow('result_gamma', res2.astype(np.uint8))
    cv2.imshow('result_reinhard', res3.astype(np.uint8))
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    cv2.imwrite("res/tonemappedImg.png", res3)
def createHDR(self):
    """Build an HDR image from self.im_list using EXIF exposure times and
    write "hdr.hdr" plus a Reinhard tone-mapped preview ("hdr_preview.jpg").
    """
    # Numpy array of exposure times from exif data
    for image in self.image_paths:
        p = Photo(image)
        self.exposure_times.append(p.exifData())
    times_np = np.asarray(self.exposure_times, dtype=np.float32)

    # Align Images
    alignMTB = cv.createAlignMTB()
    alignMTB.process(self.im_list, self.im_list)

    # Find Camera Response Curve
    calibrateDebevec = cv.createCalibrateDebevec()
    responseDebevec = calibrateDebevec.process(self.im_list, times_np)

    # Merge Images
    mergeDebevec = cv.createMergeDebevec()
    hdrDebevec = mergeDebevec.process(self.im_list, times_np, responseDebevec)

    # Generate HDR image and LDR tone mapped preview
    cv.imwrite("hdr.hdr", hdrDebevec)
    toneMapReinhard = cv.createTonemapReinhard(1.5, 0.0)
    ldrReinhard = toneMapReinhard.process(hdrDebevec)
    cv.imwrite("hdr_preview.jpg", ldrReinhard * 255)
images, times = readImagesAndTimes() # Align input images print("Aligning images ... ") alignMTB = cv2.createAlignMTB() alignMTB.process(images, images) # Obtain Camera Response Function (CRF) print("Calculating Camera Response Function (CRF) ... ") calibrateDebevec = cv2.createCalibrateDebevec() responseDebevec = calibrateDebevec.process(images, times) # Merge images into an HDR linear image print("Merging images into one HDR image ... ") mergeDebevec = cv2.createMergeDebevec() hdrDebevec = mergeDebevec.process(images, times, responseDebevec) # Save HDR image. cv2.imwrite("./images/HDR/hdrDebevec-example.hdr", hdrDebevec) print("saved hdrDebevec.hdr ") # Tonemap using Drago's method to obtain 24-bit color image print("Tonemaping using Drago's method ... ") tonemapDrago = cv2.createTonemapDrago(1.0, 0.7) ldrDrago = tonemapDrago.process(hdrDebevec) # The final output is multiplied by 3 just because it gave the most pleasing results. ldrDrago = 3 * ldrDrago cv2.imwrite("./images/HDR/ldr-Drago-example.jpg", ldrDrago * 255) cv2.imshow("ldr-Drago", ldrDrago) print("saved ldr-Drago.jpg")
# OpenCV HDR tutorial script: merge four fixed exposures with Debevec and
# Robertson, tone map both, run Mertens fusion, and save 8-bit results.
import cv2 as cv
import numpy as np

# Loading exposure images into a list
img_fn = ["img0.jpg", "img1.jpg", "img2.jpg", "img3.jpg"]
img_list = [cv.imread(fn) for fn in img_fn]
exposure_times = np.array([15.0, 2.5, 0.25, 0.0333], dtype=np.float32)

# Merge exposures to HDR image
merge_debevec = cv.createMergeDebevec()
hdr_debevec = merge_debevec.process(img_list, times=exposure_times.copy())
merge_robertson = cv.createMergeRobertson()
hdr_robertson = merge_robertson.process(img_list, times=exposure_times.copy())

# Tonemap HDR image
tonemap1 = cv.createTonemap(gamma=2.2)
res_debevec = tonemap1.process(hdr_debevec.copy())
tonemap2 = cv.createTonemap(gamma=1.3)
# NOTE(review): hdr_robertson is passed without .copy(), unlike hdr_debevec
# above -- confirm the asymmetry is intended.
res_robertson = tonemap2.process(hdr_robertson)

# Exposure fusion using Mertens
merge_mertens = cv.createMergeMertens()
res_mertens = merge_mertens.process(img_list)

# Convert datatype to 8-bit and save
res_debevec_8bit = np.clip(res_debevec * 255, 0, 255).astype('uint8')
res_robertson_8bit = np.clip(res_robertson * 255, 0, 255).astype('uint8')
res_mertens_8bit = np.clip(res_mertens * 255, 0, 255).astype('uint8')
cv.imwrite("hdr_debevec.jpg", res_debevec_8bit)
cv.imwrite("hdr_robertson.jpg", res_robertson_8bit)
cv.imwrite("fusion_mertens.jpg", res_mertens_8bit)
def merge_HDR(y_dm_predict, y_um_predict, x_in, alpha):
    '''
    Blend a log-domain HDR estimate with an OpenCV Debevec merge of the
    predicted exposure stack, per batch item.

    :param y_dm_predict: network predictions for the "down" exposures.
    :param y_um_predict: network predictions for the "up" exposures.
    :param x_in: input batch, shape (batch, height, width, channels).
    :param alpha: blend weight between the Debevec merge (alpha) and the
        log-domain reconstruction (1 - alpha).
    :return: list of blended, max-normalized HDR images (one per batch item).
    '''
    [batch_size, height_size, width_size, channels_size] = np.shape(x_in)
    # batch_size, height_size, width_size, channels_size = x_in.get_shape().as_list()
    # Reshape predictions to (batch, n_exposures, H, W, C).
    y_dm_predict = np.reshape(y_dm_predict, (batch_size, -1, height_size, width_size, channels_size))
    y_um_predict = np.reshape(y_um_predict, (batch_size, -1, height_size, width_size, channels_size))
    y_dm_predict = np.flip(y_dm_predict, axis=1)
    # Middle "base" exposure: average of the two central predictions.
    x_base = np.add(y_dm_predict[:, -1, :, :, :], y_um_predict[:, 0, :, :, :]) / 2.0
    x_base = np.expand_dims(x_base, axis=1)
    # print('x_base.shape', np.shape(x_base))
    temp = np.concatenate([y_dm_predict, x_base], axis=1)
    out_img_list_total = np.concatenate([temp, y_um_predict], axis=1)
    # print('out_img_list_total.len', np.shape(out_img_list_total))
    merge_final_norm = list()
    merge_final = list()
    log_final = list()
    debevec_final = list()
    debevec_reverse = list()
    # new code
    for i in range(batch_size):
        # print('i', i)
        out_img_list = out_img_list_total[i].astype(np.float32)
        # log domain merge
        prev_img_log_mean = (out_img_list[7].astype(np.float32) + out_img_list[9].astype(
            np.float32)) / 2
        # b = np.log10(200 + 1)  # for Fairchild use 200, for NewHDR use 27
        b = 1
        # up_log = np.power(10, out_img_list[9].astype(np.float32) * b) - 1
        # Back from the log domain to the linear domain (still normalized).
        pre_img_hdr = np.power(10, prev_img_log_mean * b) - 1
        # unit8 domain merge
        out_img_list = np.delete(out_img_list, [7, 9], axis=0)
        # print('out_img_list.len', len(out_img_list))
        out_img_list = (255. * out_img_list).astype(np.float32)
        threshold = 64  # default value is 64
        stid = 0
        # print('out_img_list.len',len(out_img_list))
        # Take the middle image as the reference baseline.
        prev_img = out_img_list[7].astype(np.float32)
        out_img_list = np.flip(out_img_list, axis=0)  # bright to dark
        for out_img in out_img_list[8:]:
            img = out_img.astype(np.float32)
            if (img > (prev_img + threshold)).sum() > 0:
                break
            prev_img = img[:, :, :]
            stid += 1
        edid = 0
        prev_img = out_img_list[7].astype(np.float32)
        out_img_list = np.flip(out_img_list, axis=0)  # dark to bright
        for out_img in out_img_list[8:]:
            img = out_img.astype(np.float32)
            if (img < (prev_img - threshold)).sum() > 0:
                break
            prev_img = img[:, :, :]
            edid += 1
        inputs = list()
        # Keep the images within threshold on both sides of the middle one.
        out_img_list_ = out_img_list[7 - stid:8 + edid]
        exposure_times = list()
        # Assumed shortest exposure time; each step multiplies exposure by
        # sqrt(2). Original default in the code base was 1/1024.
        lowest_exp_time = 1 / 32.
        # NOTE(review): this inner loop reuses `i` from the outer batch loop;
        # iteration is unaffected in Python, but renaming would be safer.
        for i in range(len(out_img_list_)):
            inputs.append(out_img_list_[i].astype(np.uint8))
            exposure_times.append(lowest_exp_time * np.power(np.sqrt(2.), i))
        exposure_times = np.array(exposure_times).astype(np.float32)
        # print('exposure_times.len',len(exposure_times))
        merge_debvec = cv2.createMergeDebevec()
        hdr_debvec = merge_debvec.process(inputs, times=exposure_times.copy())
        # hdr_debvec = hdr_debvec[..., ::-1]
        # Blend log-domain and Debevec HDR estimates, each max-normalized.
        merge_final_debvec_temp = (1 - alpha) * pre_img_hdr / np.max(pre_img_hdr) + alpha * hdr_debvec / np.max(hdr_debvec)
        merge_final_norm.append(merge_final_debvec_temp)
        # merge_final_debvec_gamma = np.power(merge_final_debvec_temp, 2.2)
        # merge_final.append(merge_final_debvec_gamma)
        # log_final.append(pre_img_hdr)
        # log_final.append(up_log)
        # debevec_final.append(hdr_debvec_rgb)
    # return merge_final_norm, merge_final, log_final, debevec_final
    return merge_final_norm
def mergeImages(images, times, responseDebevec):
    """Merge exposures via Debevec's algorithm and write the result to
    "hdr.hdr".

    :param images: list of LDR exposures.
    :param times: float32 exposure times matching `images`.
    :param responseDebevec: precomputed camera response curve.
    """
    hdr = cv.createMergeDebevec().process(images, times, responseDebevec)
    cv.imwrite("hdr.hdr", hdr)
def recover_camera_response(inputFolder = '.', exposureListFile = None,
                            outputResponseCurve = "camera_response.spi1d",
                            outputResponseFormat = "spi1d",
                            calibrationApproach = "berkeley",
                            mergeExposures = False,
                            mergedExposuresOutput = None,
                            verbose = False,
                            robertsonMaxIter = 30.0,
                            robertsonThreshold = 0.01,
                            berkeleyLambda = 20.0,
                            berkeleySamples = 1024,
                            berkeleySamplePlacementRandom = False):
    """Recover a camera response curve from a bracketed exposure series.

    Images and shutter speeds come either from ``exposureListFile`` (lines of
    "path 1/shutter") or by scanning ``inputFolder`` and reading EXIF via
    ``getShutterSpeed``. The curve is estimated with OpenCV's Robertson or
    Debevec ("berkeley") calibration and written as .spi1d or raw triplets;
    optionally the exposures are merged to an HDR image.

    :param calibrationApproach: "robertson" or anything else for Debevec
    :param mergedExposuresOutput: if set, path to write the merged HDR image
    :param robertsonMaxIter: iteration cap; cast to int before use because
        OpenCV's setMaxIter expects an integer (default was the float 30.0)
    :return: None; writes ``outputResponseCurve`` (and optionally the merge)

    NOTE(review): ``mergeExposures`` is accepted but never read -- merging is
    gated on ``mergedExposuresOutput`` alone; kept for interface compatibility.
    """
    extensions = generalExtensions

    # --- collect image paths and exposure times --------------------------
    if exposureListFile:
        with open(exposureListFile, 'r') as f:
            exposuresList = f.readlines()
        exposuresList = [x.strip() for x in exposuresList if len(x) > 1]
        imageUris = [x.split(' ')[0] for x in exposuresList]
        # File stores shutter denominators; convert to seconds.
        exposure_times = [1.0/float(x.split(' ')[1]) for x in exposuresList]
        cwd = None  # no directory change needed in this branch
    else:
        imageUris = sorted( os.listdir( inputFolder ) )
        imageUris = [x for x in imageUris
                     if (os.path.splitext(x)[-1].lower()[1:] in extensions) and (x[0] != '.')]
        if verbose:
            print( imageUris )
        cwd = os.getcwd()
        os.chdir( inputFolder )

    # try/finally guarantees the working directory is restored even if
    # EXIF reading, sorting, or image loading raises (the original leaked
    # the chdir on any error).
    try:
        if cwd is not None:
            exposure_times = [getShutterSpeed( uri, verbose=verbose ) for uri in imageUris]

        # List has to be sorted from longest shutter speed to shortest for
        # the OpenCV calibrate/merge functions to work.
        exposure_times, imageUris = (list(x) for x in zip(*sorted(zip(exposure_times, imageUris))))
        imageUris.reverse()
        exposure_times.reverse()
        exposure_times = np.array(exposure_times, dtype=np.float32)

        if verbose:
            for exposure in zip(exposure_times, imageUris):
                print( "Image : %s, Shutter speed : %2.6f" % (exposure[1], exposure[0]) )

        img_list = [cv2.imread(fn) for fn in imageUris]
    finally:
        if cwd is not None:
            os.chdir( cwd )

    # --- configure calibration + merge operators -------------------------
    if calibrationApproach == "robertson":
        merge = cv2.createMergeRobertson()
        calibrate = cv2.createCalibrateRobertson()
        # setMaxIter expects an int; the default parameter is a float.
        calibrate.setMaxIter(int(robertsonMaxIter))
        calibrate.setThreshold(robertsonThreshold)
        if verbose:
            print( calibrationApproach )
            print( "\tmax iter : %d" % robertsonMaxIter )
            print( "\tthreshold : %f" % robertsonThreshold )
    else:
        merge = cv2.createMergeDebevec()
        calibrate = cv2.createCalibrateDebevec()
        calibrate.setLambda(berkeleyLambda)
        calibrate.setSamples(int(berkeleySamples))
        calibrate.setRandom(berkeleySamplePlacementRandom)
        if verbose:
            print( calibrationApproach )
            print( "\tlambda : %3.2f" % berkeleyLambda )
            print( "\tsamples : %d" % berkeleySamples )
            print( "\trandom : %s" % berkeleySamplePlacementRandom )

    if verbose:
        print( "recovering camera response" )
    curve = calibrate.process(img_list, times=exposure_times)

    # --- write the recovered curve ---------------------------------------
    if verbose:
        print( "writing camera response - %s, %s" % (outputResponseFormat, outputResponseCurve) )
    if outputResponseFormat == "spi1d":
        with open(outputResponseCurve, "w") as f:
            f.write( "Version 1\n" )
            f.write( "From 0.000000 1.000000\n" )
            f.write( "Length 256\n" )
            f.write( "Components 3\n" )
            f.write( "{\n" )
            # 0.18 scales the curve so middle grey maps near scene-referred 0.18.
            for i in range(len(curve)):
                f.write( "%3.6f %3.6f %3.6f\n" % (curve[i][0][0]*0.18, curve[i][0][1]*0.18, curve[i][0][2]*0.18) )
            f.write( "}\n" )
    else:
        with open(outputResponseCurve, "w") as f:
            for i in range(len(curve)):
                f.write( "%3.6f %3.6f %3.6f\n" % (curve[i][0][0], curve[i][0][1], curve[i][0][2]) )

    # --- optional HDR merge ----------------------------------------------
    if mergedExposuresOutput:
        if verbose:
            print( "merging exposures" )
        hdr = merge.process(img_list, times=exposure_times.copy(), response=curve.copy())
        # NOTE(review): cv2.imwrite needs an HDR-capable extension (.hdr/.exr)
        # for float data -- confirm callers pass one.
        cv2.imwrite(mergedExposuresOutput, hdr)
""" import cv2 import numpy as np # 第一阶段只是将所有图像加载到列表中。此外,我们将需要常规HDR算法的曝光时间。注意数据类型,因为图像应为1通道或3通道8位(np.uint8),曝光时间需要为float32,以秒为单位。 # Loading exposure images into a list img_fn = ["1tl.jpg", "2tr.jpg", "3bl.jpg", "4br.jpg"] img_list = [cv2.imread(fn) for fn in img_fn] exposure_times = np.array([15.0, 2.5, 0.25, 0.0333], dtype=np.float32) # Merge exposures to HDR image # 在这个阶段,我们将曝光序列合并成一个HDR图像,显示了我们在OpenCV中的两种可能性。第一种方法是Debvec,第二种是Robertson。请注意,HDR图像的类型为float32,而不是uint8,因为它包含所有曝光图像的完整动态范围。 merge_debvec = cv2.createMergeDebevec() hdr_debvec = merge_debvec.process(img_list, times=exposure_times.copy()) merge_robertson = cv2.createMergeRobertson() hdr_robertson = merge_robertson.process(img_list, times=exposure_times.copy()) # Tonemap HDR image # 我们将32位浮点HDR数据映射到范围[0..1]。实际上,在某些情况下,值可能大于1或低于0,所以注意我们以后不得不剪切数据,以避免溢出。 tonemap1 = cv2.createTonemapDurand(gamma=2.2) res_debvec = tonemap1.process(hdr_debvec.copy()) tonemap2 = cv2.createTonemapDurand(gamma=1.3) res_robertson = tonemap2.process(hdr_robertson.copy()) # Exposure fusion using Mertens # 这里我们展示了一种可以合并曝光图像的替代算法,我们不需要曝光时间。我们也不需要使用任何tonemap算法,因为Mertens算法已经给出了[0..1]范围内的结果。 merge_mertens = cv2.createMergeMertens() res_mertens = merge_mertens.process(img_list)
http://pages.cs.wisc.edu/~csverma/CS766_09/HDRI/hdr.html ''' import cv2 import numpy as np import matplotlib.pyplot as plt # Load the images and exposure time into list img_name = ["1.jpg", "2.jpg", "3.jpg", "4.jpg", "5.jpg", "6.jpg", "7.jpg", "8.jpg", "9.jpg"] #put the image into list img_list = [cv2.imread(im) for im in img_name] #save the exposure time into array exposure_times = np.array([2, 1, 0.5, 0.25, 0.1667, 0.1, 0.0667, 0.0333, 0.1667], dtype=np.float32) # Merge images based on Debevec Algorithm debe1 = cv2.createMergeDebevec() debe_hdr = debe1.process(img_list, times=exposure_times.copy()) #merge_robertson = cv2.createMergeRobertson() #hdr_robertson = merge_robertson.process(img_list, times=exposure_times.copy()) # Do tone mapping based on Reinhard algorithm and do the gamma correction #tune the gamma value tonemap1 = cv2.createTonemapDurand(gamma=2.2) res_debvec1 = tonemap1.process(debe_hdr.copy()) #tonemap2 = cv2.createTonemapReinhard(gamma=2.2) #res_debvec2 = tonemap2.process(debe_hdr.copy()) #tonemap2 = cv2.createTonemapDurand(gamma=1.3) #res_robertson = tonemap2.process(hdr_robertson.copy()) # Convert typp to save and display res_debvec_8bit1 = np.clip(res_debvec1*255, 0, 255).astype('uint8')
import sys import numpy as np import cv2 if __name__ == '__main__': if len(sys.argv) < 2: print ("[Usage] python script <num of img> <dir of img>") sys.exit(0) num = int(sys.argv[1]) dir = sys.argv[2] img = [] for k in range(num): filename = dir + "/%02d.png" % k img.append(cv2.imread(filename, cv2.IMREAD_UNCHANGED)) lst = [32, 16, 8, 4, 2, 1, 0.5, 0.25, 0.125, 0.0625, 0.03125, 0.015625, 0.007125, 0.00390625, 0.001953125, 0.00097656525] exp = np.array(lst[0:num], dtype=np.float32) Align = cv2.createAlignMTB() Align.process(img, img) ResponseCurve = cv2.createCalibrateDebevec() cur = ResponseCurve.process(img, exp) RadianceMap = cv2.createMergeDebevec() hdr = RadianceMap.process(img, exp, cur) cv2.imwrite("result.hdr", hdr)
def enhance(image): images, times = readImagesAndTimes(image) alignMTB = cv2.createAlignMTB() mergeDebevec = cv2.createMergeDebevec() hdrDebevec = mergeDebevec.process(images, times, responseDebevec) return hdrDebevec