def computeHDR(images, log_exposure_times, smoothing_lambda=100.):
    """Produce an HDR image from an exposure stack (Debevec pipeline).

    For each color channel: sample pixel intensities across the stack,
    recover the camera response curve, build the log-radiance map, apply
    a crude highlight-suppression step, exponentiate back to linear
    radiance, and min-max normalize the result into the 0-255 range.

    Parameters
    ----------
    images : list<numpy.ndarray>
        A list containing an exposure stack of images.
    log_exposure_times : numpy.ndarray
        The log exposure times for each image in the exposure stack.
    smoothing_lambda : float (Optional)
        A constant value to correct for scale differences between data
        and smoothing terms in the constraint matrix -- source paper
        suggests a value of 100.

    Returns
    -------
    numpy.ndarray
        The resulting HDR with intensities scaled to fit uint8 range.
    """
    images = [np.atleast_3d(i) for i in images]
    num_channels = images[0].shape[2]
    hdr_image = np.zeros(images[0].shape, dtype=np.float64)

    for channel in range(num_channels):
        # Collect the current layer of each input image from the
        # exposure stack.
        layer_stack = [img[:, :, channel] for img in images]

        # Sample image intensities to constrain the response-curve solve.
        intensity_samples = hdr.sampleIntensities(layer_stack)

        # Recover the camera response curve for this channel.
        response_curve = hdr.computeResponseCurve(intensity_samples,
                                                  log_exposure_times,
                                                  smoothing_lambda,
                                                  hdr.linearWeight)

        # Build the radiance map (log domain, per the Debevec paper).
        img_rad_map = hdr.computeRadianceMap(layer_stack,
                                             log_exposure_times,
                                             response_curve,
                                             hdr.linearWeight)

        # Crude highlight suppression: clamp log-radiance values above
        # 0.1% of the maximum DOWN TO that threshold.  (The previous
        # code replaced them with 0 instead, which after np.exp mapped
        # every bright pixel to exactly 1.0 and punched flat holes into
        # the highlights.)
        threshold = np.max(img_rad_map) * 0.001
        clipped = np.where(img_rad_map > threshold, threshold, img_rad_map)

        # Leave the log domain before normalizing for display.
        img_rad_map = np.exp(clipped)

        out = np.zeros(shape=img_rad_map.shape, dtype=img_rad_map.dtype)
        cv2.normalize(img_rad_map, out, alpha=0, beta=255,
                      norm_type=cv2.NORM_MINMAX)
        hdr_image[..., channel] = out

    return hdr_image
def computeHDR(images, log_exposure_times, smoothing_lambda=100.):
    """Assemble an HDR image from an exposure stack via the Debevec method.

    NOTE: This function is NOT scored as part of this assignment. You
    may modify it as you see fit.

    Per channel the steps are:

        1. Sample pixel intensities from random locations through the
           image stack to determine the camera response curve.
        2. Compute the response curve for that color channel.
        3. Build the image radiance map from the response curve.
        4. Tone mapping would go here; we only min-max normalize into a
           displayable range (feel free to experiment).

    Parameters
    ----------
    images : list<numpy.ndarray>
        A list containing an exposure stack of images.
    log_exposure_times : numpy.ndarray
        The log exposure times for each image in the exposure stack.
    smoothing_lambda : float, optional
        Scale-correction constant for the response-curve constraint
        matrix -- the source paper suggests a value of 100.

    Returns
    -------
    numpy.ndarray
        The resulting HDR with intensities scaled to fit uint8 range.
    """
    stack = [np.atleast_3d(img) for img in images]
    channel_count = stack[0].shape[2]
    result = np.zeros(stack[0].shape, dtype=np.float64)

    for ch in range(channel_count):
        # Pull channel `ch` out of every exposure in the stack.
        layers = [img[:, :, ch] for img in stack]

        # Sampled intensities constrain the response-curve solve.
        samples = hdr.sampleIntensities(layers)
        curve = hdr.computeResponseCurve(samples,
                                         log_exposure_times,
                                         smoothing_lambda,
                                         hdr.linearWeight)

        # Radiance map for this channel; normalization below stands in
        # for tone mapping.  If you implement tone mapping, the function
        # MUST appear in your report to receive credit.
        rad = hdr.computeRadianceMap(layers,
                                     log_exposure_times,
                                     curve,
                                     hdr.linearWeight)

        scaled = np.zeros(shape=rad.shape, dtype=rad.dtype)
        cv2.normalize(rad, scaled, alpha=0, beta=255,
                      norm_type=cv2.NORM_MINMAX)
        result[..., ch] = scaled

    return result
def computeHDR(images, log_exposure_times, smoothing_lambda=100.):
    """Produce an HDR image from an exposure stack (Debevec pipeline).

    For each channel: sample intensities across the stack, recover the
    camera response curve, compute the log-radiance map, exponentiate it
    back to linear radiance, and min-max normalize into the 0-255 range.

    Parameters
    ----------
    images : list<numpy.ndarray>
        A list containing an exposure stack of images.
    log_exposure_times : numpy.ndarray
        The log exposure times for each image in the exposure stack.
    smoothing_lambda : float (Optional)
        A constant value to correct for scale differences between data
        and smoothing terms in the constraint matrix -- source paper
        suggests a value of 100.

    Returns
    -------
    numpy.ndarray
        The resulting HDR with intensities scaled to fit uint8 range.
    """
    # NOTE(review): the previous version printed debug banners to stdout
    # on every call and carried several large commented-out tone-mapping
    # experiments plus a stray triple-quoted string literal (dead code
    # evaluated on each invocation).  All of that cruft is removed; the
    # live computation below is unchanged.
    images = [np.atleast_3d(i) for i in images]
    num_channels = images[0].shape[2]
    hdr_image = np.zeros(images[0].shape, dtype=np.float64)

    for channel in range(num_channels):
        # Collect the current layer of each input image from the
        # exposure stack.
        layer_stack = [img[:, :, channel] for img in images]

        # Sample image intensities to constrain the response-curve solve.
        intensity_samples = hdr.sampleIntensities(layer_stack)

        # Recover the camera response curve for this channel.
        response_curve = hdr.computeResponseCurve(intensity_samples,
                                                  log_exposure_times,
                                                  smoothing_lambda,
                                                  hdr.linearWeight)

        # Build the radiance map (log domain).
        img_rad_map = hdr.computeRadianceMap(layer_stack,
                                             log_exposure_times,
                                             response_curve,
                                             hdr.linearWeight)

        # Exponentiate back to linear radiance, then min-max normalize
        # into a displayable range in place of real tone mapping.
        out = np.zeros(shape=img_rad_map.shape, dtype=img_rad_map.dtype)
        cv2.normalize(np.exp(img_rad_map), out, alpha=0, beta=255,
                      norm_type=cv2.NORM_MINMAX)
        hdr_image[..., channel] = out

    return hdr_image
def computeHDR(images, log_exposure_times, output_folder, smoothing_lambda=100., rad_plot=False):
    """Build an HDR image from an exposure stack using the Debevec process.

    Per channel the steps are:

        1. Sample pixel intensities from random locations through the
           image stack to determine the camera response curve.
        2. Compute the response curve for that color channel.
        3. Build the image radiance map from the response curve.

    Parameters
    ----------
    images : list<numpy.ndarray>
        A list containing an exposure stack of images.
    log_exposure_times : numpy.ndarray
        The log exposure times for each image in the exposure stack.
    output_folder : str
        Destination folder passed through to the radiance-map plotter.
    smoothing_lambda : float, optional
        Scale-correction constant for the response-curve constraint
        matrix -- the source paper suggests a value of 100.
    rad_plot : bool, optional
        When True, plot each channel's radiance map via plotRadianceMap.

    Returns
    -------
    numpy.ndarray
        The resulting HDR with intensities scaled to fit uint8 range.
    """
    stack = [np.atleast_3d(img) for img in images]
    composite = np.zeros(stack[0].shape, dtype=np.float64)

    for ch in range(stack[0].shape[2]):
        # Gather channel `ch` from every exposure in the stack.
        layers = [img[:, :, ch] for img in stack]

        # Sampled intensities constrain the response-curve solve.
        samples = hdr.sampleIntensities(layers)
        curve = hdr.computeResponseCurve(samples,
                                        log_exposure_times,
                                        smoothing_lambda,
                                        hdr.linearWeight)

        radiance = hdr.computeRadianceMap(layers,
                                          log_exposure_times,
                                          curve,
                                          hdr.linearWeight)

        if rad_plot:
            plotRadianceMap(radiance, output_folder, ch)

        # The radiance map is logarithmic; min-max normalize it into a
        # displayable 0-255 range.
        normalized = np.zeros(shape=radiance.shape, dtype=radiance.dtype)
        cv2.normalize(radiance, normalized, alpha=0, beta=255,
                      norm_type=cv2.NORM_MINMAX)
        composite[..., ch] = normalized

    return composite