def change_contrast(img):
    lookuptable = np.empty((1, 256), np.uint8)
    gamma = 3.2
    for i in range(256):
        lookuptable[0, i] = np.clip(pow(i / 255.0, gamma) * 255.0, 0, 255)
    return cv2.LUT(img, lookuptable)
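# A minimal usage sketch for change_contrast above. The imports and the
# image path 'input.jpg' are assumptions, not part of the original snippet.
import cv2
import numpy as np

src = cv2.imread("input.jpg")            # hypothetical input image
dst = change_contrast(src)               # darkens midtones, since gamma > 1
cv2.imwrite("contrast_adjusted.jpg", dst)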
def rgb_color_transformation():
    image_dir_path = os.path.join(os.getcwd(), "assets/img")
    out_path = os.path.join(os.getcwd(), "out/rgb-color-transformation")
    ext = "jpg"

    # find all images in the directory
    images = glob.glob(f"{image_dir_path}/*.{ext}")

    # exit immediately when there are no images in the directory
    num_images = len(images)
    if num_images == 0:
        print(f"No images present in the directory: {image_dir_path}")
        sys.exit(1)

    print(f"Reading all images from the directory: {image_dir_path}")
    print(f"Output will be saved in: {out_path}")

    # delete the folder to make sure we create new files
    if os.path.exists(out_path):
        rmtree(out_path)

    # create the output folder if it does not exist
    if not os.path.exists(out_path):
        os.makedirs(out_path)

    a = 255
    b = (2 * np.pi) / 255
    c = np.pi / 5

    # create empty numpy arrays needed by the lookup tables
    reds = np.array([])
    greens = np.array([])
    blues = np.array([])

    # pre-compute and assign computed values in the lookup table for each channel
    for i in np.arange(0, 256):
        bx = b * i
        # perform transformation on the R channel: R = a * |sin(bx)|
        red = a * np.absolute(np.sin(bx))
        # perform transformation on the G channel: G = a * |sin(bx + c)|
        green = a * np.absolute(np.sin(bx + c))
        # perform transformation on the B channel: B = a * |sin(bx + 2c)|
        blue = a * np.absolute(np.sin(bx + (2 * c)))

        # append to the numpy arrays
        reds = np.append(reds, [red])
        greens = np.append(greens, [green])
        blues = np.append(blues, [blue])

    # cast the lookup tables to uint8 so cv2.LUT returns 8-bit channels
    reds = reds.astype(np.uint8)
    greens = greens.astype(np.uint8)
    blues = blues.astype(np.uint8)

    # iterate over all found images, colorize them, then write to the filesystem
    for image in images:
        basename = os.path.basename(image)
        filename, _ = os.path.splitext(basename)

        # read the image in grayscale
        image = cv2.imread(image, cv2.IMREAD_GRAYSCALE)

        # apply a lookup table to each channel: red, green and blue
        r_channel = cv2.LUT(image.copy(), reds)
        g_channel = cv2.LUT(image.copy(), greens)
        b_channel = cv2.LUT(image.copy(), blues)

        # merge the channels
        colored = cv2.merge([b_channel, g_channel, r_channel])

        # write to the filesystem
        cv2.imwrite(f"{out_path}/{filename}.jpg", colored)

    print("Done processing images.")
else:
    outros.append(paciente + str(iteradorPacientes))
    iteradorPacientes += 1

start, end = getStartEnd(image.shape[1])
segmentos = image[:, start:end, :]

for i in range(RANGE):
    plt.imsave(diretorioTrain + 'img' + str(iterador) + '.jpg',
               arr=segmentos[:, i, :], cmap='gray')
    imageSaved = cv2.imread(diretorioTrain + 'img' + str(iterador) + '.jpg', 0)
    alturaX, larguraX = imageSaved.shape
    blur = cv2.medianBlur(imageSaved, 3)
    blank_image = cv2.LUT(blur, table)
    resized_image = cv2.resize(blank_image, tamanho_imagem)

    if not CROPPED:
        altura, largura = resized_image.shape
        alturaMeio = altura // 2
        larguraMeio = largura // 2
        larguraInicio = 0
        larguraFim = 0
        alturaInicio = 0
        for j in range(largura):
            if resized_image[alturaMeio, j] > 50:
                larguraInicio = j
                break
        for j in reversed(range(largura)):
            if resized_image[alturaMeio, j] > 50:
                larguraFim = j
import argparse

import cv2
import numpy as np

ap = argparse.ArgumentParser()
ap.add_argument("-f", "--filename")
args = vars(ap.parse_args())

filename = "../../data/images/candle.jpg"
if args['filename']:
    filename = args['filename']

img = cv2.imread(filename)

# specify gamma
gamma = 1.5

# full range of intensity values
fullRange = np.arange(0, 256)

# create the lookup table
lut = np.uint8(255 * np.power((fullRange / 255.0), gamma))

# transform the image using LUT: it maps the pixel intensities in the input
# to the output using the values from lut
output = cv2.LUT(img, lut)

# show the output
combined = np.hstack([img, output])
cv2.namedWindow("Original Image -- Gamma enhancement", cv2.WINDOW_AUTOSIZE)
cv2.imshow("Original Image -- Gamma enhancement", combined)
cv2.waitKey(0)
cv2.destroyAllWindows()
cv2.imwrite("results/gammaAdjusted.jpg", output)
def _adjust_contrast_torchvision_uint8(img, factor, mean):
    lut = np.arange(0, 256) * factor
    lut = lut + mean * (1 - factor)
    lut = clip(lut, img.dtype, 255)
    return cv2.LUT(img, lut)
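# The snippet above calls a clip() helper that is not shown. A minimal
# stand-in under the assumption that it saturates the table to the valid
# range and casts it to the target dtype:
import numpy as np

def clip(img, dtype, maxval):
    # saturate values to [0, maxval], then cast to the requested dtype
    return np.clip(img, 0, maxval).astype(dtype)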
def extend_images(img, label_ext, num_create, angle=15, pixels=2, gamma=2):
    if num_create == 1:
        global new_img
        new_img.append(img)

    hist, bins = np.histogram(img.ravel(), 256, [0, 256])
    hist_avg = np.argmax(hist)

    # augment borders of the image
    top = int(0.1 * img.shape[0])   # shape[0] = rows
    bottom = top
    left = int(0.1 * img.shape[1])  # shape[1] = cols
    right = left
    dst = cv2.copyMakeBorder(img, top, bottom, left, right,
                             cv2.BORDER_REPLICATE, None)
    resized_image = cv2.resize(dst, (80, 80))

    # rotate
    if num_create == 1:
        angle = 14
    else:
        angle = -10
    M = cv2.getRotationMatrix2D((16, 16), angle, 1)
    image1 = cv2.warpAffine(src=resized_image, M=M, dsize=(80, 80))

    # translate
    tx = 1
    if num_create == 1:
        ty = -1
    else:
        ty = 1
    M = np.float32([[1, 0, tx], [0, 1, ty]])
    image2 = cv2.warpAffine(src=image1, M=M, dsize=(80, 80))

    # brighten: pick gamma based on the histogram peak
    if np.argmax(hist) >= 190:
        gamma = 0.6180
    else:
        gamma = 1.618033
    invGamma = 1.0 / gamma
    table = np.array([((i / 255.0) ** invGamma) * 255
                      for i in np.arange(0, 256)]).astype("uint8")
    image3 = cv2.LUT(image2, table)

    # crop image and resize
    if num_create == 1:
        image_new = image3[0:65, 15:]
    else:
        image_new = image3[12:78, 2:72]
    resized_image = cv2.resize(image_new, (32, 32))

    global count_images_ext
    count_images_ext += 1

    if num_create == 1:
        global images_ext
        images_ext.append(resized_image)
        global label_images_ext
        label_images_ext.append(label_ext)
    else:
        global images_ext2
        images_ext2.append(resized_image)
        global label_images_ext2
        label_images_ext2.append(label_ext)

    if count_images_ext % 10 == 0 and plot:
        # plot samples
        num = int(count_images_ext / 10) - 1
        sp = 1
        fig = plt.figure(figsize=(4, 8))
        fig.suptitle("Normal img - Create img", fontsize=14)
        for r in range(5):
            for c in range(3):
                ax = plt.subplot(5, 3, sp)
                img_ext_1 = new_img[r + (5 * num)]
                img_ext_2 = images_ext[r + (5 * num)]
                img_ext_3 = images_ext2[r + (5 * num)]
                if c == 0:
                    ax.imshow(img_ext_1, cmap=cm.Greys_r)
                elif c == 1:
                    ax.imshow(img_ext_2, cmap=cm.Greys_r)
                else:
                    ax.imshow(img_ext_3, cmap=cm.Greys_r)
                ax.axis('off')
                # ax.set(xlabel='Hola', ylabel=str(label_images_ext[r]))
                # ax.set_title("Class: " + str(label_images_ext[r]), fontsize=10)
                sp += 1
        plt.show()

    return resized_image
adjust_save_dir = r'/hdd/Temp/INRIA_gamma/adjust'
model_dir = r'/hdd6/Models/UnetCrop_inria_aug_gamma_0_PS(572, 572)_BS5_EP100_LR0.0001_DS40_DR0.1_SFN32'
imgs = sorted(glob(os.path.join(file_dir, '*_RGB.tif')))
n = len(imgs) * 5000 ** 2

for gamma in [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1, 1.5, 2]:
    tf.reset_default_graph()
    img_mean = np.zeros(3)

    # make new imgs
    invGamma = 1.0 / gamma
    table = np.array([((i / 255.0) ** invGamma) * 255
                      for i in np.arange(0, 256)]).astype('uint8')
    for file in tqdm(imgs):
        img = imageio.imread(file)
        gt = imageio.imread(file[:-8] + '_GT.tif')
        img_adjust = cv2.LUT(img, table)
        img_mean += get_sum_of_channel(img_adjust)
        img_name = os.path.basename(file)
        gt_name = os.path.basename(file[:-8] + '_GT.tif')
        imageio.imsave(os.path.join(adjust_save_dir, img_name), img_adjust)
        imageio.imsave(os.path.join(adjust_save_dir, gt_name), gt / 255)
    img_mean = img_mean / n
    print(img_mean)

    file_list_valid = [[os.path.basename(x)] for x in
                       sorted(glob(os.path.join(adjust_save_dir, '*_RGB.tif')))]
    file_list_valid_truth = [os.path.basename(x) for x in
                             sorted(glob(os.path.join(adjust_save_dir, '*_GT.tif')))]

    # make the model
    # define placeholder
    X = tf.placeholder(tf.float32, shape=[None, input_size[0], input_size[1], 3], name='X')
def brighten(img):
    res = cv2.LUT(img, table)
    return res
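# brighten() above reads a module-level `table` that is not shown. One
# plausible way to build it is a simple additive-brightness lookup table;
# the +40 offset here is an illustrative assumption:
import numpy as np

table = np.clip(np.arange(0, 256) + 40, 0, 255).astype(np.uint8)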
def gammacorrect(img, gamma):
    lookUpTable = np.empty((1, 256), np.uint8)
    for i in range(256):
        lookUpTable[0, i] = np.clip(math.pow(i / 255.0, gamma) * 255.0, 0, 255)
    return cv2.LUT(img, lookUpTable)
look_up_table = np.ones((256, 1), dtype='uint8') * 0
for i in range(256):
    if i < 64:
        look_up_table[i][0] = 0
    elif i < 128:
        look_up_table[i][0] = 100
    elif i < 192:
        look_up_table[i][0] = 200
    else:
        look_up_table[i][0] = 255  # 300 would overflow uint8, so clamp to 255

# load the image
img_src = cv2.imread("./image/sora2.jpg", 1)

# output after posterization
img_post = cv2.LUT(img_src, look_up_table)

# display
cv2.imshow("Show POSTERIZATION Image", img_post)
cv2.waitKey(0)
cv2.destroyAllWindows()
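# A small sanity check for the posterization table above (assumes the
# `look_up_table` just built is still in scope): every gray level should
# map onto one of the four plateau values.
import numpy as np

assert np.unique(look_up_table).tolist() == [0, 100, 200, 255]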
import subprocess
import pytesseract as ocr
import cv2 as cv
import numpy as np

img = cv.imread('Pic/1212.jpg', 0)

# adjust contrast with a linear transform: alpha is the gain, beta the bias
new_image = cv.convertScaleAbs(img, alpha=0.7, beta=80)
cv.imshow('contrast', new_image)

# adjust brightness with a gamma transform
lookUpTable = np.empty((1, 256), np.uint8)
for i in range(256):
    lookUpTable[0, i] = np.clip(pow(i / 255.0, 5) * 255.0, 0, 255)
cvt = cv.LUT(img, lookUpTable)

res = ocr.image_to_string(cvt)
print(res)
cv.imshow('img', cvt)
if cv.waitKey(0):
    cv.destroyAllWindows()
def gamma_trans(img, gamma):
    # first normalize to [0, 1], raise to the power gamma to get the new
    # pixel value, then scale back to [0, 255]
    gamma_table = [np.power(x / 255.0, gamma) * 255.0 for x in range(256)]
    gamma_table = np.round(np.array(gamma_table)).astype(np.uint8)
    # the mapping is applied with OpenCV's lookup-table function
    return cv2.LUT(img, gamma_table)
def adjust_gamma(image, gamma):
    # build a lookup table mapping pixel values [0, 255] to their
    # adjusted gamma values
    invGamma = 1.0 / gamma
    table = np.array([((i / 255.0) ** invGamma) * 255
                      for i in np.arange(0, 256)]).astype("uint8")
    # apply gamma correction using the lookup table
    return cv2.LUT(image, table)
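# A quick usage sketch for adjust_gamma above (the import and the test
# image path are assumptions): because the table uses the inverse
# exponent, gamma > 1 brightens the image and gamma < 1 darkens it.
import cv2

image = cv2.imread("example.jpg")          # hypothetical input
brighter = adjust_gamma(image, gamma=2.0)  # lifts midtones
darker = adjust_gamma(image, gamma=0.5)    # pushes midtones down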
def adjust_gamma(self, image, gamma=1.0):
    inv_gamma = 1.0 / gamma
    table = np.array((np.arange(0, 256) / 255.0) ** inv_gamma * 255,
                     dtype="uint8")
    return cv2.LUT(image, table)
def division(img1, img2):
    # invert img2 through the negative-mapping lookup table
    img2 = cv.LUT(img2, mapeo_negativo())
    # cast to float so the product does not overflow uint8 and the
    # in-place division below is valid
    img3 = img1.astype(np.float64) * img2
    img3 /= img3.max()
    img3 *= 255
    return img3
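# division() above depends on a mapeo_negativo() helper that is not shown.
# A minimal sketch under the assumption that it returns a 256-entry
# negative (inversion) lookup table:
import numpy as np

def mapeo_negativo():
    # map each gray level v to 255 - v
    return np.arange(255, -1, -1).astype(np.uint8)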
look_up_table_r = np.ones((256, 1), dtype='uint8') * 0  # restored so the R table exists like G and B
look_up_table_g = np.ones((256, 1), dtype='uint8') * 0
look_up_table_b = np.ones((256, 1), dtype='uint8') * 0

# build a lookup table for each of the R, G and B channels
for i in range(256):
    look_up_table_r[i][0] = get_lookuptable_r(i)
    look_up_table_g[i][0] = get_lookuptable_g(i)
    look_up_table_b[i][0] = get_lookuptable_b(i)

# load the image
img_src = cv2.imread("./image/sora2.jpg", 1)

# convert the loaded image to grayscale
img_gry = cv2.cvtColor(img_src, cv2.COLOR_BGR2GRAY)

# pseudo-color each channel
img_pcp_r = cv2.LUT(img_gry, look_up_table_r)
img_pcp_g = cv2.LUT(img_gry, look_up_table_g)
img_pcp_b = cv2.LUT(img_gry, look_up_table_b)

# merge the pseudo-colored channels
img_mrg = cv2.merge((img_pcp_b, img_pcp_g, img_pcp_r))

# display
cv2.imshow("Show Pseudo Color Processing Image", img_mrg)
cv2.waitKey(0)
cv2.destroyAllWindows()
def binarizacionPorTramos(img, inf, sup):
    # generate an identity ramp from 0 to 255
    tablaLut = np.array(range(0, 256))
    # truncate: zero everything below `inf` and from `sup` upward
    tablaLut[0:inf] = 0
    tablaLut[sup:256] = 0
    # apply the transformation; cv2.LUT needs an 8-bit table for an 8-bit image
    return cv.LUT(img, tablaLut.astype(np.uint8))
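# A usage sketch for binarizacionPorTramos above: gray levels outside the
# [inf, sup) band are zeroed while the rest keep their original value.
# The import and the image path are assumptions.
import cv2 as cv

img = cv.imread("example.jpg", cv.IMREAD_GRAYSCALE)  # hypothetical input
banded = binarizacionPorTramos(img, 100, 200)        # keep levels 100..199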
def _get_batch(self):
    # self._batch = self.rec.next()
    data = np.zeros((self.batch_size, self.data_shape[0],
                     self.data_shape[1], self.data_shape[2]))
    label = np.ones((self.batch_size, 1206)) * -1
    label[:, :3] = np.array(list(self.data_shape))
    seg_out_label = mx.nd.zeros(
        (self.batch_size, self.data_shape[1] / 4, self.data_shape[2] / 4))
    self._fnames = []
    for batch_idx in xrange(self.batch_size):
        item = self.rec.read_idx(self.index_table[self.curr_index])
        header, img = mx.recordio.unpack_img(item)
        hdr = np.array([header.label.shape[0]] + header.label.tolist())
        id0 = header.id
        seg = cv2.imread(self.imglst[str(id0)], -1)
        self._fnames.append(self.imglst[str(id0)])
        if self.enable_aug:
            assert seg is not None, self.imglst[str(id0)] + " not found."
            img, hdr, seg = self._get_augmented(img, hdr, seg, self.data_shape)
        else:
            img, hdr, seg = self._get_resized(img, hdr, seg, self.data_shape)
        for chidx in xrange(3):
            data[batch_idx, chidx, :, :] = img[:, :, 2 - chidx] - self.mean_pixels[chidx]
        if seg is not None:
            hh, ww = seg.shape
            seg = cv2.resize(seg, (ww / 4, hh / 4), interpolation=cv2.INTER_NEAREST)
            # apply the label lookup table to the segmentation map
            seg = cv2.LUT(seg, self.lut).astype(np.uint8)
            seg_out_label[batch_idx, :, :] = seg.reshape(
                (1, self.data_shape[1] / 4, self.data_shape[2] / 4))
        label[batch_idx, 3:3 + hdr.shape[0]] = hdr
        self.curr_index += 1
    self._batch = mx.io.DataBatch(
        data=[mx.ndarray.array(data)],
        label=[mx.ndarray.array(label), seg_out_label])
    if self.provide_label is None:
        # estimate the label shape from the first batch, always reshape to n*5 first
        first_label = self._batch.label[0][0].asnumpy()
        print(map(int, first_label[:6]))
        print(", ".join(map(lambda x: "%.3f" % x, first_label[6:12])))
        print(", ".join(map(lambda x: "%.3f" % x, first_label[12:18])))
        self.batch_size = self._batch.label[0].shape[0]
        self.label_header_width = int(first_label[4])
        self.label_object_width = int(first_label[5])
        assert self.label_object_width >= 5, "object width must be >= 5"
        self.label_start = 4 + self.label_header_width
        self.max_objects = (first_label.size - self.label_start) // self.label_object_width
        self.label_shape = (self.batch_size, self.max_objects, self.label_object_width)
        self.label_end = self.label_start + self.max_objects * self.label_object_width
        self.provide_label = [('label_det', self.label_shape),
                              ('seg_out_label', seg_out_label.shape)]
        print(self.provide_label)
    # modify label
    label = self._batch.label[0].asnumpy()
    label = label[:, self.label_start:self.label_end].reshape(
        (self.batch_size, self.max_objects, self.label_object_width))
    self._batch.label = [mx.nd.array(label), seg_out_label]
def gamma_transform(image, gamma=2):
    invGamma = 1.0 / gamma
    table = np.array([((i / 255.0) ** invGamma) * 255
                      for i in np.arange(0, 256)]).astype("uint8")
    return cv2.LUT(image, table)
def adjust_gamma(image, gamma=0.8):
    """Nonlinear mapping that compensates for the difference between
    human visual perception and the display screen."""
    invGamma = 1.0 / gamma
    table = np.array([((i / 255.0) ** invGamma) * 255
                      for i in np.arange(0, 256)]).astype("uint8")
    return cv2.LUT(image, table)
def main(username, img, anns, weight_, m):
    # get image size, basically height and width
    height, width, channels = img.shape
    heightAnns, widthAnns = anns.shape
    if widthAnns != width:
        img = cv.resize(img, (widthAnns, heightAnns))
        height, width, channels = img.shape

    # flatten (i.e. vectorize) the matrices to pass them to the C++ function
    # (** OPENCV LOADS BGR RATHER THAN RGB!)
    img_b = img[:, :, 0].flatten()  # B channel
    img_g = img[:, :, 1].flatten()  # G channel
    img_r = img[:, :, 2].flatten()  # R channel
    img_b = img_b.astype(np.int32)
    img_g = img_g.astype(np.int32)
    img_r = img_r.astype(np.int32)

    # image size
    sz = width * height

    # load PASCAL colormap in CV format
    lut = np.load('static/images/PASCALlutW.npy')

    ## RGR parameters
    # fixed parameters
    # m = .1  # theta_m: balance between
    numSets = 8  # number of seed sets (samplings)
    # cellSize = 10 - int(weight_)  # average spacing between samples
    cellSize = 1.333  # average spacing between samples

    # rectangular kernel, equivalent to strel in MATLAB
    SE = cv.getStructuringElement(cv.MORPH_RECT, (80, 80))  # used for identifying far background

    # RGR - refine each class
    # list of annotated classes
    clsList = np.unique(anns)
    clsList = np.delete(clsList, 0)  # remove class 0
    numCls = clsList.size  # number of classes

    # annotation masks per class
    clsMap = np.zeros((height, width, numCls))
    for itCls in range(0, numCls):
        np.putmask(clsMap[:, :, itCls], anns == clsList[itCls], 1)

    # mask of annotated pixels:
    # in this case, only annotated traces are high-confidence (index 2),
    # all others are uncertain (index 0)
    preSeg = np.int32(np.zeros((height, width)))
    np.putmask(preSeg, anns > 0, 2)
    RoI = preSeg

    # identify all high-confidence pixels composing the RoI
    area = np.count_nonzero(RoI)

    # R_H is the high-confidence region, the union of R_nB and R_F
    R_H = np.nonzero(RoI.flatten('F') > 0)
    R_H = R_H[0]

    # the number of seeds to be sampled is defined by the ratio between
    # |R_H| and the desired spacing between seeds (cellSize), rounded up
    numSamples = np.ceil(area / cellSize)

    preSeg = preSeg.flatten()

    # matrix that will contain the scoremaps for each iteration
    # ref_cls = np.zeros((height, width, numCls, numSets), dtype=float)
    ref_cls = np.zeros((height * width * numCls, numSets), dtype=float)

    num_cores = multiprocessing.cpu_count()
    manager = multiprocessing.Manager()
    return_dict = manager.dict()
    jobs = []
    for itSet in range(0, numSets):
        p = multiprocessing.Process(target=regGrowing,
                                    args=(area, numSamples, R_H, height, width,
                                          sz, preSeg, m, img_r, img_g, img_b,
                                          clsMap, numCls, return_dict, itSet))
        jobs.append(p)
        p.start()
    for proc in jobs:
        proc.join()

    outputPar = return_dict.values()
    outputPar = np.asarray(outputPar)

    # swap axes, because the parallel map returns (numSets, ...)
    ref_cls = np.moveaxis(outputPar, 0, 3)

    # average the scores obtained for each set of seeds
    ref_M = np.sum(ref_cls, axis=3) / numSets

    # maximum likelihood across the refined class scores ref_M
    maxScores = np.amax(ref_M, axis=2)
    maxClasses = np.argmax(ref_M, axis=2)
    detMask = np.uint8(maxClasses + 1)

    finalMask = np.zeros((height, width), dtype=float)
    for itCls in range(0, numCls):
        np.putmask(finalMask, detMask == itCls + 1, clsList[itCls])
    finalMask = np.uint8(finalMask - 1)

    np.save('static/' + username + '/lastmask.npy',
            np.asarray(finalMask, dtype=float))
    # sio.savemat('intermediate.mat', mdict={'anns': anns, 'ref_M': ref_M,
    #                                        'ref_cls': ref_cls, 'finalMaskRGR': finalMask})

    # apply the colormap
    _, alpha = cv.threshold(finalMask, 0, 255, cv.THRESH_BINARY)
    finalMask = cv.cvtColor(np.uint8(finalMask), cv.COLOR_GRAY2RGB)
    im_color = cv.LUT(finalMask, lut)
    b, g, r = cv.split(im_color)
    rgba = [b, g, r, alpha]
    im_color = cv.merge(rgba, 4)
    return im_color
def __call__(self, image, labels=None):
    # `table` is taken from the enclosing scope
    image = cv2.LUT(image, table)
    if labels is None:
        return image
    else:
        return image, labels
def _adjust_brightness_torchvision_uint8(img, factor):
    lut = np.arange(0, 256) * factor
    lut = np.clip(lut, 0, 255).astype(np.uint8)
    return cv2.LUT(img, lut)
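# A usage sketch for _adjust_brightness_torchvision_uint8 above (the
# import and image source are assumptions): factor > 1 brightens,
# factor < 1 darkens, and np.clip keeps the scaled table inside uint8 range.
import cv2

img = cv2.imread("example.jpg")  # hypothetical uint8 input
brighter = _adjust_brightness_torchvision_uint8(img, 1.4)
darker = _adjust_brightness_torchvision_uint8(img, 0.6)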
def gamma_trans(img, gamma):
    gamma_table = [np.power(x / 255.0, gamma) * 255.0 for x in range(256)]
    gamma_table = np.round(np.array(gamma_table)).astype(np.uint8)
    return cv2.LUT(img, gamma_table)
def adjust_gamma(gamma):
    invGamma = 1.0 / gamma
    table = np.array([((i / 255.0) ** invGamma) * 255
                      for i in np.arange(0, 256)]).astype("uint8")
    # `img` is taken from the enclosing scope
    return cv2.LUT(img, table)
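# Because adjust_gamma above reads `img` from the enclosing scope, a safer
# variant (an assumption on my part, not the original API) takes the image
# explicitly:
import cv2
import numpy as np

def adjust_gamma_explicit(image, gamma):
    invGamma = 1.0 / gamma
    table = np.array([((i / 255.0) ** invGamma) * 255
                      for i in np.arange(0, 256)]).astype("uint8")
    return cv2.LUT(image, table)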
def gamma_trans(img, gamma):
    # gamma transform: build the mapping table, one entry per gray level
    gamma_table = [np.power(x / 255.0, gamma) * 255.0 for x in range(256)]
    # round the values to integers
    gamma_table = np.round(np.array(gamma_table)).astype(np.uint8)
    # remap the image colors through the table; an adaptive algorithm could
    # also pick gamma from the intensity distribution to equalize illumination
    return cv2.LUT(img, gamma_table)
# randomly erode, dilate or do nothing
# we could also move this step after binarization
kernel = np.ones((3, 3), np.uint8)
a = random.choice([1, 2, 3])
if a == 1:
    gaussiannoise = cv2.dilate(gaussiannoise, kernel, iterations=1)
elif a == 2:
    gaussiannoise = cv2.erode(gaussiannoise, kernel, iterations=1)

# add random gamma correction
gamma = np.random.uniform(param_gamma_low, param_gamma_high)
invGamma = 1.0 / gamma
table = np.array([((i / 255.0) ** invGamma) * 255
                  for i in np.arange(0, 256)]).astype("uint8")
gammacorrected = cv2.LUT(np.uint8(gaussiannoise), table)

# binarize the image with Otsu
otsu_th, binarized = cv2.threshold(gammacorrected, 0, 1,
                                   cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)

# Kanungo noise
dist = cv2.distanceTransform(1 - binarized, cv.CV_DIST_L1, 3)  # try cv2.DIST_L1 for newer versions of OpenCV
dist2 = cv2.distanceTransform(binarized, cv.CV_DIST_L1, 3)     # try cv2.DIST_L1 for newer versions of OpenCV
P = (param_kanungo_alpha0 * np.exp(-param_kanungo_alpha * dist ** 2)) + param_kanungo_mu
P2 = (param_kanungo_beta0 *
import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt

img1 = cv.imread('images/im03.png', cv.IMREAD_COLOR)
# swap channels for matplotlib display (RGB2BGR and BGR2RGB are the same swap)
img = cv.cvtColor(img1, cv.COLOR_RGB2BGR)

f, axarr = plt.subplots(2, 2)
axarr[0, 0].imshow(img, cmap='gray')
axarr[0, 0].set_title("Original image")

# identity ramp and its reversal, used as the lookup table
x = np.arange(0, 256)
axarr[0, 1].plot(x)
y = np.arange(255, -1, -1)
axarr[1, 1].plot(y)

# cv.LUT needs an 8-bit table for an 8-bit image
axarr[1, 0].imshow(cv.LUT(img, y.astype(np.uint8)), cmap='gray')
axarr[1, 0].set_title("Transformed image")
plt.show()
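# For uint8 images the reversed-ramp LUT above is equivalent to a plain
# arithmetic negative, shown here as a cross-check on the same `img`:
negative = 255 - img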
def adjust_gamma(image, gamma, gamma_break=None, linear_part=True,
                 inverse=False, max_val=255):
    if gamma == 1:
        return image

    # build a lookup table mapping the pixel values [0, 255] to
    # their adjusted gamma values
    invGamma = gamma if inverse else 1.0 / gamma
    gamma_break = gamma_break or 0

    if image.dtype == 'uint8' and gamma_break == 0:
        # apply gamma correction using the lookup table
        max_val = min(max_val, 255)
        table = np.array([((i / max_val) ** invGamma) * max_val
                          for i in np.arange(0, max_val + 1)]).astype(image.dtype)
        adj_img = cv2.LUT(image, table)
    elif gamma_break == 0:
        adj_img = np.round(((image / max_val) ** invGamma) * max_val).astype(image.dtype)
    elif True:
        # from https://se.mathworks.com/help/vision/ref/gammacorrection.html
        b_p = gamma_break
        s_ls = 1 / (gamma / b_p ** (1 / gamma - 1) - gamma * gamma_break + gamma_break)
        f_s = gamma * s_ls / b_p ** (1 / gamma - 1)
        c_o = f_s * b_p ** (1 / gamma) - s_ls * b_p

        img = image.flatten() / max_val
        I = img <= (s_ls if inverse else 1) * b_p
        nI = np.logical_not(I)

        adj_img = np.zeros(image.shape).flatten()
        adj_img[I] = (img[I] / s_ls) if inverse else (img[I] * s_ls)
        adj_img[nI] = (((img[nI] + c_o) / f_s) ** gamma) if inverse \
            else (f_s * img[nI] ** (1 / gamma) - c_o)
        adj_img = (adj_img * max_val).reshape(image.shape).astype(image.dtype)
    else:
        # from https://en.wikipedia.org/wiki/SRGB
        if 1:
            a = gamma_break
            K0 = a / (gamma - 1)
        else:
            K0 = gamma_break
            a = K0 * (gamma - 1)
        alpha = 1 + a
        th = alpha ** gamma * (gamma - 1) ** (gamma - 1) / a ** (gamma - 1) / gamma ** gamma
        lim = K0 if inverse else K0 / th

        img = image.flatten() / max_val
        I = img <= lim
        nI = np.logical_not(I)

        adj_img = np.zeros(image.shape).flatten()
        adj_img[I] = (img[I] / th) if inverse else (th * img[I])
        adj_img[nI] = (((img[nI] + a) / alpha) ** gamma) if inverse \
            else (alpha * img[nI] ** (1 / gamma) - a)
        adj_img = (adj_img * max_val).reshape(image.shape).astype(image.dtype)
        # adj_img = np.round(adj_img * max_val).reshape(image.shape).astype(image.dtype)

    return adj_img
def __call__(self, img, events=None):
    if not self.display:
        return

    img = self.crop_outer_border(img, self.border)

    with Timer('Gamma correction'):
        if not self.gamma == 1.0:
            img = cv2.LUT(img, self.gamma_LUT)

    with Timer('Contrast/Brightness correction'):
        if not (self.contrast == 1.0 and self.brightness == 0.0):
            cv2.convertScaleAbs(src=img, dst=img,
                                alpha=self.contrast, beta=self.brightness)

    with Timer('Saturation correction'):
        img_is_color = (len(img.shape) == 3)
        if img_is_color and not self.saturation == 1.0:
            img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV).astype("float32")
            (h, s, v) = cv2.split(img)
            s = s * self.saturation
            s = np.clip(s, 0, 255)
            img = cv2.merge([h, s, v])
            img = cv2.cvtColor(img.astype("uint8"), cv2.COLOR_HSV2BGR)

    if self.show_events:
        assert events is not None
        event_preview = make_event_preview(events,
                                           mode=self.event_display_mode,
                                           num_bins_to_show=self.num_bins_to_show)
        event_preview = self.crop_outer_border(event_preview, self.border)

    if self.show_events:
        img_is_color = (len(img.shape) == 3)
        preview_is_color = (len(event_preview.shape) == 3)

        if preview_is_color and not img_is_color:
            img = np.dstack([img] * 3)
        elif img_is_color and not preview_is_color:
            event_preview = np.dstack([event_preview] * 3)

        if self.show_reconstruction:
            img = np.hstack([event_preview, img])
        else:
            img = event_preview

    cv2.imshow(self.window_name, img)
    c = cv2.waitKey(self.wait_time)
    if c == ord('s'):
        now = datetime.now()
        path_to_screenshot = '/tmp/screenshot-{}.png'.format(now.strftime("%d-%m-%Y-%H-%M-%S"))
        cv2.imwrite(path_to_screenshot, img)
        print('Saving screenshot to: {}'.format(path_to_screenshot))
    elif c == ord('e'):
        self.show_events = not self.show_events
    elif c == ord('f'):
        self.show_reconstruction = not self.show_reconstruction