def predict_image(self, test_img):
    """
    Predicts the class of every pixel in the input image.

    :param test_img: filepath to the image to predict on
    :return: segmented result
    """
    img = np.array(
        rgb2gray(imread(test_img).astype('float')).reshape(5, 216, 160)[-2]) / 256

    plist = []

    # create patches from an entire slice
    img_1 = adjust_sigmoid(img).astype(float)
    edges_1 = adjust_sigmoid(img, inv=True).astype(float)
    edges_2 = img_1
    edges_5_n = normalize(laplace(img_1))
    edges_5_n = img_as_float(img_as_ubyte(edges_5_n))

    plist.append(extract_patches_2d(edges_1, (23, 23)))
    plist.append(extract_patches_2d(edges_2, (23, 23)))
    plist.append(extract_patches_2d(edges_5_n, (23, 23)))
    # zip() must be materialized under Python 3 before building the array
    patches = np.array(
        list(zip(np.array(plist[0]), np.array(plist[1]), np.array(plist[2]))))

    # predict classes of each pixel based on the model
    full_pred = self.model.predict_classes(patches)
    fp1 = full_pred.reshape(194, 138)
    return fp1
def improve_scenebkp(sub_templateg, alg=1):
    ref = np.asarray(PIL.Image.fromarray(np.asarray(pd.read_pickle('./AuxFiles/refhist.pkl'))))
    # if alg == 1:
    #     sub_templateg = cv2.fastNlMeansDenoising(sub_templateg, None, 10, 7, 21)
    if is_low_contrast(sub_templateg, 0.35):
        temp_img = np.copy(sub_templateg).astype(float)
        temp_img[temp_img <= 30] = np.nan
        temp_img_std = np.nanstd(temp_img)
        del temp_img
        if temp_img_std <= 30:
            print('Scene is low contrasted, improving...')
            sub_templateg = hist.adjust_gamma(sub_templateg, gamma=1.7)
            sub_templateg = adjust_sigmoid(sub_templateg)
            sub_templateg = hist.adjust_gamma(sub_templateg, gamma=1.2)
    hist1 = cv2.calcHist([sub_templateg], [0], None, [256], [0, 256])
    hist2 = cv2.calcHist([ref], [0], None, [256], [0, 256])
    sim = cv2.compareHist(hist1, hist2, 0)
    if sim < 0.8:
        if alg == 1:
            print('Computing large objects.')
            sub_templateg = cv2.medianBlur(sub_templateg, 5)
            sub_templateg = cv2.bilateralFilter(sub_templateg, 3, 3, 3)
            sub_templateg = hist.adjust_gamma(sub_templateg, gamma=3.0)
            sub_templateg = adjust_sigmoid(sub_templateg)
            sub_templateg = hist.adjust_gamma(sub_templateg, gamma=1.0)
        if alg == 2:
            print('Computing small objects')
            sub_templateg = hist.adjust_gamma(sub_templateg, gamma=1.5)
    else:
        if alg == 1:
            print('Computing large objects.')
            sub_templateg = hist.adjust_gamma(sub_templateg, gamma=1.5)
            sub_templateg = adjust_sigmoid(sub_templateg)
            sub_templateg = hist.adjust_gamma(sub_templateg, gamma=1.0)
        else:
            print('Computing small objects.')
            sub_templateg = match_histograms(sub_templateg, ref)
    sub_templateg = convert(sub_templateg, 0, 255, np.uint8)
    return sub_templateg
def preproc(self, img, size, pixel_spacing, equalize=True, crop=True):
    """crop center and resize"""
    # TODO: this is stupid, you could crop out the heart
    # But should test this
    if img.shape[0] < img.shape[1]:
        img = img.T
    # Standardize based on pixel spacing
    img = transform.resize(img, (int(img.shape[0] * (1.0 / np.float32(pixel_spacing[0]))),
                                 int(img.shape[1] * (1.0 / np.float32(pixel_spacing[1])))))
    # we crop the image from the center
    short_edge = min(img.shape[:2])
    yy = int((img.shape[0] - short_edge) / 2)
    xx = int((img.shape[1] - short_edge) / 2)
    if crop:
        crop_img = img[yy:yy + short_edge, xx:xx + short_edge]
        # resize to (size, size)
        resized_img = transform.resize(crop_img, (size, size))
    else:
        resized_img = img

    # resized_img = gaussian_filter(resized_img, sigma=1)
    # resized_img = median_filter(resized_img, size=(3, 3))
    if equalize:
        resized_img = equalize_hist(resized_img)
        resized_img = adjust_sigmoid(resized_img)
    resized_img *= 255.

    return resized_img.astype("float32")
def run(self, img=misc.lena(), increase=True):
    img = misc.imread('/Users/Daniel/Desktop/p0.jpg')
    img_blurred = self.__blur(img)
    img = self.__divide(img, img_blurred)
    if False:
        img = exposure.adjust_sigmoid(img)
    misc.imsave('/Users/Daniel/Desktop/p1.jpg', img)
def img_exposure_sigmoid(image, sigmoid, prob=1):
    generator = generator_class(prob)
    if generator:
        img = exposure.adjust_sigmoid(image, sigmoid)
    else:
        img = []
    return img
def proc_mbi(imgarray):
    # Normalize image:
    img = img_as_float(imgarray, force_copy=True)

    # Image equalization (contrast stretching):
    p2, p98 = np.percentile(img, (2, 98))
    img = exposure.rescale_intensity(img, in_range=(p2, p98), out_range=(0, 1))

    # Gamma correction:
    # img = exposure.adjust_gamma(img, 0.5)
    # Or sigmoid correction:
    img = exposure.adjust_sigmoid(img)

    print("Init Morph Proc...")
    sizes = range(2, 40, 5)
    angles = [0, 18, 36, 54, 72, 90, 108, 126, 144, 162]
    szimg = img.shape
    all_thr = np.zeros((len(sizes), szimg[0], szimg[1])).astype('float64')
    all_dmp = np.zeros((len(sizes) - 1, szimg[0], szimg[1])).astype('float64')
    idx = 0
    for sz in sizes:
        print(sz)
        builds_by_size = np.zeros(szimg).astype('float64')
        for ang in angles:
            print(ang)
            stel = ia870.iaseline(sz, ang)
            oprec = opening_by_reconstruction(img, stel)
            thr = np.absolute(img - oprec)
            builds_by_size += thr
        all_thr[idx, :, :] = (builds_by_size / len(angles))
        if idx > 0:
            all_dmp[idx - 1, :, :] = all_thr[idx, :, :] - all_thr[idx - 1, :, :]
        idx += 1
    mbi = np.mean(all_dmp, axis=0)
    return mbi
def Hist_sigmoid(self):
    self.temp = self.temp + 1
    img = img_as_float(self.img)
    sigmoid_corrected = adjust_sigmoid(img)
    io.imsave('static/Image/temp' + str(self.temp) + '.' + self.format, sigmoid_corrected)
    return 'static/Image/temp' + str(self.temp) + '.' + self.format
def preprocessing(chunk, processing_type):
    """
    performing preprocessing before motion correction

    :param chunk: 3d array, frame * y * x
    :param processing_type: int, type of preprocessing
                            0: nothing, no preprocessing
                            1: histogram equalization, returns a uint8 chunk
                            2: rectifying with a certain threshold
                            3: gamma correct each frame by gamma=0.1
                            4: brightness/contrast adjustment
                            5: spatial filtering
                            6: tukey window with alpha 0.1
    :return: preprocessed chunk
    """
    if processing_type == 0:
        return chunk
    elif processing_type == 1:
        chunk_eh = np.zeros(chunk.shape, dtype=np.uint8)
        for i in range(chunk.shape[0]):
            frame = chunk[i].astype(np.float32)
            frame = (frame - np.amin(frame)) / (np.amax(frame) - np.amin(frame))
            chunk_eh[i] = cv2.equalizeHist((frame * 255).astype(np.uint8))
        return chunk_eh
    elif processing_type == 2:
        low_thr = -200
        high_thr = 2000
        chunk[chunk < low_thr] = low_thr
        chunk[chunk > high_thr] = high_thr
        return chunk
    elif processing_type == 3:
        chunk_gamma = np.zeros(chunk.shape, dtype=np.float32)
        for i in range(chunk.shape[0]):
            frame = chunk[i].astype(np.float32)
            frame = (frame - np.amin(frame)) / (np.amax(frame) - np.amin(frame))
            chunk_gamma[i] = exp.adjust_gamma(frame, gamma=0.1)
        return chunk_gamma.astype(chunk.dtype)
    elif processing_type == 4:
        chunk_sigmoid = np.zeros(chunk.shape, dtype=np.float32)
        for i in range(chunk.shape[0]):
            frame = chunk[i].astype(np.float32)
            frame = (frame - np.amin(frame)) / (np.amax(frame) - np.amin(frame))
            frame_sigmoid = exp.adjust_sigmoid(frame, cutoff=0.1, gain=10, inv=False)
            # plt.imshow(frame_sigmoid)
            # plt.show()
            chunk_sigmoid[i] = frame_sigmoid
        return chunk_sigmoid.astype(chunk.dtype)
    elif processing_type == 5:
        chunk_filtered = np.zeros(chunk.shape, dtype=np.float32)
        for i in range(chunk.shape[0]):
            frame = chunk[i].astype(np.float32)
            frame_filtered = ni.gaussian_filter(frame, sigma=10.)
            chunk_filtered[i] = frame_filtered
        return chunk_filtered.astype(chunk.dtype)
    elif processing_type == 6:
        window = tukey_2d(shape=(chunk.shape[1], chunk.shape[2]), alpha=0.1, sym=True)
        window = np.array([window])
        return (chunk * window).astype(chunk.dtype)
    else:
        raise LookupError('Do not understand framewise preprocessing type.')
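# A minimal, standalone sketch of the processing_type == 4 branch above
# (brightness/contrast adjustment) on a synthetic frame stack. The data and
# shapes here are assumptions for illustration only; the original function
# additionally casts the result back to the input chunk's dtype.
import numpy as np
from skimage import exposure

chunk = (np.random.rand(5, 64, 64) * 2000.0).astype(np.float32)  # synthetic movie chunk
chunk_sigmoid = np.zeros(chunk.shape, dtype=np.float32)
for i in range(chunk.shape[0]):
    frame = chunk[i]
    frame = (frame - frame.min()) / (frame.max() - frame.min())   # per-frame min-max scaling
    chunk_sigmoid[i] = exposure.adjust_sigmoid(frame, cutoff=0.1, gain=10, inv=False)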
def process_frame(frame, bbox):
    # crop frame (assuming CROP_X and CROP_Y are set in env)
    # frame = frame[CROP_Y[0]:CROP_Y[1], CROP_X[0]:CROP_X[1], :]
    frame = frame[bbox[1]:bbox[1] + bbox[2], bbox[0]:bbox[0] + bbox[3], :]
    # Adjust contrast
    frame = adjust_sigmoid(frame, gain=5, cutoff=0.1)
    return frame


print('defined')
def gray_process(img, dsize=25, cutoff=0.5, gain=10):
    img_gr = np.zeros_like(img[:, :, 0])
    img_gr = (img[:, :, 0] + img[:, :, 1] + img[:, :, 2]) / 3
    selem = morphology.disk(dsize)
    img_gr = filters.rank.equalize(img_gr, selem=selem)
    img_gr = img_as_float(img_gr)
    img_gr = exposure.adjust_sigmoid(img_gr, cutoff=cutoff, gain=gain)
    return img_gr
def transform(src_dir, city, s, gamma, method='CLAHE'):
    """Scales all images of a given city

    @param: src_dir is the directory the tiles are in
    @param: city is the positive filter for the city you're looking for
    @param: s is the vector of color scalings for that city
    @param: gamma is the exponent for scaling
    @param: method is the type of transform to apply"""
    imList = [f for f in os.listdir(src_dir) if ('RGB' in f and city in f and 'tif' in f)]
    path = src_dir + 'transformed_{}/'.format(method)
    try:
        os.mkdir(path)
    except:
        pass

    if method == 'WB2':
        # transformations considering the entire city of data
        threshold = 0.005
        cum_hist = {}
        for i in range(3):  # each color channel
            histSum = 0
            cumSum = 0
            for file in imList:
                image = imio.imread(src_dir + file)
                hist, bins = np.histogram(image[..., i].ravel(), 256, (0, 255))
                histSum += hist.sum()
                cumSum += np.cumsum(hist)
            cum_hist[i] = cumSum / histSum

    for file in imList:
        name = file[0].upper() + file[1:]
        image = imio.imread(src_dir + file)
        if method == 'gamma_correction':
            new_image = gammaCorrect(image, s, gamma)
        elif method == 'scale_only':
            new_image = gammaCorrect(image, s, gamma=1.0)
        elif method == 'CLAHE':
            new_image = exposure.equalize_adapthist(image, clip_limit=0.01)
            new_image = img_as_uint(new_image)
        elif method == 'log':
            new_image = exposure.adjust_log(image)
        elif method == 'sigmoid':
            new_image = exposure.adjust_sigmoid(image, gain=4, cutoff=0.35)
        elif method == 'KyleWb2':
            new_image = KyleWB2(image)
        elif method == 'WB2':
            new_image = np.zeros_like(image)  # initialize final image
            for i in range(3):  # each color channel
                bmin = np.where(cum_hist[i] > threshold)[0][0]
                bmax = np.where(cum_hist[i] > 1 - threshold)[0][0]
                new_image[..., i] = np.clip(image[..., i], bmin, bmax)
                new_image[..., i] = (new_image[..., i] - bmin) / (bmax - bmin) * 255
            new_image = new_image.round().astype('uint8')
        print(name)
        imio.imwrite(path + name, new_image)
def bw_transform_special(img):
    # Special treatment for images
    image = rgb2gray(img)
    image = invert(image)
    image = adjust_gamma(image, 2)
    image = adjust_sigmoid(image, .95)
    threshold_image = threshold_otsu(image)
    image = image > threshold_image
    return image
def gammacorection():
    image = load_a_picture()
    # ima = exposure.adjust_gamma(image, 2)
    ima = exposure.adjust_sigmoid(image, 0.8)
    add_to_stack(ima)
    show_img(actual_loaded)
    processing_menu()
def random_transform(self, x, seed=None):
    x = super(MyImageDataGenerator, self).random_transform(x, seed)

    if self.contrast_stretching_perc != 0.:
        contrast_p = np.random.uniform(1. - self.contrast_stretching_perc,
                                       1. + self.contrast_stretching_perc)
        x = adjust_sigmoid(x.astype(float) / 255., cutoff=0.5,
                           gain=5 * contrast_p, inv=False)
        x *= 255

    return x
def plot_image(img, ax):
    from skimage import exposure
    h, w, _ = img.shape
    img = img[100:h - 100, 100:w - 100, :]
    plt.setp(ax.get_yticklabels(), visible=False)
    plt.setp(ax.get_xticklabels(), visible=False)
    ax.imshow(exposure.adjust_sigmoid(img, 0.40, 8))
    ax.set_xticks([])
    ax.set_yticks([])
    return
def saturation_rectified_intensity(image):
    assert (type(image) is np.ndarray and image.dtype == np.uint8 and
            len(image.shape) == 3), "The input image has to be a 3-channel uint8 numpy array."
    image_hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    saturation = img_as_float(image_hsv[:, :, 1])
    intensity = img_as_float(image_hsv[:, :, 2])
    adjust = adjust_sigmoid(saturation, 0.08, 25)
    signal = invert(intensity)
    image_out = invert(adjust * signal)
    return img_as_ubyte(image_out)
def callPeaks(lane, gain=7, hamming=5, filt=0.2, order=9):
    """ Identify peaks in the lane

    :Args:
        :param lane: The lane in which to call peaks.
        :type lane: tapeAnalyst.gel_processing.GelLane

        :param gain: The gain value to use for increasing contrast (see
            skimage.exposure.adjust_sigmoid)
        :type gain: int

        :param hamming: The window length to use for the Hamming convolution (see
            scipy.signal.hamming)
        :type hamming: int

        :param filt: Remove all pixels whose intensity is below this value.
        :type filt: float

        :param order: The distance allowed for finding maxima (see
            scipy.signal.argrelmax)
        :type order: int
    """
    # Increase contrast to help with peak calling
    ladj = exposure.adjust_sigmoid(lane.lane, cutoff=0.5, gain=gain)

    # Take the max pixel intensity for each row of the lane's gel image.
    laneDist = ladj.max(axis=1)

    # Smooth the distribution
    laneDist = signal.convolve(laneDist, signal.hamming(hamming))

    # Get the locations of the dye front and dye end. Peak calling is difficult
    # here because dyes tend to plateau. To aid peak calling, add an artificial
    # spike in the dye regions. Also remove all peaks outside of the dyes.
    try:
        dyeFrontPeak = int(np.ceil(np.mean([lane.dyeFrontStart, lane.dyeFrontEnd])))
        laneDist[dyeFrontPeak] = laneDist[dyeFrontPeak] + 2
        laneDist[dyeFrontPeak + 1:] = 0
    except:
        logger.warn('No Dye Front - Lane {}: {}'.format(lane.index, lane.wellID))

    try:
        dyeEndPeak = int(np.ceil(np.mean([lane.dyeEndStart, lane.dyeEndEnd])))
        laneDist[dyeEndPeak] = laneDist[dyeEndPeak] + 2
        laneDist[:dyeEndPeak - 1] = 0
    except:
        logger.warn('No Dye End - Lane {}: {}'.format(lane.index, lane.wellID))

    # Filter out low levels
    laneDist[laneDist < filt] = 0

    # Find local maxima
    peaks = signal.argrelmax(laneDist, order=order)[0]

    return peaks
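# A self-contained sketch of the same peak-calling recipe (contrast boost,
# Hamming smoothing, low-level filtering, local maxima) on a synthetic 1-D
# lane profile. This is illustrative only, not the original tapeAnalyst call;
# it uses scipy.signal.windows.hamming in place of the older
# scipy.signal.hamming alias, and the band positions are made up.
import numpy as np
from scipy import signal
from skimage import exposure

profile = np.zeros(500)
profile[[100, 250, 400]] = 1.0                                   # three idealized bands
profile = signal.convolve(profile, signal.windows.hamming(25), mode="same")

boosted = exposure.adjust_sigmoid(profile, cutoff=0.5, gain=7)   # increase contrast
smoothed = signal.convolve(boosted, signal.windows.hamming(5))   # smooth the distribution
smoothed[smoothed < 0.2] = 0                                     # drop low-intensity baseline
peaks = signal.argrelmax(smoothed, order=9)[0]
print(peaks)                                                     # indices near the three bands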
def m6():
    Image.MAX_IMAGE_PIXELS = None
    for fn in os.listdir("all"):
        if fn.endswith("TIF"):
            adj_fn = fn.split("_B")[1]
            if len(adj_fn) == 5:
                adj_fn = "0" + adj_fn
            adj_fn = "A_" + adj_fn[:-4]
            print(adj_fn)
            image = io.imread("all/" + fn)
            print(image.shape)
            if fn.endswith("8.TIF"):
                region = image[3500 * 2:3900 * 2, 800 * 2:1200 * 2]
            else:
                region = image[3500:3900, 800:1200]
            region = transform.resize(region, (800, 800), anti_aliasing=False)
            io.imsave("edited/" + adj_fn + ".png", region)

            adhist = exposure.equalize_adapthist(region)
            io.imsave("edited/" + adj_fn + "adj_adhist.png", adhist)
            exp = exposure.adjust_sigmoid(adhist)
            io.imsave("edited/" + adj_fn + "adj_adsig.png", exp)

            # roberts, sobel, scharr, prewitt
            edges = filters.roberts(adhist)
            io.imsave("edited/" + adj_fn + "f_rob.png", edges)
            edges = filters.sobel(adhist)
            io.imsave("edited/" + adj_fn + "f_sob.png", edges)
            edges = filters.scharr(adhist)
            io.imsave("edited/" + adj_fn + "f_sch.png", edges)
            edges = filters.prewitt(adhist)
            io.imsave("edited/" + adj_fn + "f_prew.png", edges)

            adhist = exposure.equalize_hist(region)
            io.imsave("edited/" + adj_fn + "adj_hist.png", adhist)
            exp = exposure.adjust_sigmoid(adhist)
            io.imsave("edited/" + adj_fn + "adj_sig.png", exp)
def process_image(img, sig_param=.85):
    image = rgb2gray(img)
    image = pyramid_expand(image, order=3)
    theta = ImageSignalDigitalizer.skew_detect(image)
    image = rotate(image, np.rad2deg(theta) + ImageSignalDigitalizer.CONST_ANGLE, resize=True)
    image = invert(image)
    image = adjust_sigmoid(image, sig_param)
    image = ImageSignalDigitalizer.bw_transform(image)
    image = resize(image, (2048, 2048 * 2))
    return image
def sigmoid(self):
    """Sigmoid exposure applied to the image.

    Parameters
    ----------
    image : n-dimensional array
        Input image.

    Returns
    -------
    Image with sigmoid exposure applied.
    """
    return exposure.adjust_sigmoid(self)
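# For reference, a minimal sketch (synthetic input, not part of the class
# above): adjust_sigmoid rescales the image to [0, 1] and applies the
# S-shaped curve O = 1 / (1 + exp(gain * (cutoff - I))), with defaults
# cutoff=0.5 and gain=10, stretching mid-tones and compressing the extremes.
import numpy as np
from skimage import exposure

ramp = np.linspace(0, 1, 256).reshape(16, 16)             # synthetic gradient image
out = exposure.adjust_sigmoid(ramp, cutoff=0.5, gain=10)  # contrast-adjusted copy
assert out.min() >= 0.0 and out.max() <= 1.0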
def _augment(xs):
    """Image adjustment changes only intensity, not image shape.

    Return:
        images: 4-d tensor with shape [depth, height, width, channels]
    """
    # `xs` has shape [depth, height, width] with values in [0, 1].
    brt_gamma, brt_gain = np.random.uniform(low=0.9, high=1.1, size=2)
    aj_bright = adjust_gamma(xs, brt_gamma, brt_gain)

    contrast_gain = np.random.uniform(low=5, high=10)
    aj_contrast = adjust_sigmoid(aj_bright, gain=contrast_gain)

    return aj_contrast
def run(self, imgin_path, imgout_path=None, increase_exposure=False):
    imgin_path = self.__expand_user(imgin_path)
    img = misc.imread(imgin_path)
    img_blurred = self.__blur(img)
    img = self.__divide(img, img_blurred)
    if increase_exposure:
        img = exposure.adjust_sigmoid(img)
    if not imgout_path:
        imgout_path = self.__add_suffix(imgin_path)
    misc.imsave(imgout_path, img)
    print("Saved to", imgout_path)
def rescale(initial_data, log_scale=True, method='adaptive_equalization'):
    norm = (initial_data - initial_data.min()) / initial_data.max()
    exponent = 1000
    log = np.log(exponent * norm + 1) / np.log(exponent) if log_scale else norm
    if method == 'adaptive_equalization':
        return exposure.equalize_adapthist(log / log.max(), nbins=2048, kernel_size=64)
    elif method == 'adjust_sigmoid':
        return exposure.adjust_sigmoid(log / log.max(), cutoff=0.5, gain=20)
    elif method == 'global_equalization':
        return exposure.equalize_hist(log / log.max(), nbins=1024)
    elif method == 'local_equalization':
        return rank.equalize(log / log.max(), selem=disk(30))
    return
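# Hypothetical usage of rescale() above: a synthetic, heavy-tailed detector
# frame is compressed logarithmically and then sigmoid-stretched. The data is
# made up, and the call assumes the same module-level imports (numpy as np,
# skimage.exposure) as the function.
import numpy as np

raw = np.random.exponential(scale=50.0, size=(256, 256))        # synthetic frame
stretched = rescale(raw, log_scale=True, method='adjust_sigmoid')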
def test_adjust_sigmoid_cutoff_one():
    """Verifying the output with expected results for sigmoid correction
    with cutoff equal to one and gain of 5"""
    image = np.arange(0, 255, 4, np.uint8).reshape((8, 8))
    expected = np.array(
        [[1, 1, 1, 2, 2, 2, 2, 2],
         [3, 3, 3, 4, 4, 4, 5, 5],
         [5, 6, 6, 7, 7, 8, 9, 10],
         [10, 11, 12, 13, 14, 15, 16, 18],
         [19, 20, 22, 24, 25, 27, 29, 32],
         [34, 36, 39, 41, 44, 47, 50, 54],
         [57, 61, 64, 68, 72, 76, 80, 85],
         [89, 94, 99, 104, 108, 113, 118, 123]], dtype=np.uint8)

    result = exposure.adjust_sigmoid(image, 1, 5)
    assert_array_equal(result, expected)
def create_dataset():
    dataset = []
    os.chdir("Car")
    img_list = os.listdir()
    for img in img_list:
        image = io.imread(img)
        img_gray = color.rgb2gray(image)
        img_contrast = exposure.adjust_sigmoid(img_gray, cutoff=0.5, gain=100, inv=False)
        dataset.append((image, img_contrast))
    os.chdir(os.getcwd() + "/../")
    return dataset
def augmentations(image_array: ndarray):
    v_min, v_max = np.percentile(image_array, (0.2, 99.8))
    return (
        image_array,
        transform.rotate(image_array, random.uniform(-50, 50)),
        exposure.rescale_intensity(image_array, in_range=(v_min, v_max)),
        util.random_noise(image_array),
        ndimage.gaussian_filter(image_array, 2),
        exposure.adjust_log(image_array),
        exposure.adjust_sigmoid(image_array),
        # color.rgb2gray(image_array),  (FOR COLORED IMAGES)
        # np.invert(image_array),  (FOR COLORED IMAGES)
        exposure.adjust_gamma(image_array, gamma=0.4, gain=0.9),
        image_array[:, ::-1],
        image_array[::-1, :])
def adjust(frame, method, **kwargs):
    if method == "equalize":
        adjusted = exp.equalize_hist(frame, **kwargs)
    elif method == "gamma":
        adjusted = exp.adjust_gamma(frame, **kwargs)
    elif method == "log":
        adjusted = exp.adjust_log(frame, **kwargs)
    elif method == "sigmoid":
        adjusted = exp.adjust_sigmoid(frame, **kwargs)
    elif method == "adaptive":
        adjusted = exp.equalize_adapthist(frame, **kwargs)
    else:
        raise ValueError(
            "method can be equalize, gamma, log, sigmoid or adaptive")
    return adjusted
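# Hypothetical usage of the adjust() dispatcher above, assuming the same
# module imports skimage.exposure as exp. Keyword arguments are forwarded
# verbatim to the chosen exposure function; the frame here is synthetic.
import numpy as np

frame = np.random.rand(128, 128)                       # synthetic float frame in [0, 1]
contrasted = adjust(frame, "sigmoid", cutoff=0.4, gain=12)
equalized = adjust(frame, "adaptive", clip_limit=0.02)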
def test_adjust_inv_sigmoid_cutoff_half():
    """Verifying the output with expected results for inverse sigmoid
    correction with cutoff equal to half and gain of 10"""
    image = np.arange(0, 255, 4, np.uint8).reshape((8, 8))
    expected = np.array(
        [[253, 253, 252, 252, 251, 251, 250, 249],
         [249, 248, 247, 245, 244, 242, 240, 238],
         [235, 232, 229, 225, 220, 215, 210, 204],
         [197, 190, 182, 174, 165, 155, 146, 136],
         [126, 116, 106, 96, 87, 78, 70, 62],
         [55, 49, 43, 37, 33, 28, 25, 21],
         [18, 16, 14, 12, 10, 8, 7, 6],
         [5, 4, 4, 3, 3, 2, 2, 1]], dtype=np.uint8)

    result = exposure.adjust_sigmoid(image, 0.5, 10, True)
    assert_array_equal(result, expected)
def test_adjust_sigmoid_cutoff_zero():
    """Verifying the output with expected results for sigmoid correction
    with cutoff equal to zero and gain of 10"""
    image = np.arange(0, 255, 4, np.uint8).reshape((8, 8))
    expected = np.array(
        [[127, 137, 147, 156, 166, 175, 183, 191],
         [198, 205, 211, 216, 221, 225, 229, 232],
         [235, 238, 240, 242, 244, 245, 247, 248],
         [249, 250, 250, 251, 251, 252, 252, 253],
         [253, 253, 253, 253, 254, 254, 254, 254],
         [254, 254, 254, 254, 254, 254, 254, 254],
         [254, 254, 254, 254, 254, 254, 254, 254],
         [254, 254, 254, 254, 254, 254, 254, 254]], dtype=np.uint8)

    result = exposure.adjust_sigmoid(image, 0, 10)
    assert_array_equal(result, expected)
def test_adjust_sigmoid_cutoff_half():
    """Verifying the output with expected results for sigmoid correction
    with cutoff equal to half and gain of 10"""
    image = np.arange(0, 255, 4, np.uint8).reshape((8, 8))
    expected = np.array(
        [[1, 1, 2, 2, 3, 3, 4, 5],
         [5, 6, 7, 9, 10, 12, 14, 16],
         [19, 22, 25, 29, 34, 39, 44, 50],
         [57, 64, 72, 80, 89, 99, 108, 118],
         [128, 138, 148, 158, 167, 176, 184, 192],
         [199, 205, 211, 217, 221, 226, 229, 233],
         [236, 238, 240, 242, 244, 246, 247, 248],
         [249, 250, 250, 251, 251, 252, 252, 253]], dtype=np.uint8)

    result = exposure.adjust_sigmoid(image, 0.5, 10)
    assert_array_equal(result, expected)
def pre_image_processing(resized_image):
    equal_adapt_hist_image = exposure.equalize_adapthist(resized_image)
    rescale_intensity_image = exposure.rescale_intensity(equal_adapt_hist_image)
    adjust_sigmoid_image = exposure.adjust_sigmoid(rescale_intensity_image)
    gray_scale_image = rgb2gray(adjust_sigmoid_image)
    mean_image = mean(gray_scale_image, disk(1))
    mean_image = mean(mean_image, disk(1))
    mean_image = mean(mean_image, disk(1))
    median_image = dilation(median(mean_image, disk(1)), square(2))
    otsu_image = filters.threshold_otsu(median_image)
    closing_image = closing(median_image > otsu_image, square(1))
    # opening_image = opening(closing_image, square(2))
    opening_image = invert(closing_image)
    return opening_image
def image_transformation(X, method_type='blur', **kwargs):
    # https://www.kaggle.com/tomahim/image-manipulation-augmentation-with-skimage
    q = kwargs['percentile'] if 'percentile' in kwargs else (0.2, 99.8)
    angle = kwargs['angle'] if 'angle' in kwargs else 60
    transformation_dict = {
        'blur': normalize(ndimage.uniform_filter(X)),
        'invert': normalize(util.invert(X)),
        'rotate': rotate(X, angle=angle),
        'rescale_intensity': _rescale_intensity(X, q=q),
        'gamma_correction': exposure.adjust_gamma(X, gamma=0.4, gain=0.9),
        'log_correction': exposure.adjust_log(X),
        'sigmoid_correction': exposure.adjust_sigmoid(X),
        'horizontal_flip': X[:, ::-1],
        'vertical_flip': X[::-1, :],
        'rgb2gray': skimage.color.rgb2gray(X)
    }
    return transformation_dict[method_type]
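# One caveat with the dictionary dispatch above: every transformation is
# evaluated before a single result is returned. A lazy variant (a sketch only,
# not the original author's code, assuming the same imports and the same
# normalize/_rescale_intensity helpers) defers the work with callables so that
# only the requested branch runs.
def image_transformation_lazy(X, method_type='blur', **kwargs):
    q = kwargs.get('percentile', (0.2, 99.8))
    angle = kwargs.get('angle', 60)
    transformation_dict = {
        'blur': lambda: normalize(ndimage.uniform_filter(X)),
        'invert': lambda: normalize(util.invert(X)),
        'rotate': lambda: rotate(X, angle=angle),
        'rescale_intensity': lambda: _rescale_intensity(X, q=q),
        'gamma_correction': lambda: exposure.adjust_gamma(X, gamma=0.4, gain=0.9),
        'log_correction': lambda: exposure.adjust_log(X),
        'sigmoid_correction': lambda: exposure.adjust_sigmoid(X),
        'horizontal_flip': lambda: X[:, ::-1],
        'vertical_flip': lambda: X[::-1, :],
        'rgb2gray': lambda: skimage.color.rgb2gray(X),
    }
    # Call the selected zero-argument callable; only this branch is computed.
    return transformation_dict[method_type]()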
def intensity_correction(img):
    mean_val = np.mean(img)
    mean_val = mean_val / 255.
    # print(mean_val)

    # sigmoid correction
    img2 = exp.adjust_sigmoid(img, cutoff=mean_val, gain=5)
    # draw(img2, "Sigmoid correction", 4, 3, 4, 4, 3, 5)

    # robust linear correction
    left, right = np.percentile(img2, (10, 90))
    img3 = exp.rescale_intensity(img2, in_range=(left, right))
    # draw(img3, "Linear correction", 4, 3, 7, 4, 3, 8)

    img4 = filters.gaussian(img3, sigma=.5)
    # draw(img4, "Gaussian filter", 4, 3, 10, 4, 3, 11)
    return img4
def image_equalization(image_file):
    '''
    in testing
    '''
    # Load an example image
    img = io.imread(image_file)
    # img = data.moon()
    # img = 'new.jpg'

    # Contrast stretching
    p2, p98 = np.percentile(img, (2, 98))
    img_rescale = exposure.rescale_intensity(img, in_range=(p2, p98))
    img_rescale = exposure.adjust_sigmoid(img_rescale, 0.5, 5)

    # Equalization
    img_eq = exposure.equalize_hist(img)

    # Adaptive Equalization
    img_adapteq = exposure.equalize_adapthist(img, clip_limit=0.03)

    '''
    cv2.imshow('image', img)
    cv2.waitKey(0)
    '''
    cv2.imshow('image', img_rescale)
    cv2.waitKey(0)
    '''
    cv2.imshow('image', img_eq)
    cv2.waitKey(0)
    cv2.imshow('image', img_adapteq)
    cv2.waitKey(0)
    '''

    cv2.imwrite('new_adj.jpg', img_rescale)
    '''
    cv2.imwrite('new_adj2.jpg', img_eq)
    cv2.imwrite('new_adj3.jpg', img_adapteq)
    '''
    return ['new_adj.jpg', 1, 2]  # 'new_adj2.jpg', 'new_adj3.jpg']
# <codecell>

# Original image
A = io.imread("../data/" + l[2])
io.imshow(A)

# <codecell>

# Colour deconvolution
deconv = ski.img_as_float(color.separate_stains(A, np.linalg.inv(qstain)))
io.imshow(exposure.adjust_sigmoid(
    filter.gaussian_filter(
        exposure.rescale_intensity(
            deconv[:, :, 0] + deconv[:, :, 2],
            out_range=(0, 1)),
        17),
    gain=7, cutoff=0.6))

# <codecell>

# Blood
blood = \
    morphology.remove_small_objects(
        filter.threshold_adaptive(
            exposure.adjust_sigmoid(
                filter.gaussian_filter(
                    exposure.rescale_intensity(
                        deconv[:, :, 0] + deconv[:, :, 2],
def contrast_enhance(img):
    return adjust_sigmoid(img, cutoff=0.5, gain=10)
offset = 1000
lengthscale = 301
image = exposure.equalize_adapthist(io.imread(files[4]))
# image = io.imread(files[4])
io.imshow(image)
plt.grid(False)

# <codecell>

# binary = np.logical_or(filter.threshold_adaptive(exposure.adjust_sigmoid(image[:, :, 0], cutoff=0.4, gain=20), lengthscale),
#                        filter.threshold_adaptive(exposure.adjust_sigmoid(image[:, :, 2], cutoff=0.5, gain=20), lengthscale))
binary = filter.threshold_adaptive(
    exposure.adjust_sigmoid(image[:, :, 0], cutoff=0.4, gain=30), 301).astype(bool)

clean = morphology.binary_closing(binary, morphology.disk(3)).astype(bool)
clean = morphology.remove_small_objects(clean, 200)
clean = morphology.remove_small_objects((1 - clean).astype(bool), 200)
io.imshow(clean)
plt.grid(False)

# <codecell>

(xdim, ydim, _) = image.shape
xdim /= 10
ydim /= 10
# <codecell>

A = io.imread("../data/" + l[5])
io.imshow(A)

# <codecell>

# B = exposure.adjust_sigmoid(filter.gaussian_filter(A[:, :, 0], 19), cutoff=.45, gain=15)
B = exposure.adjust_sigmoid(
    filter.gaussian_filter(
        exposure.rescale_intensity(
            color.separate_stains(A, np.linalg.inv(qstain)),
            out_range=(0, 1))[:, :, 1],
        29),
    cutoff=.35, gain=20)
io.imshow(B)

# <codecell>

# b = morphology.remove_small_objects(filter.threshold_adaptive(filter.gaussian_filter(exposure.adjust_sigmoid(A[:, :, 1]), 31), 501, offset=-0.05), 2000)
C = morphology.remove_small_objects(
    filter.threshold_adaptive(B, 301, offset=-0.025), 4000)
# io.imshow(morphology.binary_closing(np.logical_or(morphology.binary_dilation(C, morphology.disk(11)), b), morphology.disk(31)))
io.imshow(C)
cov_point_density = []
var_area_density = []
cov_area_density = []
fs_area = []
pairwise = []

image = exposure.equalize_adapthist(A)
io.imshow(image)
plt.grid(False)

# <codecell>

# Process for nuclei
binary = filter.threshold_adaptive(
    exposure.adjust_sigmoid(A[:, :, 0], cutoff=0.4, gain=30), 301).astype(bool)
clean = morphology.binary_closing(binary, morphology.disk(3)).astype(bool)
clean = morphology.remove_small_objects(clean, 200)
clean = morphology.remove_small_objects((1 - clean).astype(bool), 200)
io.imshow(clean)
plt.grid(False)

# <codecell>

# Find contour of inflammatory zone
local_density = filter.gaussian_filter(clean, 61)
local_density -= local_density.min()
local_density /= local_density.max()
def sigmoid_transform(img, cutoff=0.5):
    return exposure.adjust_sigmoid(img, cutoff)
c.z_sun

# In[ ]:

cube.imshow(data=out)

# In[ ]:

out2 = adjust_sigmoid(out)

# In[ ]:

equal = equalize_adapthist(out2)

# In[ ]:

cube.imshow(equal)

# In[ ]:

meta = pd.read_csv('/Users/klay6683/Dropbox/DDocuments/UVIS/straws/coiss_ahires.015.list.txt',
for k in range(0, len(image_file_list), n):
    X = []
    Y = []
    for fname in image_file_list[k:k + n]:
        label = label_dict[fname.split('.')[0]]
        cur_img = imread(folder + '/' + fname, as_grey=True)
        cur_img = 1 - cur_img

        # randomly add samples
        r_for_eq = random()
        if r_for_eq < 0.3:
            cur_img = equalize_adapthist(cur_img, ntiles_x=5, ntiles_y=5, clip_limit=0.1)
        if 0.3 < r_for_eq < 0.4:
            cur_img = adjust_sigmoid(cur_img, cutoff=0.5, gain=10, inv=False)
        if 0.5 < r_for_eq < 0.6:
            cur_img = adjust_gamma(cur_img, gamma=0.5, gain=1)

        X.append([cur_img.tolist()])
        label_vec = [0] * 5
        label_vec[label] = 1
        """
        label_vec = [0]*3
        if label == 0 or label == 1:
            label_vec[0] = 1
        elif label == 2 or label == 3:
            label_vec[1] = 1
        else:
# There are plenty of ways of combining the different grey-level images into a
# false-color representation (it's a bit of an art!). This is a very simple
# pipeline that mainly tweaks intensities and does nothing fancy along the
# lines of denoising, sharpening, etc.

import numpy as np
import matplotlib.pyplot as plt

from skimage import img_as_float, io, exposure

ic = io.ImageCollection('m8_050507_*.png')
ic = [img_as_float(img) for img in ic]
H, B, G, R, L = ic

H = exposure.adjust_sigmoid(H, cutoff=0.05, gain=35)
L = exposure.adjust_sigmoid(L, cutoff=0.05, gain=15)
R = exposure.adjust_gamma(R, 0.1)

# Merge the H, L, R channels into a false-color stack
out = np.dstack((H, L, R))
out = exposure.adjust_gamma(out, 2.1)

io.imsave('m8_recon.png', out)

f, ax = plt.subplots()
ax.imshow(out)
plt.show()
    27)

inflammation = \
    maximum_filter(
        morphology.remove_small_objects(
            filter.threshold_adaptive(
                exposure.adjust_sigmoid(
                    filter.gaussian_filter(
                        exposure.equalize_adapthist(
                            exposure.rescale_intensity(
                                deconv[:, :, 1],
                                out_range=(0, 1)),
                            ntiles_y=1),
                        5),
                    cutoff=0.6),
                75, offset=-0.12),
            250),
        29)

# Labelled
print(files)

# <codecell>

A = io.imread(files[0])
As = transform.rescale(A, 0.25)
io.imshow(A)
plt.grid(False)

# <codecell>

# B = exposure.adjust_sigmoid(A, gain=12)
Bs = exposure.adjust_sigmoid(ski.img_as_float(As), gain=12)
# io.imshow(B - exposure.adjust_sigmoid(ski.img_as_float(A), gain=12))

# <codecell>

# C = color.rgb2xyz(B)[:, :, 1]
Cs = color.rgb2xyz(Bs)[:, :, 1]
io.imshow(Cs)
plt.grid(0)

# <codecell>

# D = filter.threshold_adaptive(C, 301)
Ds = filter.threshold_adaptive(Cs, 75)
io.imshow(Ds)
plt.grid(0)
def _find_patches(self, class_number, per_class):
    """
    Depending on the class number, search for patches with the edge pattern.

    :param per_class: number of patches to find
    :param class_number: the class number
    :return: a numpy array of patches and a numpy array of labels
    """
    print()
    patches = []
    labels = np.ones(per_class * self.augmentation_multiplier) * class_number

    ten_percent_black = 0
    ten_percent_black_value = int(float(per_class) * 0.0001)

    start_value_extraction = 0
    full = False

    if isdir('patches/') and isdir('patches/sigma_{}/'.format(self.sigma)) and \
            isdir('patches/sigma_{}/class_{}/'.format(self.sigma, class_number)):
        # load all patches and check whether the quantity is enough to work with
        path_to_patches = sorted(
            glob('./patches/sigma_{}/class_{}/**.png'.format(self.sigma, class_number)),
            key=get_right_order)
        for path_index in range(len(path_to_patches)):
            if path_index < per_class:
                patch_to_load = np.array(
                    rgb2gray(imread(path_to_patches[path_index], dtype=float)).reshape(
                        3, self.patch_size[0], self.patch_size[1])).astype(float)
                patches.append(patch_to_load)
                for el in range(len(patch_to_load)):
                    if np.max(patch_to_load[el]) > 1:
                        patch_to_load[el] /= np.max(patch_to_load[el])
                print('*---> patch {}/{} loaded and added '.format(path_index, per_class))
            else:
                full = True
                break
        if len(path_to_patches) < per_class:
            # change start_value_extraction
            start_value_extraction = len(path_to_patches)
        else:
            full = True
    else:
        mkdir_p('patches/sigma_{}/class_{}'.format(self.sigma, class_number))

    patch_to_extract = 25000
    if not full:
        for i in range(start_value_extraction, per_class):
            extracted = False
            random_image = self.images[randint(0, len(self.images) - 1)]
            while np.array_equal(random_image, np.zeros(random_image.shape)):
                random_image = self.images[randint(0, len(self.images) - 1)]
            patches_from_random = np.array(
                extract_patches_2d(random_image, self.patch_size, patch_to_extract))
            counter = 0
            while not extracted:
                if counter > per_class / 2:
                    random_image = self.images[randint(0, len(self.images) - 1)]
                    patches_from_random = np.array(
                        extract_patches_2d(random_image, self.patch_size, patch_to_extract))
                    counter = 0
                patch = np.array(
                    patches_from_random[randint(0, patch_to_extract - 1)].astype(float))
                if patch.max() > 1:
                    patch = normalize(patch)
                patch_1 = adjust_sigmoid(patch)
                edges_1 = adjust_sigmoid(patch, inv=True)
                edges_2 = patch_1
                edges_5_n = normalize(laplace(patch))
                edges_5_n = img_as_float(img_as_ubyte(edges_5_n))
                choosing_cond = is_boarder(patch=patch_1, sigma=self.sigma)
                if class_number == 1 and choosing_cond:
                    final_patch = np.array([edges_1, edges_2, edges_5_n])
                    patches.append(final_patch)
                    try:
                        imsave('./patches/sigma_{}/class_{}/{}.png'.format(
                            self.sigma, class_number, i),
                            final_patch.reshape((3 * self.patch_size[0], self.patch_size[1])),
                            dtype=float)
                    except:
                        print('problem occurred in save for class {}'.format(class_number))
                        exit(0)
                    print('*---> patch {}/{} added and saved '.format(i, per_class))
                    extracted = True
                elif class_number == 0 and not choosing_cond:
                    if np.array_equal(patch, np.zeros(patch.shape)):
                        if ten_percent_black < ten_percent_black_value:
                            final_patch = np.array([patch, edges_2, edges_5_n])
                            patches.append(final_patch)
                            try:
                                imsave('./patches/sigma_{}/class_{}/{}.png'.format(
                                    self.sigma, class_number, i),
                                    final_patch.reshape(
                                        (3 * self.patch_size[0], self.patch_size[1])),
                                    dtype=float)
                            except:
                                print('problem occurred in save for class {}'.format(class_number))
                                exit(0)
                            print('*---> patch {}/{} added and saved '.format(i, per_class))
                            ten_percent_black += 1
                            extracted = True
                        else:
                            pass
                    else:
                        final_patch = np.array([edges_1, edges_2, edges_5_n])
                        patches.append(final_patch)
                        try:
                            imsave('./patches/sigma_{}/class_{}/{}.png'.format(
                                self.sigma, class_number, i),
                                final_patch.reshape(
                                    (3 * self.patch_size[0], self.patch_size[1])),
                                dtype=float)
                        except:
                            print('problem occurred in save for class {}'.format(class_number))
                            exit(0)
                        print('*---> patch {}/{} added and saved '.format(i, per_class))
                        extracted = True
                counter += 1

    if self.augmentation_angle != 0:
        print("\n *_*_*_*_* proceeding with data augmentation "
              "for class {} *_*_*_*_* \n".format(class_number))
        if isdir('./patches/sigma_{}/class_{}/rotations'.format(self.sigma, class_number)):
            print("rotations folder present ")
        else:
            mkdir_p('./patches/sigma_{}/class_{}/rotations'.format(self.sigma, class_number))
            print("rotations folder created")
        for el_index in range(len(patches)):
            for j in range(1, self.augmentation_multiplier):
                try:
                    patch_rotated = np.array(
                        rgb2gray(imread(
                            './patches/sigma_{}/class_{}/rotations/{}_{}.png'.format(
                                self.sigma, class_number, el_index,
                                self.augmentation_angle * j))).reshape(
                            3, self.patch_size[0], self.patch_size[1])).astype(float) / (256 * 256)
                    patches.append(patch_rotated)
                    print('*---> patch {}/{} loaded and added '
                          'with rotation of {} degrees'.format(
                              el_index, per_class, self.augmentation_angle * j))
                except:
                    final_rotated_patch = rotate_patches(
                        patches[el_index][0], patches[el_index][1], patches[el_index][2],
                        self.augmentation_angle * j)
                    patches.append(final_rotated_patch)
                    imsave('./patches/sigma_{}/class_{}/rotations/{}_{}.png'.format(
                        self.sigma, class_number, el_index, self.augmentation_angle * j),
                        final_rotated_patch.reshape(3 * self.patch_size[0], self.patch_size[1]),
                        dtype=float)
                    print('*---> patch {}/{} saved and added '
                          'with rotation of {} degrees '.format(
                              el_index, per_class, self.augmentation_angle * j))
        print()
        print('augmentation done \n')

    print('extraction for class {} complete\n'.format(class_number))
    return np.array(patches), labels