def test_windowed_histogram():
    # check the number of valid pixels in the neighborhood
    image8 = np.array([[0, 0, 0, 0, 0],
                       [0, 1, 1, 1, 0],
                       [0, 1, 1, 1, 0],
                       [0, 1, 1, 1, 0],
                       [0, 0, 0, 0, 0]], dtype=np.uint8)
    elem = np.ones((3, 3), dtype=np.uint8)
    outf = np.empty(image8.shape + (2,), dtype=float)
    mask = np.ones(image8.shape, dtype=np.uint8)

    # Population so we can normalize the expected output while maintaining
    # code readability
    pop = np.array([[4, 6, 6, 6, 4],
                    [6, 9, 9, 9, 6],
                    [6, 9, 9, 9, 6],
                    [6, 9, 9, 9, 6],
                    [4, 6, 6, 6, 4]], dtype=float)

    r0 = np.array([[3, 4, 3, 4, 3],
                   [4, 5, 3, 5, 4],
                   [3, 3, 0, 3, 3],
                   [4, 5, 3, 5, 4],
                   [3, 4, 3, 4, 3]], dtype=float) / pop

    r1 = np.array([[1, 2, 3, 2, 1],
                   [2, 4, 6, 4, 2],
                   [3, 6, 9, 6, 3],
                   [2, 4, 6, 4, 2],
                   [1, 2, 3, 2, 1]], dtype=float) / pop

    rank.windowed_histogram(image=image8, selem=elem, out=outf, mask=mask)
    assert_equal(r0, outf[:, :, 0])
    assert_equal(r1, outf[:, :, 1])

    # Test n_bins parameter
    larger_output = rank.windowed_histogram(image=image8, selem=elem,
                                            mask=mask, n_bins=5)
    assert larger_output.shape[2] == 5
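# A minimal standalone sketch (not part of the test above) of what
# rank.windowed_histogram computes: one normalized histogram per pixel,
# taken over the footprint and clipped at the image borders, so the bins
# at every pixel sum to 1. The toy array here is illustrative only.
import numpy as np
from skimage.filters import rank

img = np.array([[0, 0, 1],
                [0, 1, 1],
                [1, 1, 1]], dtype=np.uint8)
hist = rank.windowed_histogram(img, np.ones((3, 3), dtype=np.uint8))

assert hist.shape == (3, 3, 2)              # (rows, cols, n_bins)
assert np.allclose(hist.sum(axis=-1), 1.0)  # normalized per pixel
# The center pixel sees all 9 values: three 0s and six 1s.
assert np.allclose(hist[1, 1], [3 / 9, 6 / 9])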
def get_binned_ori(phase, selem, bin=None):
    if bin is None:
        n_bins = 12
    else:
        n_bins = 100
    grad, mag, ori = get_gradient(phase)
    px_histograms = rank.windowed_histogram(grad / num.max(grad), selem, n_bins=4)
    px_histograms = num.sum(px_histograms, axis=2)
    px_histogramsori = rank.windowed_histogram(ori / 10, selem, n_bins=n_bins)
    px_histogramsori = num.sum(px_histogramsori, axis=2)
    return px_histograms, px_histogramsori
def get_binned_cont(phase, selem):
    phase_bin = get_contours(phase)
    img = phase_bin
    grad, mag, ori = get_gradient(img)
    px_histograms = rank.windowed_histogram(grad / num.max(grad), selem, n_bins=4)
    px_histograms = num.sum(px_histograms, axis=2)
    px_histogramsori = rank.windowed_histogram(ori / 10, selem, n_bins=4)
    px_histogramsori = num.sum(px_histogramsori, axis=2)
    return px_histograms, px_histogramsori, phase_bin
def windowed_histogram_similarity(image, selem, reference_hist, n_bins):
    # Compute normalized windowed histogram feature vector for each pixel
    px_histograms = rank.windowed_histogram(image, selem, n_bins=n_bins)

    # Reshape coin histogram to (1, 1, N) for broadcast when we want to use
    # it in arithmetic operations with the windowed histograms from the image
    reference_hist = reference_hist.reshape((1, 1) + reference_hist.shape)

    # Compute Chi-squared distance metric: sum((X - Y)^2 / (X + Y));
    # a measure of distance between histograms
    X = px_histograms
    Y = reference_hist

    num = (X - Y) ** 2
    denom = X + Y
    denom[denom == 0] = np.inf
    frac = num / denom

    chi_sqr = 0.5 * np.sum(frac, axis=2)

    # Generate a similarity measure. It needs to be low when distance is high
    # and high when distance is low; taking the reciprocal will do this.
    # Chi-squared will always be >= 0, add small value to prevent divide by 0.
    similarity = 1 / (chi_sqr + 1.0e-4)

    return similarity
def windowed_histogram_similarity(image, footprint, reference_hist, n_bins):
    # Compute normalized windowed histogram feature vector for each pixel
    px_histograms = rank.windowed_histogram(image, footprint, n_bins=n_bins)

    # Reshape coin histogram to (1, 1, N) for broadcast when we want to use
    # it in arithmetic operations with the windowed histograms from the image
    reference_hist = reference_hist.reshape((1, 1) + reference_hist.shape)

    # Compute Chi-squared distance metric: sum((X - Y)^2 / (X + Y));
    # a measure of distance between histograms
    X = px_histograms
    Y = reference_hist

    num = (X - Y) ** 2
    denom = X + Y
    denom[denom == 0] = np.inf
    frac = num / denom

    chi_sqr = 0.5 * np.sum(frac, axis=2)

    # Generate a similarity measure. It needs to be low when distance is high
    # and high when distance is low; taking the reciprocal will do this.
    # Chi-squared will always be >= 0, add small value to prevent divide by 0.
    similarity = 1 / (chi_sqr + 1.0e-4)

    return similarity
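# A usage sketch for the similarity function above, assuming the image is
# first quantized to n_bins gray levels; the patch coordinates and constants
# are illustrative, not taken from the original code.
import numpy as np
from skimage import data
from skimage.morphology import disk

n_bins = 16
img = data.camera()
quantized = (img // int(np.ceil(256 / n_bins))).astype(np.uint8)

# Reference histogram from an arbitrary patch, normalized like the
# windowed histograms it will be compared against.
patch = quantized[100:150, 200:250]
reference_hist, _ = np.histogram(patch, bins=n_bins, range=(0, n_bins))
reference_hist = reference_hist / reference_hist.sum()

similarity = windowed_histogram_similarity(quantized, disk(15),
                                           reference_hist, n_bins)
# High values mark pixels whose neighborhood histogram resembles the patch.
print(similarity.shape, float(similarity.max()))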
def check_all(self):
    selem = morphology.disk(1)
    refs = np.load(os.path.join(skimage.data_dir, "rank_filter_tests.npz"))

    assert_equal(refs["autolevel"], rank.autolevel(self.image, selem))
    assert_equal(refs["autolevel_percentile"],
                 rank.autolevel_percentile(self.image, selem))
    assert_equal(refs["bottomhat"], rank.bottomhat(self.image, selem))
    assert_equal(refs["equalize"], rank.equalize(self.image, selem))
    assert_equal(refs["gradient"], rank.gradient(self.image, selem))
    assert_equal(refs["gradient_percentile"],
                 rank.gradient_percentile(self.image, selem))
    assert_equal(refs["maximum"], rank.maximum(self.image, selem))
    assert_equal(refs["mean"], rank.mean(self.image, selem))
    assert_equal(refs["geometric_mean"], rank.geometric_mean(self.image, selem))
    assert_equal(refs["mean_percentile"],
                 rank.mean_percentile(self.image, selem))
    assert_equal(refs["mean_bilateral"],
                 rank.mean_bilateral(self.image, selem))
    assert_equal(refs["subtract_mean"], rank.subtract_mean(self.image, selem))
    assert_equal(refs["subtract_mean_percentile"],
                 rank.subtract_mean_percentile(self.image, selem))
    assert_equal(refs["median"], rank.median(self.image, selem))
    assert_equal(refs["minimum"], rank.minimum(self.image, selem))
    assert_equal(refs["modal"], rank.modal(self.image, selem))
    assert_equal(refs["enhance_contrast"],
                 rank.enhance_contrast(self.image, selem))
    assert_equal(refs["enhance_contrast_percentile"],
                 rank.enhance_contrast_percentile(self.image, selem))
    assert_equal(refs["pop"], rank.pop(self.image, selem))
    assert_equal(refs["pop_percentile"],
                 rank.pop_percentile(self.image, selem))
    assert_equal(refs["pop_bilateral"],
                 rank.pop_bilateral(self.image, selem))
    assert_equal(refs["sum"], rank.sum(self.image, selem))
    assert_equal(refs["sum_bilateral"], rank.sum_bilateral(self.image, selem))
    assert_equal(refs["sum_percentile"],
                 rank.sum_percentile(self.image, selem))
    assert_equal(refs["threshold"], rank.threshold(self.image, selem))
    assert_equal(refs["threshold_percentile"],
                 rank.threshold_percentile(self.image, selem))
    assert_equal(refs["tophat"], rank.tophat(self.image, selem))
    assert_equal(refs["noise_filter"], rank.noise_filter(self.image, selem))
    assert_equal(refs["entropy"], rank.entropy(self.image, selem))
    assert_equal(refs["otsu"], rank.otsu(self.image, selem))
    assert_equal(refs["percentile"], rank.percentile(self.image, selem))
    assert_equal(refs["windowed_histogram"],
                 rank.windowed_histogram(self.image, selem))
def compute_histogram(events, bins, selem):
    xs = np.array([event.x for event in events])
    x1, x2 = zip(*xs)
    h, _, _ = np.histogram2d(x1, x2, bins=bins, density=False)
    hist_img = windowed_histogram(h.astype(np.uint8), selem)
    hist_img_max = h.copy()
    for ix, iy in np.ndindex(hist_img_max.shape):
        hist_img_max[ix, iy] = np.sum([
            NORMALIZATION_FACTOR * i_elem * elem
            for i_elem, elem in enumerate(hist_img[ix][iy])
        ])
    h = hist_img_max.T
    return h
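# The per-pixel loop above computes NORMALIZATION_FACTOR times the expected
# bin value of each neighborhood histogram (windowed_histogram returns
# normalized histograms, so sum_i i * p_i is the local mean). A vectorized
# equivalent, assuming the same hist_img and NORMALIZATION_FACTOR as above,
# would be:
weights = np.arange(hist_img.shape[-1])
hist_img_max = NORMALIZATION_FACTOR * np.einsum('ijk,k->ij', hist_img, weights)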
def majority_vote(a, iterations=1, structure=np.ones((3, 3))):
    """Changes cell values to the most frequent value in its neighborhood.

    Args:
        a (ndarray): 2D ndarray. Possibly a MaskedArray.
        iterations (int, optional): Number of times to repeat the process.
            Defaults to 1.
        structure (ndarray, optional): The neighborhood expressed as a 2-D
            array of 1's and 0's. Defaults to np.ones((3, 3)), which is
            8-connectedness.

    Returns:
        ndarray: 2D ndarray of same dimensions as input array. MaskedArray
        if input is masked.
    """
    nodata = None
    assert a.dtype == "uint8", "Majority vote only works for uint8"
    if np.ma.is_masked(a):
        # windowed_histogram does not work with masked arrays
        nodata = np.max(a) + 1
        a = a.filled(nodata)
    for _ in range(iterations):
        a = rank.windowed_histogram(a, structure).argmax(axis=-1).astype("uint8")
    return np.ma.masked_values(a, nodata) if nodata is not None else a
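# A quick, illustrative check of majority_vote on a tiny raster: the single
# deviating cell is outvoted 8-to-1 by its 3x3 neighborhood and flips to 0.
import numpy as np

a = np.zeros((5, 5), dtype=np.uint8)
a[2, 2] = 1
smoothed = majority_vote(a)
assert smoothed[2, 2] == 0
assert (smoothed == 0).all()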
def check_all():
    np.random.seed(0)
    image = np.random.rand(25, 25)
    selem = morphology.disk(1)
    refs = np.load(os.path.join(skimage.data_dir, "rank_filter_tests.npz"))

    assert_equal(refs["autolevel"], rank.autolevel(image, selem))
    assert_equal(refs["autolevel_percentile"],
                 rank.autolevel_percentile(image, selem))
    assert_equal(refs["bottomhat"], rank.bottomhat(image, selem))
    assert_equal(refs["equalize"], rank.equalize(image, selem))
    assert_equal(refs["gradient"], rank.gradient(image, selem))
    assert_equal(refs["gradient_percentile"],
                 rank.gradient_percentile(image, selem))
    assert_equal(refs["maximum"], rank.maximum(image, selem))
    assert_equal(refs["mean"], rank.mean(image, selem))
    assert_equal(refs["mean_percentile"],
                 rank.mean_percentile(image, selem))
    assert_equal(refs["mean_bilateral"],
                 rank.mean_bilateral(image, selem))
    assert_equal(refs["subtract_mean"], rank.subtract_mean(image, selem))
    assert_equal(refs["subtract_mean_percentile"],
                 rank.subtract_mean_percentile(image, selem))
    assert_equal(refs["median"], rank.median(image, selem))
    assert_equal(refs["minimum"], rank.minimum(image, selem))
    assert_equal(refs["modal"], rank.modal(image, selem))
    assert_equal(refs["enhance_contrast"],
                 rank.enhance_contrast(image, selem))
    assert_equal(refs["enhance_contrast_percentile"],
                 rank.enhance_contrast_percentile(image, selem))
    assert_equal(refs["pop"], rank.pop(image, selem))
    assert_equal(refs["pop_percentile"],
                 rank.pop_percentile(image, selem))
    assert_equal(refs["pop_bilateral"],
                 rank.pop_bilateral(image, selem))
    assert_equal(refs["sum"], rank.sum(image, selem))
    assert_equal(refs["sum_bilateral"], rank.sum_bilateral(image, selem))
    assert_equal(refs["sum_percentile"],
                 rank.sum_percentile(image, selem))
    assert_equal(refs["threshold"], rank.threshold(image, selem))
    assert_equal(refs["threshold_percentile"],
                 rank.threshold_percentile(image, selem))
    assert_equal(refs["tophat"], rank.tophat(image, selem))
    assert_equal(refs["noise_filter"], rank.noise_filter(image, selem))
    assert_equal(refs["entropy"], rank.entropy(image, selem))
    assert_equal(refs["otsu"], rank.otsu(image, selem))
    assert_equal(refs["percentile"], rank.percentile(image, selem))
    assert_equal(refs["windowed_histogram"],
                 rank.windowed_histogram(image, selem))
def test_majority(self):
    img = data.camera()
    elem = np.ones((3, 3), dtype=np.uint8)
    expected = rank.windowed_histogram(img, elem).argmax(-1).astype(np.uint8)
    assert_equal(expected, rank.majority(img, elem))
def draw_composition(Xs, ys, result_path, out_shape=None, agg_shape=None,
                     drawing_subsamples=False, sup_title=None, validation=None):
    if not out_shape:
        out_shape = (1, len(ys))
    if not agg_shape:
        agg_shape = (1, 1)
    estimated_series_size = agg_shape[1] * out_shape[1]
    assert agg_shape[0] * agg_shape[1] * out_shape[0] * out_shape[1] == len(ys)
    len_x_vector = len(Xs)
    assert len_x_vector == len(ys)
    fig_rows, fig_cols = out_shape
    validation_correct = parse_validation(validation)
    if validation_correct:
        fig_cols += 1
    fig, axes = plt.subplots(nrows=fig_rows, ncols=fig_cols,
                             sharex=True, sharey=True, squeeze=False)
    mng = plt.get_current_fig_manager()
    mng.resize(*mng.window.maxsize())
    mng.window.state('zoomed')
    axes1d = axes.flatten()
    r = np.arange(len(ys)).reshape(out_shape[0] * agg_shape[0],
                                   out_shape[1] * agg_shape[1])
    a1, a2 = agg_shape
    indices = [
        r[a1 * i:a1 * (i + 1), a2 * j:a2 * (j + 1)].reshape(-1)
        for i, j in product(range(out_shape[0]), range(out_shape[1]))
    ]
    x_list = []
    y_list = []
    t_list = []
    for i in indices:
        x_list.append(sum((Xs[j] for j in i), []))
        y_list.append(sum((ys[j] for j in i), []))
        title = f'S{1 + 2 * (i[0] // estimated_series_size)}_D{1 + (i[0] % estimated_series_size)}'
        if len(i) > 1:
            title = title + f' - S{1 + 2 * (i[-1] // estimated_series_size)}_D{1 + (i[-1] % estimated_series_size)}'
        t_list.append(title)
    if drawing_subsamples:
        min_samples = np.inf
        for yi in y_list:
            logger.debug(f'Checking minimum len(y). current: {len(yi)}, '
                         f'minimum so far: {min_samples}')
            min_samples = min(min_samples, len(yi))
        if sup_title:
            sup_title = sup_title + f' remaining pts: {min_samples}'
    colors = ["red", "green", "blue", "orange", "purple", "pink", "yellow"]
    metrics = []
    min_x1, min_x2, max_x1, max_x2 = reduce(
        lambda cum, cur: (min(cum[0], min(cur[0])),
                          min(cum[1], min(cur[1])),
                          max(cum[2], max(cur[0])),
                          max(cum[3], max(cur[1]))),
        zip(*x_list),
        (np.inf, np.inf, -np.inf, -np.inf))
    logger.debug(f'limits: {(min_x1, min_x2, max_x1, max_x2)}')
    vmax = None
    for ax, title, x, y in zip(axes1d, t_list, x_list, y_list):
        x1, x2 = zip(*x)
        remaining_labels = sorted(list(set(y)))
        # Disabled: draw a gray circle around the centroid of each label.
        # for label in remaining_labels:
        #     p1 = [p for l, p in zip(y, x1) if l is label]
        #     p2 = [p for l, p in zip(y, x2) if l is label]
        #     cx = np.mean(p1)
        #     cy = np.mean(p2)
        #     radius = max(np.std(p1), np.std(p2))
        #     circle = plt.Circle((cx, cy), radius, color='gray', zorder=1)
        #     ax.add_artist(circle)
        logger.debug(f'remaining labels: {remaining_labels} '
                     f'(#pts in total: {len(y)})')
        color = [colors[remaining_labels.index(v_y) % len(colors)] for v_y in y]
        if drawing_subsamples:
            together = list(zip(color, x1, x2))
            shuffle(together)
            color, x1, x2 = zip(*together)
            color = color[:min_samples]
            x1 = x1[:min_samples]
            x2 = x2[:min_samples]
            logger.debug(f'remaining pts: {len(color)}')
        ax.scatter(x1, x2, c=color, s=1, zorder=2, alpha=0.0015)
        bins = [41, 41]
        disk_radius = 6
        h, x_edges, y_edges = np.histogram2d(
            x1, x2, bins=bins, density=False,
            range=[[min_x1, max_x1], [min_x2, max_x2]])
        h = h.astype(np.uint8)
        hist_img = windowed_histogram(h, disk(disk_radius))
        hist_img_max = h.copy()
        for ix, iy in np.ndindex(hist_img_max.shape):
            hist_img_max[ix, iy] = np.sum([
                NORMALIZATION_FACTOR * i_elem * elem
                for i_elem, elem in enumerate(hist_img[ix][iy])
            ])
        h = hist_img_max.T
        if not vmax:
            vmax = np.max(h)
        m1, m2 = np.meshgrid(x_edges, y_edges)
        logger.debug(f'shape: {m1.shape} vs {m2.shape} vs {h.shape}')
        ax.pcolormesh(m1, m2, h, zorder=1, vmin=0.0, vmax=vmax, cmap='RdBu')
        ax.set(aspect='equal')
        ax.set_title(title, fontsize=8)
        metrics.append(h.reshape(-1))
    distance_matrix = squareform(pdist(np.array(metrics))) / NORMALIZATION_FACTOR
    logger.debug(f'Distance matrix: {distance_matrix}')
    if sup_title:
        # sup_title = sup_title + '\nDistances from the first picture: ' + \
        #     np.array2string(distance_matrix[0], precision=1)
        fig.suptitle(sup_title, fontsize=8)
    plt.savefig(result_path, dpi='figure', frameon=True, bbox_inches=None)
    # plt.show()

    fig, axes = plt.subplots(nrows=1, ncols=1,
                             sharex=True, sharey=True, squeeze=False)
    axes1d = axes.flatten()
    mng = plt.get_current_fig_manager()
    mng.resize(*mng.window.maxsize())
    part2_x = [np.array(metrics)]
    part2_tsne = fit_tsne(part2_x)
    logger.debug(f'tsne: {part2_tsne}')
    p2_x1, p2_x2 = zip(*part2_tsne[0])
    kf = out_shape[1] // agg_shape[0]
    color = ['red'] * kf + ['green'] * kf + ['blue'] * kf
    for ax in axes1d:
        ax.scatter(p2_x1, p2_x2, c=color)
        for i, (x, y) in enumerate(zip(p2_x1, p2_x2)):
            ax.text(x, y, f'S{1 + 2 * (i // kf)}_D{1 + (i % kf)}')
    result_path_dynamics = f'{result_path[:-4]}_dynamics.png'
    plt.savefig(result_path_dynamics, dpi='figure', frameon=True,
                bbox_inches=None)
def draw_X2(Xs, result_path, out_shape=None, agg_lists=None,
            drawing_subsamples=False, sup_title=None, validation=None):

    def split_seq(seq, size):
        newseq = []
        split_size = 1.0 / size * len(seq)
        for i in range(size):
            newseq.append(
                seq[int(round(i * split_size)):int(round((i + 1) * split_size))])
        return newseq

    if not out_shape:
        out_shape = (1, len(Xs))  # (rows, columns)
    if not agg_lists:
        agg_lists = split_seq(list(range(len(Xs))), out_shape[0] * out_shape[1])
    symmetric_diff = set(
        item for s in agg_lists for item in s
    ).symmetric_difference(range(len(Xs)))
    assert len(symmetric_diff) == 0, \
        f'agg_lists: {agg_lists}, symmetric_difference with range({len(Xs)}) is: {symmetric_diff}'
    assert len(agg_lists) == (out_shape[0] * out_shape[1]), \
        f'{len(agg_lists)} vs {out_shape[0] * out_shape[1]}'
    fig_rows, fig_cols = out_shape
    validation_correct = parse_validation(validation)
    if validation_correct:
        fig_cols += 1
    plt.rcParams.update({'font.size': 7})
    fig, axes = plt.subplots(nrows=fig_rows, ncols=fig_cols,
                             sharex=True, sharey=True, squeeze=False)
    mng = plt.get_current_fig_manager()
    mng.resize(*mng.window.maxsize())
    mng.window.state('zoomed')
    axes1d = axes.flatten()
    indices = agg_lists
    logger.debug(f'Indices: {indices}')
    x_list = []
    t_list = []
    for i, ith_agg_list in enumerate(indices):
        x_list.append(sum((Xs[j] for j in ith_agg_list), []))
        title = f'{i} ({len(ith_agg_list)})'
        t_list.append(title)
    if drawing_subsamples:
        min_samples = np.inf
        for xi in x_list:
            logger.debug(f'checking minimum len(x). current: {len(xi)}, '
                         f'minimum so far: {min_samples}')
            min_samples = min(min_samples, len(xi))
        if sup_title:
            sup_title = sup_title + f' remaining pts: {min_samples}'
    metrics = []
    min_x1, min_x2, max_x1, max_x2 = reduce(
        lambda cum, cur: (min(cum[0], min(cur[0])),
                          min(cum[1], min(cur[1])),
                          max(cum[2], max(cur[0])),
                          max(cum[3], max(cur[1]))),
        zip(*x_list),
        (np.inf, np.inf, -np.inf, -np.inf))
    logger.debug(f'limits: {(min_x1, min_x2, max_x1, max_x2)}')
    # vmax = None
    for ax, title, x in zip(axes1d, t_list, x_list):
        x1, x2 = zip(*x)
        if drawing_subsamples:
            together = list(zip(x1, x2))
            shuffle(together)
            x1, x2 = zip(*together)
            x1 = x1[:min_samples]
            x2 = x2[:min_samples]
            logger.debug(f'remaining pts: {len(x1)}')
        ax.scatter(x1, x2, c='red', s=1, zorder=2, alpha=0.0015)
        bins = [30, 30]
        disk_radius = 2
        h, x_edges, y_edges = np.histogram2d(
            x1, x2, bins=bins, density=False,
            range=[[min_x1, max_x1], [min_x2, max_x2]])
        h = h.astype(np.uint8)
        hist_img = windowed_histogram(h, disk(disk_radius))
        hist_img_max = h.copy()
        for ix, iy in np.ndindex(hist_img_max.shape):
            hist_img_max[ix, iy] = np.sum([
                NORMALIZATION_FACTOR * i_elem * elem
                for i_elem, elem in enumerate(hist_img[ix][iy])
            ])
        h = hist_img_max.T
        # if not vmax:
        #     vmax = np.max(h)
        m1, m2 = np.meshgrid(x_edges, y_edges)
        logger.debug(f'shape: {m1.shape} vs {m2.shape} vs {h.shape}')
        ax.pcolormesh(m1, m2, h, zorder=1, vmin=0.0, cmap='gray_r')
        ax.set(aspect='equal')
        ax.set_title(title, fontsize=8)
        metrics.append(h.reshape(-1))
    distance_matrix = squareform(pdist(np.array(metrics))) / NORMALIZATION_FACTOR
    logger.debug(f'distance matrix: {distance_matrix}')
    if sup_title:
        # sup_title = sup_title + '\nDistances from the first picture: ' + \
        #     np.array2string(distance_matrix[0], precision=1)
        fig.suptitle(sup_title, fontsize=8)
    plt.savefig(result_path, dpi=1200, frameon=True, bbox_inches=None)
    # plt.show()

    fig, axes = plt.subplots(nrows=1, ncols=1,
                             sharex=True, sharey=True, squeeze=False)
    axes1d = axes.flatten()
    mng = plt.get_current_fig_manager()
    mng.resize(*mng.window.maxsize())
    part2_x = [np.array(metrics)]
    part2_tsne = fit_tsne(part2_x)
    logger.debug(f'tsne: {part2_tsne}')
    p2_x1, p2_x2 = zip(*part2_tsne[0])
    colours = ['red', 'green', 'blue', 'black', 'magenta', 'yellow', 'purple']
    color = [i for s in [[c] * out_shape[1] for c in colours[:out_shape[0]]]
             for i in s]
    for ax in axes1d:
        ax.scatter(p2_x1, p2_x2, c=range(len(t_list)), cmap='gray_r')  # c=color
        for i, (x, y, title) in enumerate(zip(p2_x1, p2_x2, t_list)):
            ax.text(x, y, title)
    result_path_dynamics = f'{result_path[:-4]}_dynamics.png'
    plt.savefig(result_path_dynamics, dpi='figure', frameon=True,
                bbox_inches=None)

    animated_path_dynamics = f'{result_path[:-4]}_animated_dynamics.mp4'
    animate_part2_tsne = True
    fps = 10
    dpi = 300
    if animate_part2_tsne:
        writer = FFMpegWriter(fps=fps)
        fig1 = plt.figure()
        margin = 2
        min_x1 = min(p2_x1) - margin
        min_x2 = min(p2_x2) - margin
        max_x1 = max(p2_x1) + margin
        max_x2 = max(p2_x2) + margin
        l, = plt.plot([], [], 'k-o')
        plt.xlim(min_x1, max_x1)
        plt.ylim(min_x2, max_x2)
        with writer.saving(fig1, animated_path_dynamics, dpi):
            for i in tqdm(range(len(p2_x1))):
                l.set_data(p2_x1[max(0, i - 5):i], p2_x2[max(0, i - 5):i])
                writer.grab_frame()
def draw_X(Xs, result_path, out_shape=None, agg_shape=None,
           drawing_subsamples=False, sup_title=None, validation=None):
    if not out_shape:
        out_shape = (1, len(Xs))
    if not agg_shape:
        agg_shape = (1, 1)
    estimated_series_size = agg_shape[1] * out_shape[1]
    assert agg_shape[0] * agg_shape[1] * out_shape[0] * out_shape[1] == len(Xs), \
        f'{agg_shape[0] * agg_shape[1] * out_shape[0] * out_shape[1]} vs {len(Xs)}'
    fig_rows, fig_cols = out_shape
    validation_correct = parse_validation(validation)
    if validation_correct:
        fig_cols += 1
    plt.rcParams.update({'font.size': 7})
    fig, axes = plt.subplots(nrows=fig_rows, ncols=fig_cols,
                             sharex=True, sharey=True, squeeze=False)
    mng = plt.get_current_fig_manager()
    mng.resize(*mng.window.maxsize())
    mng.window.state('zoomed')
    axes1d = axes.flatten()
    r = np.arange(len(Xs)).reshape(out_shape[0] * agg_shape[0],
                                   out_shape[1] * agg_shape[1])
    a1, a2 = agg_shape
    indices = [
        r[a1 * i:a1 * (i + 1), a2 * j:a2 * (j + 1)].reshape(-1)
        for i, j in product(range(out_shape[0]), range(out_shape[1]))
    ]
    x_list = []
    t_list = []
    for i in indices:
        x_list.append(sum((Xs[j] for j in i), []))
        title = f'S{1 + (i[0] // estimated_series_size)}_D{1 + (i[0] % estimated_series_size)}'
        t_list.append(title)
    if drawing_subsamples:
        min_samples = np.inf
        for xi in x_list:
            logger.debug(f'checking minimum len(x). current: {len(xi)}, '
                         f'minimum so far: {min_samples}')
            min_samples = min(min_samples, len(xi))
        if sup_title:
            sup_title = sup_title + f' remaining pts: {min_samples}'
    metrics = []
    min_x1, min_x2, max_x1, max_x2 = reduce(
        lambda cum, cur: (min(cum[0], min(cur[0])),
                          min(cum[1], min(cur[1])),
                          max(cum[2], max(cur[0])),
                          max(cum[3], max(cur[1]))),
        zip(*x_list),
        (np.inf, np.inf, -np.inf, -np.inf))
    logger.debug(f'limits: {(min_x1, min_x2, max_x1, max_x2)}')
    vmax = None
    for ax, title, x in zip(axes1d, t_list, x_list):
        x1, x2 = zip(*x)
        if drawing_subsamples:
            together = list(zip(x1, x2))
            shuffle(together)
            x1, x2 = zip(*together)
            x1 = x1[:min_samples]
            x2 = x2[:min_samples]
            logger.debug(f'remaining pts: {len(x1)}')
        ax.scatter(x1, x2, c='red', s=1, zorder=2, alpha=0.0015)
        bins = [30, 30]
        disk_radius = 2
        h, x_edges, y_edges = np.histogram2d(
            x1, x2, bins=bins, density=False,
            range=[[min_x1, max_x1], [min_x2, max_x2]])
        h = h.astype(np.uint8)
        hist_img = windowed_histogram(h, disk(disk_radius))
        hist_img_max = h.copy()
        for ix, iy in np.ndindex(hist_img_max.shape):
            hist_img_max[ix, iy] = np.sum([
                NORMALIZATION_FACTOR * i_elem * elem
                for i_elem, elem in enumerate(hist_img[ix][iy])
            ])
        h = hist_img_max.T
        if not vmax:
            vmax = np.max(h)
        m1, m2 = np.meshgrid(x_edges, y_edges)
        logger.debug(f'shape: {m1.shape} vs {m2.shape} vs {h.shape}')
        ax.pcolormesh(m1, m2, h, zorder=1, vmin=0.0, vmax=vmax, cmap='gray_r')
        ax.set(aspect='equal')
        ax.set_title(title, fontsize=8)
        metrics.append(h.reshape(-1))
    distance_matrix = squareform(pdist(np.array(metrics))) / NORMALIZATION_FACTOR
    logger.debug(f'distance matrix: {distance_matrix}')
    if sup_title:
        # sup_title = sup_title + '\nDistances from the first picture: ' + \
        #     np.array2string(distance_matrix[0], precision=1)
        fig.suptitle(sup_title, fontsize=8)
    plt.savefig(result_path, dpi=1200, frameon=True, bbox_inches=None)
    # plt.show()

    fig, axes = plt.subplots(nrows=1, ncols=1,
                             sharex=True, sharey=True, squeeze=False)
    axes1d = axes.flatten()
    mng = plt.get_current_fig_manager()
    mng.resize(*mng.window.maxsize())
    part2_x = [np.array(metrics)]
    part2_tsne = fit_tsne(part2_x)
    logger.debug(f'tsne: {part2_tsne}')
    p2_x1, p2_x2 = zip(*part2_tsne[0])
    kf = out_shape[1] // agg_shape[0]
    colours = ['red', 'green', 'blue', 'black', 'magenta', 'yellow']
    color = [i for s in [[c] * out_shape[1] for c in colours[:out_shape[0]]]
             for i in s]
    for ax in axes1d:
        ax.scatter(p2_x1, p2_x2, c=color)
        for i, (x, y) in enumerate(zip(p2_x1, p2_x2)):
            ax.text(x, y, f'S{1 + (i // kf)}_D{1 + (i % kf)}')
    result_path_dynamics = f'{result_path[:-4]}_dynamics.png'
    plt.savefig(result_path_dynamics, dpi='figure', frameon=True,
                bbox_inches=None)
def get_hits(self, img, print_debug=False):
    pix_per_cell = self._feature_parameters['hog_pix_per_cell']
    x_cells_per_window = self._shape[1] // pix_per_cell - 1
    y_cells_per_window = self._shape[0] // pix_per_cell - 1

    # (scale, overlap, x_range, y_range)
    scales = [
        (2.0, 0.0, [1/4, 3/4], [.55, .64]),
        (64/48, 0.5, [0, 1], [.5, .75]),
        (1.0, 0.5, [1/3, 2/3], [.55, .9]),
        (4/7, 0.75, [0, 1], [.5, .875]),
        (0.5, 0.75, [0, 1], [.5, .875]),
    ]

    hits = []
    if self._feature_parameters['spatial_size']:
        spatial_scale_x = self._feature_parameters['spatial_size'][0] / self._shape[0]
        spatial_scale_y = self._feature_parameters['spatial_size'][1] / self._shape[1]

    for scale, overlap, x_range, y_range in scales:
        # time.clock() was removed in Python 3.8; perf_counter() replaces it
        start_time = time.perf_counter()
        start_hits = len(hits)

        # Calculate ROI to avoid processing more than we have to
        roi_x = (int(x_range[0] * img.shape[1]), int(x_range[1] * img.shape[1]))
        roi_y = (int(y_range[0] * img.shape[0]), int(y_range[1] * img.shape[0]))
        roi = img[roi_y[0]:roi_y[1], roi_x[0]:roi_x[1], :]

        # Scale the ROI
        scaled_shape = (int(roi.shape[1] * scale), int(roi.shape[0] * scale))
        scaled_roi = cv2.resize(roi, scaled_shape)

        # Calculate HOG features for the whole scaled ROI at once
        if self._feature_parameters['hog_channel'] == 'ALL':
            hog = [get_hog_features(scaled_roi[:, :, c],
                                    orient=self._feature_parameters['hog_orient'],
                                    pix_per_cell=self._feature_parameters['hog_pix_per_cell'],
                                    cell_per_block=self._feature_parameters['hog_cell_per_block'],
                                    feature_vec=False)
                   for c in range(scaled_roi.shape[-1])]
        else:
            c = self._feature_parameters['hog_channel']
            hog = [get_hog_features(scaled_roi[:, :, c],
                                    orient=self._feature_parameters['hog_orient'],
                                    pix_per_cell=self._feature_parameters['hog_pix_per_cell'],
                                    cell_per_block=self._feature_parameters['hog_cell_per_block'],
                                    feature_vec=False)]
        hog_shape = hog[0].shape

        # Calculate color features for the whole scaled ROI at once
        hist_bins = self._feature_parameters['hist_bins']
        if hist_bins > 0:
            histo = [windowed_histogram(
                         (scaled_roi[:, :, c] * 255 / 256 * hist_bins).astype(np.uint8),
                         selem=np.ones(self._shape),
                         shift_x=-self._shape[1] / 2,
                         shift_y=-self._shape[0] / 2,
                         n_bins=self._feature_parameters['hist_bins'])
                     for c in range(scaled_roi.shape[-1])]

        # Rescale the whole ROI for spatial features
        if self._feature_parameters['spatial_size']:
            spatial_shape = (int(scaled_shape[0] * spatial_scale_y),
                             int(scaled_shape[1] * spatial_scale_x))
            spatial = cv2.resize(scaled_roi, spatial_shape)

        # Calculate bounds for iterating over the HOG feature image
        x_start = 0
        x_stop = hog_shape[1] - x_cells_per_window + 1
        x_step = int((1 - overlap) * x_cells_per_window)
        y_start = 0
        y_stop = hog_shape[0] - y_cells_per_window + 1
        y_step = int((1 - overlap) * y_cells_per_window)

        for x in range(x_start, x_stop, x_step):
            for y in range(y_start, y_stop, y_step):
                # Extract color features
                if self._feature_parameters['hist_bins'] > 0:
                    color_features = np.ravel(
                        [h[(y * pix_per_cell), (x * pix_per_cell), :].ravel()
                         for h in histo])
                else:
                    color_features = []

                # Extract spatial features
                if self._feature_parameters['spatial_size']:
                    spatial_start_x = int(x * pix_per_cell * spatial_scale_x)
                    spatial_end_x = spatial_start_x + self._feature_parameters['spatial_size'][0]
                    spatial_start_y = int(y * pix_per_cell * spatial_scale_y)
                    spatial_end_y = spatial_start_y + self._feature_parameters['spatial_size'][1]
                    spatial_patch = spatial[spatial_start_y:spatial_end_y,
                                            spatial_start_x:spatial_end_x, :]
                    spatial_features = np.ravel(spatial_patch)
                else:
                    spatial_features = []

                # Extract HOG features
                hog_features = np.ravel(
                    [h[y:y + y_cells_per_window, x:x + x_cells_per_window].ravel()
                     for h in hog])

                # Create window (in unscaled image dimensions)
                window_start = (roi_x[0] + int(x / scale * pix_per_cell),
                                roi_y[0] + int(y / scale * pix_per_cell))
                window_end = (int(window_start[0] + self._shape[1] / scale),
                              int(window_start[1] + self._shape[0] / scale))

                # Vectorize features
                features = np.concatenate((spatial_features, color_features,
                                           hog_features))
                features = features.reshape(1, -1)
                features = self._scaler.transform(features)

                # Check if the window is a vehicle
                carness = self._classifier.decision_function(features)
                if carness > 0.3:
                    hits.append((window_start, window_end, scale ** 2))

        end_time = time.perf_counter()
        if print_debug:
            print("Scale {:.2f} found {} hits in {} seconds".format(
                scale, len(hits) - start_hits, end_time - start_time))

    return hits