def __init__(self, agent, game, show=False):
    self._agent = agent
    self._game = game
    self.show = show
    if show:
        # display the processed image on screen using OpenCV,
        # implemented as a Python coroutine
        self._display = show_img()
        self._display.__next__()  # initialize the display coroutine
import cv2
import matplotlib.pyplot as plt

from edge import get_edge_img
from multi_res import get_mr_img_from_rgb_img
from utilities import show_img
from multi_res_lic import get_mrl


def get_stroke_img(edge_img, mrl):
    # combine the edge image and the multi-resolution LIC image
    return edge_img + mrl


def get_stroke_img_from_rgb(img):
    mr_img = get_mr_img_from_rgb_img(img)
    _, _, edge_img = get_edge_img(mr_img)
    mrl_img = get_mrl(img)
    out = get_stroke_img(edge_img, mrl_img)
    return out


if __name__ == "__main__":
    img = cv2.imread("../images/temp.jpeg")
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

    mr_img = get_mr_img_from_rgb_img(img)
    _, _, edge_img = get_edge_img(mr_img, thresh=110, thresh2=0.5)
    mrl_img = get_mrl(img)
    out = get_stroke_img(edge_img, mrl_img)

    show_img(img, splt=221, gray=False)
    show_img(edge_img, splt=222)
    show_img(mrl_img, splt=223)
    show_img(out, splt=224)
    plt.show()
import cv2
import numpy as np
import matplotlib.pyplot as plt

from noise_pyramid import get_np_vec_from_rgb
from utilities import show_img
import vectorplot as vp


def get_lp(img):
    noise_py, vecs = get_np_vec_from_rgb(img)

    # triangular 1-D kernel used as the LIC convolution weights
    KW = 10
    kernel = np.arange(KW) + 1
    kernel = np.minimum(kernel, kernel[::-1])
    kernel = kernel / np.sum(kernel)
    kernel = kernel.astype(np.float32)

    lic_py = []
    for noise, vec in zip(noise_py, vecs):
        u, v = vec[..., 0], vec[..., 1]
        u = u.astype(np.float32)
        v = v.astype(np.float32)
        noise_f = noise.astype(np.float32)
        data = vp.line_integral_convolution(v, u, noise_f, kernel)
        lic_py.append(data)
    return lic_py


if __name__ == "__main__":
    img = cv2.imread("../images/temp.jpeg")
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    lic_py = get_lp(img)
    for i, im in enumerate(lic_py):
        show_img(im, splt=221 + i)
    plt.show()
    resized = cv2.resize(smoothed, None, fx=0.5, fy=0.5)
    if sm_size:
        resized = cv2.resize(
            resized, (im.shape[1], im.shape[0]), fx=2.0, fy=2.0)
        resized = (0.9 * resized).astype(np.uint8)
    result.append(resized)
    get_gp(resized, result, N - 1, sm_size=sm_size)


if __name__ == "__main__":
    img = cv2.imread('../images/temp.jpeg')
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

    plt.hist(img.ravel(), 256, [0, 256])
    plt.show()

    out = []
    N = 3
    get_gp(img, out, N)
    print(len(out))
    for ind, i in enumerate(out):
        show_img(i, splt=421 + 2 * ind, orig_contrast=True)
        show_img(i, splt=421 + 2 * ind + 1)
        print(i.shape)
    plt.show()

    for ind, i in enumerate(out):
        plt.subplot(221 + ind)
        plt.hist(i.ravel(), 256, [0, 256])
        print(i.shape)
    plt.show()
import cv2
import numpy as np
import matplotlib.pyplot as plt

from lic_pyramid import get_lp
from multi_res import get_mr_img
from utilities import show_img
# get_dmap (saliency-based draw map) is imported from its own module elsewhere in the project


def get_mrl(img):
    lic_py = get_lp(img)
    _, _, _, d_map = get_dmap(img)

    # resize every LIC level to the size of the finest level
    lic_sm = np.zeros((4, *lic_py[0].shape))
    sh = lic_py[0].shape
    for i in range(4):
        lic_sm[i] = cv2.resize(lic_py[i], (sh[1], sh[0]))

    mr_lic_img = get_mr_img(lic_sm, d_map)
    return mr_lic_img
    # [print(i.shape) for i in lic_sm]
    # n = 4
    # r = d_map * (n-1)
    # rin = r.astype(int)
    # a = r - rin


if __name__ == "__main__":
    img = cv2.imread("../images/squirrel.jpeg")
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    # img = cv2.resize(img, (300,300))
    out = get_mrl(img)
    show_img(out)
    plt.show()
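# --- Illustrative sketch, not part of the original sources ---
# get_mr_img lives in multi_res.py, which is not reproduced here. Judging from the
# commented-out hints above (r = d_map * (n-1); rin = r.astype(int); a = r - rin),
# it appears to blend adjacent pyramid levels per pixel according to the draw map.
# The function and variable names below are assumptions, not the project's API.
import numpy as np


def get_mr_img_sketch(levels, d_map):
    """Per-pixel linear interpolation between pyramid levels.

    levels : float array of shape (n, H, W), all levels resized to (H, W)
    d_map  : float array of shape (H, W) with values in [0, 1]
    """
    n = levels.shape[0]
    r = d_map * (n - 1)                      # fractional level index per pixel
    rin = np.minimum(r.astype(int), n - 2)   # lower level index (clamped)
    a = r - rin                              # weight toward the next-coarser level
    rows, cols = np.indices(d_map.shape)
    lower = levels[rin, rows, cols]
    upper = levels[rin + 1, rows, cols]
    return (1 - a) * lower + a * upper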
    # out_2 = avg_strength(out_1, thresh=-20)
    res = out_1.copy()
    print(np.amax(avg_img), np.amin(avg_img))
    res[avg_img > thresh] = 255
    # diff = out_1 - avg_img
    # print(np.amax(diff), np.amin(diff))
    # print(thresh)
    # res[diff > -thresh] = 255
    # res[np.where(diff > -20)] = 255
    return out_1, avg_img, res


if __name__ == "__main__":
    img = cv2.imread("../images/temp.jpeg")
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    # img = cv2.resize(img, (648, 292))
    out = get_mr_img_from_rgb_img(img)

    for i in range(50, 250, 10):
        plt.figure()
        plt.suptitle("E = " + str(i))
        edge_img1, edge_img2, diff = get_edge_img(out, thresh=i, thresh2=0.5)
        show_img(out, splt=221, title="Multi Resolution Image")
        show_img(edge_img1, splt=222, title="Edge Image 1")
        show_img(edge_img2, splt=223, title="Edge Image 2")
        show_img(diff, splt=224, title="Thresholded Edge Image")
        plt.show()
        sleep(5)
def get_np_vec_from_rgb(img):
    mc_gp = [img]
    get_gp(img, mc_gp, 2)

    out = []
    vecs = []
    for ind, im in enumerate(mc_gp):
        print(ind)
        img_gray = cv2.cvtColor(im, cv2.COLOR_RGB2GRAY)
        img_lab = cv2.cvtColor(im, cv2.COLOR_RGB2LAB)
        labels, label_counts = label_regions(
            img_lab, img_lab.shape[0] * img_lab.shape[1] // 8)
        vec = extract_region_vector_field(img_gray, labels, label_counts)
        im_noise = generate_noise_image(img_gray, labels, label_counts)
        out.append(im_noise)
        vecs.append(vec)
    return out, vecs


if __name__ == "__main__":
    img = cv2.imread('../images/elephant.png')
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    mc_gp = [img]
    get_gp(img, mc_gp, 2)
    out = get_np_from_rgb_mc_gp(mc_gp)
    for ind, i in enumerate(out):
        show_img(i, splt=221 + ind)
    plt.show()
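# --- Illustrative sketch, not part of the original sources ---
# label_regions, extract_region_vector_field and generate_noise_image are defined
# elsewhere in this module. As a rough idea of what per-region vectors and noise
# could look like, the sketch below derives one direction per labeled region from
# the average image gradient (rotated 90 degrees so strokes follow edges) and fills
# each region with binary noise whose density tracks the region's mean darkness.
# All names and formulas here are assumptions, not the project's implementation.
import cv2
import numpy as np


def region_vectors_and_noise_sketch(img_gray, labels, n_labels):
    gx = cv2.Sobel(img_gray, cv2.CV_32F, 1, 0, ksize=3)
    gy = cv2.Sobel(img_gray, cv2.CV_32F, 0, 1, ksize=3)
    vec = np.zeros((*img_gray.shape, 2), np.float32)
    noise = np.full(img_gray.shape, 255, np.uint8)
    rng = np.random.default_rng(0)
    for lab in range(n_labels):
        mask = labels == lab
        if not mask.any():
            continue
        # average gradient of the region, rotated 90 degrees -> stroke direction
        dx, dy = gx[mask].mean(), gy[mask].mean()
        norm = np.hypot(dx, dy) + 1e-6
        vec[mask] = (-dy / norm, dx / norm)
        # darker regions get denser noise (more black pixels)
        density = 1.0 - img_gray[mask].mean() / 255.0
        noise[mask] = np.where(rng.random(mask.sum()) < density, 0, 255)
    return vec, noise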
def classes_only(use_saved_model=True):
    '''
    just predictions printed out on images
    :return:
    '''
    global PRETRAINED_MODEL, IMG_SZ, BATCH_SIZE, tfms, md, learn, lr, lrs, y, x, fig, axes, i, ax, ima

    # load a model
    PRETRAINED_MODEL = resnet34
    IMG_SZ = 224
    BATCH_SIZE = 64
    tfms = tfms_from_model(PRETRAINED_MODEL, IMG_SZ, crop_type=CropType.NO)
    md = ImageClassifierData.from_csv(PATH, JPEGS, MC_CSV, tfms=tfms, bs=BATCH_SIZE)
    learn = ConvLearner.pretrained(PRETRAINED_MODEL, md)
    learn.opt_fn = optim.Adam

    if not use_saved_model:
        # find a learning rate
        lrf = learn.lr_find(1e-5, 100)
        learn.sched.plot(0)
        # based on the plot above, choose the following LR
        lr = 2e-2
        # train for a bit
        learn.fit(lr, 1, cycle_len=3, use_clr=(32, 5))
        # choose differential learning rates
        lrs = np.array([lr / 100, lr / 10, lr])
        # freeze all but the last 2 layers
        learn.freeze_to(-2)
        # find a better LR
        learn.lr_find(lrs / 1000)
        learn.sched.plot(0)
        # train some more
        learn.fit(lrs / 10, 1, cycle_len=5, use_clr=(32, 5))
        # save the trained model
        learn.save('mclas')

    # load the saved model
    learn.load('mclas')

    y = learn.predict()
    x, _ = next(iter(md.val_dl))
    x = to_np(x)

    fig, axes = plt.subplots(3, 4, figsize=(12, 8))
    for i, ax in enumerate(axes.flat):
        ima = md.val_ds.denorm(x)[i]
        ya = np.nonzero(y[i] > 0.4)[0]
        b = '\n'.join(md.classes[o] for o in ya)
        ax = ut.show_img(ima, ax=ax)
        draw_text(ax, (0, 0), b)
    plt.tight_layout()
    plt.subplots_adjust(top=4, right=3)
    for angle in angles:
        # plt.figure()
        # plt.suptitle("theta = " + str(angle))
        fil, ker = apply_gabor(img, angle)
        # keep the per-pixel maximum response over all orientations
        np.maximum(accum, fil, accum)
        res.append(fil)
        # show_img(img, splt=(n, 3, k), title="input")
        # show_img(ker, splt=(n, 3, k+1), title="filter")
        # show_img(fil, splt=(n, 1, k), title="output")
        k += 3
    plt.show()
    return res, accum


if __name__ == "__main__":
    img = cv2.imread('../images/elephant.png')
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # apply_gabor(img, None)

    angles = []
    for theta in np.arange(0, np.pi, np.pi / 16):
        angles.append(theta)

    out, fin = gabor_pyramid(img, angles)

    plt.figure()
    cv2.imwrite("../images/out.png", fin)
    show_img(fin)
    plt.show()

    plt.figure()
    for i in range(4):
        show_img(out[i], splt=221 + i)
    plt.show()
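# --- Illustrative sketch, not part of the original sources ---
# apply_gabor is defined elsewhere in this module and is not shown above. Assuming
# it builds a Gabor kernel for the given orientation and filters the grayscale
# image with it, a minimal version could look like this; the kernel parameters
# (ksize, sigma, lambd, gamma) are assumptions, not the project's actual values.
import cv2
import numpy as np


def apply_gabor_sketch(img_gray, theta, ksize=31, sigma=4.0, lambd=10.0, gamma=0.5):
    # build a real-valued Gabor kernel oriented along `theta` (radians)
    kernel = cv2.getGaborKernel((ksize, ksize), sigma, theta, lambd, gamma,
                                psi=0, ktype=cv2.CV_32F)
    kernel /= kernel.sum() + 1e-8  # normalize so flat regions stay roughly unchanged
    # convolve; the response highlights structures aligned with theta
    filtered = cv2.filter2D(img_gray.astype(np.float32), cv2.CV_32F, kernel)
    return filtered, kernel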
    if img.shape[0] < 257 or img.shape[1] < 257:
        img = cv2.resize(img, (300, 300))
    if img_path == "../images/castle.jpeg":
        print(img.shape)
        img = cv2.resize(img, None, fx=2.0, fy=2.0)

    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

    mr_img = get_mr_img_from_rgb_img(img)
    _, _, edge_img = get_edge_img(mr_img, thresh=110, thresh2=0.1)
    mrl_img = get_mrl(img)
    strk_img = get_stroke_img(edge_img, mrl_img)
    out = composite_paper(strk_img, bg_img, 0.4)
    edge_img = edge_img.astype(np.uint8)

    show_img(img, splt=321, gray=False, title="Input")
    show_img(edge_img, splt=322, title="Edge Image")
    show_img(mrl_img, splt=323, title="Multi-resolution LIC Image")
    show_img(strk_img, splt=324, title="Stroke Image")
    show_img(out, splt=325, title="Output")
    plt.show()

    plt.figure()
    plt.suptitle("Final Output")
    show_img(img, splt=121, title="Input")
    show_img(out, splt=122, title="Output")
    plt.show()

    plt.figure()
    for ind, i in enumerate(range(3, 9)):
        out = composite_paper(strk_img, bg_img, alpha=i / 10)
        show_img(out, splt=321 + ind)
    plt.show()
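# --- Illustrative sketch, not part of the original sources ---
# composite_paper is defined elsewhere in the project. Assuming it alpha-blends the
# stroke image onto a paper/background texture (bg_img), a minimal version could
# look like this; the resize-to-match step and the blend formula are assumptions.
import cv2
import numpy as np


def composite_paper_sketch(strokes, paper, alpha=0.4):
    # match the paper texture to the stroke image size
    paper = cv2.resize(paper, (strokes.shape[1], strokes.shape[0]))
    if paper.ndim == 3:
        paper = cv2.cvtColor(paper, cv2.COLOR_BGR2GRAY)
    strokes = strokes.astype(np.float32)
    paper = paper.astype(np.float32)
    # weighted blend: alpha controls how much the paper texture shows through
    out = (1.0 - alpha) * strokes + alpha * paper
    return np.clip(out, 0, 255).astype(np.uint8)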
if __name__ == '__main__':
    try:
        path = sys.argv[1]
    except IndexError:
        path = '../images/lena.jpg'

    print(cv2.__version__)
    img = cv2.imread(path)
    img = cv2.resize(img, (300, 300))

    saliency_map, binarized_map, salient_region, out = get_dmap(img, saliency_met=0)
    # plt.suptitle("Saliency M")
    show_img(cv2.cvtColor(img, cv2.COLOR_BGR2RGB), gray=False, splt=221, title="Input Image")
    show_img(saliency_map, title="Saliency Map", splt=222)
    show_img(binarized_map, title="Binarized saliency map", gray=False, splt=223)
    # show_img(cv2.cvtColor(salient_region, cv2.COLOR_BGR2RGB), gray=False)
    show_img(out, title="Draw Map", splt=224)
    plt.show()

    saliency_map, binarized_map, salient_region, out = get_dmap(img, saliency_met=1)
    plt.figure()
    plt.suptitle("Method 1")
    return out


if __name__ == "__main__":
    img = cv2.imread("../images/temp.jpeg")
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    # img = cv2.resize(img, (300,300))

    s_map, _, _, d_map = get_dmap(img)

    img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    mc_gauss_py = [img]
    get_gp(img, mc_gauss_py, 2, sm_size=True)

    tem = np.zeros((4, img.shape[0], img.shape[1]))
    for i in range(4):
        tem[i, :, :] = mc_gauss_py[i]
    # mc_gauss_py = np.concatenate(tuple(mc_gauss_py), axis=0)
    # mc_gauss_py = np.array(mc_gauss_py)
    # print('gvv', mc_gauss_py.shape)
    [print(i.shape) for i in mc_gauss_py]

    out = get_mr_img(tem, d_map)
    print(out.shape)

    show_img(img, splt=221, title="Input")
    show_img(s_map, splt=222, gray=True, title="Saliency map")
    show_img(d_map, splt=223, title="draw_map")
    show_img(out, splt=224, title="Multi resolution")
    plt.show()