def showNonOpponency(C, theta):
    """Generate backprojected and cortical views for the magnocellular pathway
    retinal ganglion cells.

    Args:
        C (vector): imagevector sampled with the sharp retina
        theta (float): rectification threshold

    Returns:
        merged: merged backprojected view as a numpy image array
        mergecort: merged cortical view as a numpy image array
    """
    GI = retina.gauss_norm_img(x, y, dcoeff[i], dloc[i], imsize=imgsize, rgb=False)
    # Sample using the other receptive field; note there is no temporal response with still images
    S = retina.sample(img, x, y, dcoeff[i], dloc[i], rgb=True)

    # rectify and backproject the imagevectors
    ncentreV, nsurrV = rgc.nonopponency(C, S, theta)
    ninverse = retina.inverse(ncentreV, x, y, dcoeff[i], dloc[i], GI, imsize=imgsize, rgb=True)
    ninv_crop = retina.crop(ninverse, x, y, dloc[i])
    ninverse2 = retina.inverse(nsurrV, x, y, dcoeff[i], dloc[i], GI, imsize=imgsize, rgb=True)
    ninv_crop2 = retina.crop(ninverse2, x, y, dloc[i])

    # place descriptive text onto the generated images
    cv2.putText(ninv_crop, "R+G + ", (xx, yy), font, 1, (255, 255, 255), 2)
    cv2.putText(ninv_crop2, "R+G - ", (xx, yy), font, 1, (255, 255, 255), 2)
    merged = np.concatenate((ninv_crop, ninv_crop2), axis=1)

    # create cortical maps of the imagevectors
    lposnon, rposnon = cortex.cort_img(ncentreV, L, L_loc, R, R_loc, cort_size, G)
    lnegnon, rnegnon = cortex.cort_img(nsurrV, L, L_loc, R, R_loc, cort_size, G)
    pos_cort_img = np.concatenate((np.rot90(lposnon), np.rot90(rposnon, k=3)), axis=1)
    neg_cort_img = np.concatenate((np.rot90(lnegnon), np.rot90(rnegnon, k=3)), axis=1)
    mergecort = np.concatenate((pos_cort_img, neg_cort_img), axis=1)
    return merged, mergecort
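
# Usage sketch (not from the original source): these listings assume the usual module
# imports -- numpy as np, cv2, and the project's retina, cortex and rgc modules -- plus
# module-level state prepared elsewhere in the script: img, x, y, dcoeff, dloc, i,
# imgsize, font, xx, yy and the cortex fields L, L_loc, R, R_loc, cort_size, G.
# Assuming those exist, a minimal driver for showNonOpponency could look like:
#
#   C = retina.sample(img, x, y, dcoeff[i], dloc[i], rgb=True)   # sharp-retina imagevector
#   merged, mergecort = showNonOpponency(C, theta=0.0)
#   cv2.imshow("non-opponent backprojection", merged)
#   cv2.imshow("non-opponent cortex", mergecort)
#   cv2.waitKey(0)
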
def showBPImg(pV, nV):
    """Generate rectified backprojected views of all opponent retinal ganglion cells.

    Args:
        pV (vector): positive rectified imagevector
        nV (vector): negative rectified imagevector

    Returns:
        merge: merged image of all backprojected opponent cells as a numpy image array
    """
    # object arrays holding the positive and negative backprojected images
    inv_crop = np.empty(8, dtype=object)
    inv_crop2 = np.empty(8, dtype=object)
    for t in range(8):
        # backproject each opponent cell type
        inverse = retina.inverse(pV[:, t, :], x, y, dcoeff[i], dloc[i], GI, imsize=imgsize, rgb=True)
        inv_crop[t] = retina.crop(inverse, x, y, dloc[i])
        inverse2 = retina.inverse(nV[:, t, :], x, y, dcoeff[i], dloc[i], GI, imsize=imgsize, rgb=True)
        inv_crop2[t] = retina.crop(inverse2, x, y, dloc[i])
        # place descriptive text onto the generated images
        cv2.putText(inv_crop[t], types[t] + " + ", (1, 270), font, 1, (0, 255, 255), 2)
        cv2.putText(inv_crop2[t], types[t] + " - ", (1, 270), font, 1, (0, 255, 255), 2)

    # stack all images into a grid
    posRG = np.vstack(inv_crop[:4])
    negRG = np.vstack(inv_crop2[:4])
    posYB = np.vstack(inv_crop[4:])
    negYB = np.vstack(inv_crop2[4:])
    merge = np.concatenate((posRG, negRG, posYB, negYB), axis=1)
    return merge
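
# Usage sketch (not from the original source): pV and nV are assumed to hold the
# positive and negative rectified responses for the eight opponent cell species listed
# in the module-level `types` array, indexed as pV[:, t, :]. Given such vectors:
#
#   merge = showBPImg(pV, nV)
#   cv2.imshow("opponent cell backprojections", merge)
#   cv2.waitKey(0)
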
def showNonOpponency(C, theta):
    """Generate backprojected and cortical views for the magnocellular pathway
    retinal ganglion cells.

    Args:
        C (vector): imagevector sampled with the sharp retina
        theta (float): rectification threshold

    Returns:
        merged: merged backprojected view as a numpy image array
        mergecort: merged cortical view as a numpy image array
    """
    # Sample using the other receptive field, but with a temporally different image, lateimg
    S = retina.sample(lateimg, x, y, dcoeff[i], dloc[i], rgb=True)

    # rectify and backproject the imagevectors
    ncentreV, nsurrV = rgc.nonopponency(C, S, theta)
    ninverse = retina.inverse(ncentreV, x, y, dcoeff[i], dloc[i], GI, imsize=imgsize, rgb=False)
    ninv_crop = retina.crop(ninverse, x, y, dloc[i])
    ninverse2 = retina.inverse(nsurrV, x, y, dcoeff[i], dloc[i], GI, imsize=imgsize, rgb=False)
    ninv_crop2 = retina.crop(ninverse2, x, y, dloc[i])
    merged = np.concatenate((ninv_crop, ninv_crop2), axis=1)

    # create cortical maps of the imagevectors
    lposnon, rposnon = cortex.cort_img(ncentreV, L, L_loc, R, R_loc, cort_size, G)
    lnegnon, rnegnon = cortex.cort_img(nsurrV, L, L_loc, R, R_loc, cort_size, G)
    pos_cort_img = np.concatenate((np.rot90(lposnon), np.rot90(rposnon, k=3)), axis=1)
    neg_cort_img = np.concatenate((np.rot90(lnegnon), np.rot90(rnegnon, k=3)), axis=1)
    mergecort = np.concatenate((pos_cort_img, neg_cort_img), axis=1)
    return merged, mergecort
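
# Usage sketch (not from the original source): this variant computes a temporal response,
# so `img` and `lateimg` are assumed to be two frames captured a short time apart, e.g.
#
#   cap = cv2.VideoCapture(0)
#   _, img = cap.read()
#   _, lateimg = cap.read()
#   C = retina.sample(img, x, y, dcoeff[i], dloc[i], rgb=True)
#   merged, mergecort = showNonOpponency(C, theta=0.0)
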
def speedup(loc, coeff, img, rgb, show_res):
    '''
    This test measures the performance of the two implementations, from
    initialisation to the end of the cortical transform
    '''
    # retina initialisation, sampling and backprojection, CPU vs CUDA
    init_p = time.time()
    GI = retina.gauss_norm_img(int(img.shape[1] / 2), int(img.shape[0] / 2), coeff, loc, img.shape, rgb)
    init_c = time.time()
    ret = retina_cuda.create_retina(
        loc, coeff, img.shape, (int(img.shape[1] / 2), int(img.shape[0] / 2)))
    sample_p = time.time()
    V_p = retina.sample(img, img.shape[1] / 2, img.shape[0] / 2, coeff, loc, rgb)
    sample_c = time.time()
    V_c = ret.sample(img)
    invert_p = time.time()
    inv_p = retina.inverse(V_p, img.shape[1] / 2, img.shape[0] / 2, coeff, loc, GI, img.shape, rgb)
    invert_c = time.time()
    inv_c = ret.inverse(V_c)
    retina_end = time.time()

    # cortex initialisation and cortical transform, CPU vs CUDA
    cort_init_p = time.time()
    L, R = cortex.LRsplit(loc)
    L_loc, R_loc = cortex.cort_map(L, R)
    L_loc, R_loc, G, cort_size = cortex.cort_prepare(L_loc, R_loc)
    cort_init_c = time.time()
    cort = cortex_cuda.create_cortex_from_fields(loc, rgb=rgb)
    cort_img_p = time.time()
    l_p, r_p = cortex.cort_img(V_p, L, L_loc, R, R_loc, cort_size, G)
    cort_img_c = time.time()
    l_c = cort.cort_image_left(V_c)
    r_c = cort.cort_image_right(V_c)
    cort_end = time.time()

    # print the elapsed time of each stage as one CSV row
    print '%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,' % (init_c - init_p, sample_p - init_c, sample_c - sample_p,
                                              invert_p - sample_c, invert_c - invert_p, retina_end - invert_c,
                                              cort_init_c - cort_init_p, cort_img_p - cort_init_c,
                                              cort_img_c - cort_img_p, cort_end - cort_img_c)

    if show_res:
        cv2.namedWindow("inverse CUDA", cv2.WINDOW_NORMAL)
        cv2.imshow("inverse CUDA", inv_c)
        cv2.namedWindow("inverse Piotr", cv2.WINDOW_NORMAL)
        cv2.imshow("inverse Piotr", inv_p)
        c_c = np.concatenate((np.rot90(l_c), np.rot90(r_c, k=3)), axis=1)
        c_p = np.concatenate((np.rot90(l_p), np.rot90(r_p, k=3)), axis=1)
        cv2.namedWindow("cortex CUDA", cv2.WINDOW_NORMAL)
        cv2.imshow("cortex CUDA", c_c)
        cv2.namedWindow("cortex Piotr", cv2.WINDOW_NORMAL)
        cv2.imshow("cortex Piotr", c_p)
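
# Usage sketch (not from the original source): `loc` and `coeff` are the receptive-field
# location and coefficient arrays; how they are loaded depends on the project (pickled
# numpy arrays are assumed here, and the filenames are hypothetical).
#
#   import pickle
#   with open('ret_loc.pkl', 'rb') as f:
#       loc = pickle.load(f)
#   with open('ret_coeff.pkl', 'rb') as f:
#       coeff = pickle.load(f)
#   img = cv2.imread('test.png', cv2.IMREAD_GRAYSCALE)
#   for _ in range(10):   # repeat to average out warm-up effects
#       speedup(loc, coeff, img, rgb=False, show_res=False)
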
def showBPImg(pV, nV, t):
    """Generate rectified backprojected views of one opponent retinal ganglion cell.

    Args:
        pV (vector): positive rectified imagevector
        nV (vector): negative rectified imagevector
        t (int): index of the opponent cell species

    Returns:
        merge: merged image of the backprojected opponent cell as a numpy image array
    """
    # backproject the positive and negative imagevectors
    inverse = retina.inverse(pV[:, t, :], x, y, dcoeff[i], dloc[i], GI, imsize=imgsize, rgb=True)
    inv_crop = retina.crop(inverse, x, y, dloc[i])
    inverse2 = retina.inverse(nV[:, t, :], x, y, dcoeff[i], dloc[i], GI, imsize=imgsize, rgb=True)
    inv_crop2 = retina.crop(inverse2, x, y, dloc[i])

    # place descriptive text onto the generated images
    cv2.putText(inv_crop, types[t] + " + ", (1, 270), font, 1, (0, 255, 255), 2)
    cv2.putText(inv_crop2, types[t] + " - ", (1, 270), font, 1, (0, 255, 255), 2)
    merge = np.concatenate((inv_crop, inv_crop2), axis=1)
    return merge
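
# Usage sketch (not from the original source): with this single-cell variant the caller
# chooses which opponent species to display; cycling through all eight could look like:
#
#   for t in range(8):
#       cv2.imshow(types[t], showBPImg(pV, nV, t))
#   cv2.waitKey(0)
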
def preview():
    stdimg_dir = os.path.join(os.getcwd(), 'testimage') + os.sep
    name = os.listdir(stdimg_dir)[0]
    print "Using " + name
    standard_image = cv2.imread(stdimg_dir + name)
    # convert to grayscale, then normalise to the [0, 1] float range
    img = cv2.cvtColor(standard_image, cv2.COLOR_BGR2GRAY)
    img = cv2.normalize(img.astype('float'), None, 0.0, 1.0, cv2.NORM_MINMAX)
    x, y = img.shape[1] / 2, img.shape[0] / 2
    size = img.shape

    # sample, backproject and crop using the Ozimek receptive fields
    oz_V = retina.sample(img, x, y, ozimek_coeff, ozimek_loc, rgb=False)
    oz_GI = retina.gauss_norm_img(x, y, ozimek_coeff, ozimek_loc, imsize=size)
    oz_I = retina.inverse(oz_V, x, y, ozimek_coeff, ozimek_loc, oz_GI, imsize=size, rgb=False)
    oz_I_crop = retina.crop(oz_I, x, y, ozimek_loc)
    oz_GI_crop = retina.crop(oz_GI, x, y, ozimek_loc)

    # test application of the retinal receptive field
    plt.figure(figsize=(6, 6), num="Test application of retinal field")
    plt.axis('off')
    plt.imshow(oz_I_crop, cmap='gray')
    plt.show()

    # heatmap of the retinal receptive field
    plt.figure(figsize=(6, 6), num="Heatmap of retina")
    plt.axis('off')
    plt.imshow(oz_GI_crop, cmap='RdBu')
    plt.show()
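
# Usage sketch (not from the original source): preview() expects a 'testimage' folder in
# the current working directory containing at least one image, and assumes the
# module-level globals ozimek_coeff and ozimek_loc already hold the receptive-field
# coefficient and location arrays. A minimal driver could be:
#
#   if not os.listdir(os.path.join(os.getcwd(), 'testimage')):
#       raise IOError('place at least one test image in ./testimage')
#   preview()   # shows the backprojected image and the receptive-field heatmap
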
def correctness_test(loc, coeff, cap, rgb=False):
    '''
    The CUDA code uses minimal initialisation from the host; all tractable values are
    computed on the GPU. Get an image from the camera, generate the inverse and cortical
    images with both implementations and subtract the results
    '''
    r, img = cap.read()
    if not rgb:
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # create CUDA objects to pass to the evaluation
    ret = retina_cuda.create_retina(
        loc, coeff, img.shape, (int(img.shape[1] / 2), int(img.shape[0] / 2)), None)
    cort = cortex_cuda.create_cortex_from_fields(loc, rgb=rgb)

    while ord('q') != cv2.waitKey(10):
        r, img = cap.read()
        if not rgb:
            img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        if r:
            '''
            Sample the image img with the CUDA retina ret, inverse transform it with ret
            and create the cortical image with the CUDA cortex cort.
            Sample and generate the retinal and cortical images from img with Piotr's code.
            Visually compare the results by showing the subtraction of the generated images
            '''
            V_c = ret.sample(img)  # sample with CUDA
            inv_c = ret.inverse(V_c)  # inverse with CUDA
            l_c = cort.cort_image_left(V_c)  # left cortical image CUDA
            r_c = cort.cort_image_right(V_c)  # right cortical image CUDA
            c_c = np.concatenate(
                (np.rot90(l_c), np.rot90(r_c, k=3)),
                axis=1)  # concatenate the results into one image

            # create Piotr's retinal and cortical images
            GI = retina.gauss_norm_img(int(img.shape[1] / 2), int(img.shape[0] / 2), coeff, loc, img.shape, rgb)
            L, R = cortex.LRsplit(loc)
            L_loc, R_loc = cortex.cort_map(L, R)
            L_loc, R_loc, G, cort_size = cortex.cort_prepare(L_loc, R_loc)
            V_p = retina.sample(img, img.shape[1] / 2, img.shape[0] / 2, coeff, loc, rgb)
            inv_p = retina.inverse(V_p, img.shape[1] / 2, img.shape[0] / 2, coeff, loc, GI, img.shape, rgb)
            l_p, r_p = cortex.cort_img(V_p, L, L_loc, R, R_loc, cort_size, G)
            c_p = np.concatenate(
                (np.rot90(l_p[:l_c.shape[0], :]), np.rot90(r_p[:r_c.shape[0], :], k=3)),
                axis=1)

            # show CUDA results
            cv2.namedWindow("inverse CUDA", cv2.WINDOW_NORMAL)
            cv2.imshow("inverse CUDA", inv_c)
            cv2.namedWindow("cortex CUDA", cv2.WINDOW_NORMAL)
            cv2.imshow("cortex CUDA", c_c)

            # show Piotr's results
            cv2.namedWindow("inverse Piotr", cv2.WINDOW_NORMAL)
            cv2.imshow("inverse Piotr", inv_p)
            cv2.namedWindow("cortex Piotr", cv2.WINDOW_NORMAL)
            cv2.imshow("cortex Piotr", c_p)

            # show the squared difference of the images
            cv2.namedWindow("inverse diff", cv2.WINDOW_NORMAL)
            cv2.imshow("inverse diff", np.power((inv_c - inv_p), 2) * 255)
            cv2.namedWindow("cortex diff", cv2.WINDOW_NORMAL)
            cv2.imshow("cortex diff", np.power((c_c - c_p), 2) * 255)
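
# Usage sketch (not from the original source): the test is driven from a live capture;
# press 'q' in any of the OpenCV windows to stop the loop. The location and coefficient
# arrays are assumed to be loaded the same way as for the speedup test.
#
#   cap = cv2.VideoCapture(0)   # first available camera
#   correctness_test(loc, coeff, cap, rgb=False)
#   cap.release()
#   cv2.destroyAllWindows()
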