def prepRF(p):
    """Pre-build the CUDA retina/cortex objects for parameter set *p*.

    Helper used before the main loop so the cortical map locations are
    generated up front rather than on the first frame.

    Parameters
    ----------
    p : int
        Index into the module-level ``loc``/``coeff`` (and ``dloc``/``dcoeff``)
        parameter lists.

    Returns
    -------
    tuple
        ``(retina_a, retina_b, cortex_a, cortex_b)`` — one retina/cortex pair
        per location table.
    """
    # Both retinas share the same image geometry and fovea (image centre).
    fovea = (int(img.shape[1] / 2), int(img.shape[0] / 2))
    retina_a = retina_cuda.create_retina(loc[p], coeff[p], img.shape, fovea)
    retina_b = retina_cuda.create_retina(dloc[p], dcoeff[p], img.shape, fovea)
    cortex_a = cortex_cuda.create_cortex_from_fields(loc[p], rgb=True)
    cortex_b = cortex_cuda.create_cortex_from_fields(dloc[p], rgb=True)
    return retina_a, retina_b, cortex_a, cortex_b
def speedup(loc, coeff, img, rgb, show_res):
    '''
    Benchmark Piotr's CPU implementation against the CUDA implementation,
    from initialisation through sampling, inverse transform and the cortical
    transform, printing one CSV row of stage timings.

    NOTE(review): each ``time.time()`` variable marks the START of the next
    stage, so e.g. ``init_c - init_p`` is Piotr's init time and
    ``sample_p - init_c`` is the CUDA retina creation time — the names are
    shifted by one stage relative to what they measure.
    '''
    init_p = time.time()
    # Piotr's (CPU) initialisation: Gaussian normalisation image.
    GI = retina.gauss_norm_img(int(img.shape[1] / 2), int(img.shape[0] / 2),
                               coeff, loc, img.shape, rgb)
    init_c = time.time()
    # CUDA initialisation: retina centred on the image midpoint.
    ret = retina_cuda.create_retina(
        loc, coeff, img.shape, (int(img.shape[1] / 2), int(img.shape[0] / 2)))
    sample_p = time.time()
    # Piotr's sampling.
    V_p = retina.sample(img, img.shape[1] / 2, img.shape[0] / 2, coeff, loc,
                        rgb)
    sample_c = time.time()
    # CUDA sampling.
    V_c = ret.sample(img)
    invert_p = time.time()
    # Piotr's inverse transform.
    inv_p = retina.inverse(V_p, img.shape[1] / 2, img.shape[0] / 2, coeff,
                           loc, GI, img.shape, rgb)
    invert_c = time.time()
    # CUDA inverse transform.
    inv_c = ret.inverse(V_c)
    retina_end = time.time()
    cort_init_p = time.time()
    # Piotr's cortex initialisation.
    L, R = cortex.LRsplit(loc)
    L_loc, R_loc = cortex.cort_map(L, R)
    L_loc, R_loc, G, cort_size = cortex.cort_prepare(L_loc, R_loc)
    cort_init_c = time.time()
    # CUDA cortex initialisation.
    cort = cortex_cuda.create_cortex_from_fields(loc, rgb=rgb)
    cort_img_p = time.time()
    # Piotr's cortical image.
    l_p, r_p = cortex.cort_img(V_p, L, L_loc, R, R_loc, cort_size, G)
    cort_img_c = time.time()
    # CUDA cortical images (left/right hemifields).
    l_c = cort.cort_image_left(V_c)
    r_c = cort.cort_image_right(V_c)
    cort_end = time.time()
    # CSV row: alternating Piotr/CUDA durations for each stage (see NOTE above).
    print '%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,' % (init_c - init_p, sample_p - init_c, sample_c - sample_p, \
        invert_p - sample_c, invert_c - invert_p, retina_end - invert_c,\
        cort_init_c - cort_init_p, cort_img_p - cort_init_c, cort_img_c - cort_img_p, cort_end - cort_img_c)
    if show_res:
        # Side-by-side visual sanity check of both backends' outputs.
        cv2.namedWindow("inverse CUDA", cv2.WINDOW_NORMAL)
        cv2.imshow("inverse CUDA", inv_c)
        cv2.namedWindow("inverse Piotr", cv2.WINDOW_NORMAL)
        cv2.imshow("inverse Piotr", inv_p)
        # Rotate hemifields so they sit upright and join into one image.
        c_c = np.concatenate((np.rot90(l_c), np.rot90(r_c, k=3)), axis=1)
        c_p = np.concatenate((np.rot90(l_p), np.rot90(r_p, k=3)), axis=1)
        cv2.namedWindow("cortex CUDA", cv2.WINDOW_NORMAL)
        cv2.imshow("cortex CUDA", c_c)
        cv2.namedWindow("cortex Piotr", cv2.WINDOW_NORMAL)
        cv2.imshow("cortex Piotr", c_p)
def ideal_usage_cam(loc, coeff, img_size, stop=-1, show_res=False, rgb=False):
    '''
    Best-case usage demo: create the CUDA retina and cortex ONCE and reuse
    them for every camera frame, printing per-frame stage timings.

    Parameters: ``loc``/``coeff`` are the retinal field tables, ``img_size``
    the (width, height) frames are resized to, ``stop`` the number of frames
    to process (-1 = until 'q' is pressed), ``show_res`` toggles display
    windows, ``rgb`` selects colour vs grayscale processing.
    '''
    # Probe for a working camera, starting at the "any camera" id -1.
    camid = -1
    cap = cv2.VideoCapture(camid)
    while not cap.isOpened():
        print 'retrying\n'
        cv2.VideoCapture(camid).release()
        cap = cv2.VideoCapture(camid)
        # NOTE(review): the increment happens AFTER the reopen, so each id is
        # retried once before moving on to the next device index.
        camid += 1
    r, img = cap.read()
    if not rgb:
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # instantiate a retina
    img = cv2.resize(img, img_size)
    ret = retina_cuda.create_retina(
        loc, coeff, img.shape, (int(img.shape[1] / 2), int(img.shape[0] / 2)))
    # instantiate a cortex
    cort = cortex_cuda.create_cortex_from_fields(loc, rgb=rgb)
    # for best performance, do not change these objects
    count = 0
    while cv2.waitKey(10) != ord('q'):
        r, img = cap.read()
        # NOTE(review): img is converted/resized before checking the read
        # flag ``r`` — a failed read would pass None to cvtColor; confirm
        # whether the capture can fail mid-stream here.
        if not rgb:
            img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        img = cv2.resize(img, img_size)
        if r:
            sample = time.time()
            V = ret.sample(img)              # imagevector from the frame
            invert = time.time()
            inv_c = ret.inverse(V)           # back-projected image
            cortical = time.time()
            l_c = cort.cort_image_left(V)    # left cortical hemifield
            r_c = cort.cort_image_right(V)   # right cortical hemifield
            end = time.time()
            # CSV row: sample time, inverse time, cortical-transform time.
            print('%f,%f,%f,' % (invert - sample, cortical - invert,
                                 end - cortical))
            count += 1
            if show_res:
                cv2.namedWindow("inverse CUDA", cv2.WINDOW_NORMAL)
                cv2.imshow("inverse CUDA", inv_c)
                # Rotate hemifields upright and join them side by side.
                c_c = np.concatenate((np.rot90(l_c), np.rot90(r_c, k=3)),
                                     axis=1)
                cv2.namedWindow("cortex CUDA", cv2.WINDOW_NORMAL)
                cv2.imshow("cortex CUDA", c_c)
        # Stop after ``stop`` processed frames (never triggers for stop=-1).
        if count == stop:
            break
def correctness_test(loc, coeff, cap, rgb=False):
    '''
    Visually compare the CUDA implementation against Piotr's reference code.

    The CUDA retina/cortex get minimal host-side initialisation (all
    tractable values are computed on the GPU). Each camera frame is sampled,
    inverse-transformed and cortically transformed by BOTH implementations,
    and the squared differences of the results are displayed alongside them.

    Parameters: ``loc``/``coeff`` retinal field tables, ``cap`` an opened
    cv2.VideoCapture, ``rgb`` colour vs grayscale processing.
    '''
    r, img = cap.read()
    if not rgb:
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # create CUDA objects to pass to evaluation
    # (trailing None = no externally supplied gauss image; the GPU builds it)
    ret = retina_cuda.create_retina(
        loc, coeff, img.shape, (int(img.shape[1] / 2), int(img.shape[0] / 2)),
        None)
    cort = cortex_cuda.create_cortex_from_fields(loc, rgb=rgb)
    while ord('q') != cv2.waitKey(10):
        r, img = cap.read()
        if not rgb:
            img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        if r:
            '''
            Sample the image img with CUDA retina ret, inverse transform it
            with ret and create the cortical image with CUDA cortex cort
            Sample and generate retina and cortical images from img with
            Piotrs's code
            Visually compare the results by showing the subtraction of the
            generated images
            '''
            V_c = ret.sample(img)  # sample with CUDA
            inv_c = ret.inverse(V_c)  # inverse with CUDA
            l_c = cort.cort_image_left(V_c)  # left cortical image CUDA
            r_c = cort.cort_image_right(V_c)  # right cortical image CUDA
            c_c = np.concatenate(
                (np.rot90(l_c), np.rot90(r_c, k=3)),
                axis=1)  # concatenate the results into one image
            # create Piotr's retina and cortical images
            # NOTE(review): GI and the L/R cortex tables are loop-invariant
            # (frame size is fixed) and could be hoisted out of the loop;
            # left per-frame here, possibly deliberately for the comparison.
            GI = retina.gauss_norm_img(int(img.shape[1] / 2),
                                       int(img.shape[0] / 2), coeff, loc,
                                       img.shape, rgb)
            L, R = cortex.LRsplit(loc)
            L_loc, R_loc = cortex.cort_map(L, R)
            L_loc, R_loc, G, cort_size = cortex.cort_prepare(L_loc, R_loc)
            V_p = retina.sample(img, img.shape[1] / 2, img.shape[0] / 2,
                                coeff, loc, rgb)
            inv_p = retina.inverse(V_p, img.shape[1] / 2, img.shape[0] / 2,
                                   coeff, loc, GI, img.shape, rgb)
            l_p, r_p = cortex.cort_img(V_p, L, L_loc, R, R_loc, cort_size, G)
            # Crop Piotr's hemifields to the CUDA output height so the
            # concatenation (and later subtraction) shapes match.
            c_p = np.concatenate((np.rot90(
                l_p[:l_c.shape[0], :]), np.rot90(r_p[:r_c.shape[0], :], k=3)),
                                 axis=1)
            # show CUDA results
            cv2.namedWindow("inverse CUDA", cv2.WINDOW_NORMAL)
            cv2.imshow("inverse CUDA", inv_c)
            cv2.namedWindow("cortex CUDA", cv2.WINDOW_NORMAL)
            cv2.imshow("cortex CUDA", c_c)
            # show Piotr's results
            cv2.namedWindow("inverse Piotr", cv2.WINDOW_NORMAL)
            cv2.imshow("inverse Piotr", inv_p)
            cv2.namedWindow("cortex Piotr", cv2.WINDOW_NORMAL)
            cv2.imshow("cortex Piotr", c_p)
            # show the difference of the images (squared error, scaled for
            # visibility)
            cv2.namedWindow("inverse diff", cv2.WINDOW_NORMAL)
            cv2.imshow("inverse diff", np.power((inv_c - inv_p), 2) * 255)
            cv2.namedWindow("cortex diff", cv2.WINDOW_NORMAL)
            cv2.imshow("cortex diff", np.power((c_c - c_p), 2) * 255)
def compatibility_test(loc, coeff, cap, rgb=False):
    '''
    Check that CUDA objects initialised from EXTERNAL parameters (produced
    by Piotr's code) behave identically to CUDA objects that compute
    everything on the GPU.

    ret0/cort0 are the GPU-initialised references (already proven identical
    to Piotr's implementation); ret1/cort1 are initialised from Piotr's GI
    and cortex tables ("toprove"). Each frame is processed by both pairs and
    the differences are printed and displayed.

    Parameters: ``loc``/``coeff`` retinal field tables, ``cap`` an opened
    cv2.VideoCapture, ``rgb`` colour vs grayscale processing.
    '''
    r, img = cap.read()
    if not rgb:
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Get parameters calculated by Piotr's code
    GI = retina.gauss_norm_img(int(img.shape[1] / 2), int(img.shape[0] / 2),
                               coeff, loc, img.shape, rgb)
    L, R = cortex.LRsplit(loc)
    L_loc, R_loc = cortex.cort_map(L, R)
    L_loc, R_loc, G, cort_size = cortex.cort_prepare(L_loc, R_loc)
    # CUDA
    # first retina creates everything on the GPU, proved to be identical
    # with Piotr's implementation
    ret0 = retina_cuda.create_retina(loc, coeff, img.shape,
                                     (img.shape[1] / 2, img.shape[0] / 2),
                                     None)
    # second retina uses the GI from Piotr
    ret1 = retina_cuda.create_retina(loc, coeff, img.shape,
                                     (img.shape[1] / 2, img.shape[0] / 2), GI)
    # first cortex creates everything on the GPU, proved to be identical
    # with Piotr's implementation
    cort0 = cortex_cuda.create_cortex_from_fields(loc, rgb=rgb)
    # second cortex gets all the parameters from Piotr's code
    cort1 = cortex_cuda.create_cortex_from_fields_and_locs(
        L, R, L_loc, R_loc, (cort0.cort_image_size[0], cort_size[1]),
        gauss100=G, rgb=rgb)
    # read camera stream
    while ord('q') != cv2.waitKey(10):
        r, img = cap.read()
        if not rgb:
            img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        if r:
            '''
            Sample the image img with ret0 and ret1, inverse transform the
            image vectors, create the cortical image with cort0 and cort1
            Visually compare the results by showing the subtraction of the
            generated images
            '''
            V0 = ret0.sample(img)  # sample with reference ret
            inv0 = ret0.inverse(V0)  # inverse with reference ret
            l_c0 = cort0.cort_image_left(
                V0)  # left cortical image reference cort
            r_c0 = cort0.cort_image_right(
                V0)  # right cortical image reference cort
            c_c0 = np.concatenate(
                (np.rot90(l_c0), np.rot90(r_c0, k=3)),
                axis=1)  # concatenate the results into one image
            # BUG FIX: the externally-initialised ret1 was created but never
            # used — the original sampled with ret0 twice (and inverted V0),
            # so every comparison below was trivially zero.
            V1 = ret1.sample(img)  # sample with the retina under test
            inv1 = ret1.inverse(V1)  # inverse with the retina under test
            l_c1 = cort1.cort_image_left(
                V1)  # left cortical image, cortex under test
            r_c1 = cort1.cort_image_right(
                V1)  # right cortical image, cortex under test
            c_c1 = np.concatenate(
                (np.rot90(l_c1[:, :]), np.rot90(r_c1[:, :], k=3)),
                axis=1)  # concatenate the results into one image
            # sampling error between the two instances
            print('Sampling difference: %f' % np.sum(V1 - V0))
            # show CUDA results
            cv2.namedWindow("inverse ref", cv2.WINDOW_NORMAL)
            cv2.imshow("inverse ref", inv0)
            cv2.namedWindow("cortex ref", cv2.WINDOW_NORMAL)
            cv2.imshow("cortex ref", c_c0)
            # show the externally-initialised ("toprove") results
            cv2.namedWindow("inverse toprove", cv2.WINDOW_NORMAL)
            cv2.imshow("inverse toprove", inv1)
            cv2.namedWindow("cortex toprove", cv2.WINDOW_NORMAL)
            cv2.imshow("cortex toprove", c_c1)
            # Fraction of differing pixels in the inverse images.
            # BUG FIX: cast to float — under Python 2 this was integer
            # division and almost always printed 0.
            print(float(len(np.nonzero(inv0 - inv1)[0])) /
                  (img.shape[0] * img.shape[1]))
            # show the difference of the images (scaled for visibility)
            cv2.namedWindow("inverse diff", cv2.WINDOW_NORMAL)
            cv2.imshow("inverse diff", np.abs(inv0 - inv1) * 255)
            cv2.namedWindow("cortex diff", cv2.WINDOW_NORMAL)
            cv2.imshow("cortex diff", np.abs(c_c0 - c_c1) * 255)
# Script-level setup: pre-build every retina/cortex combination used by the
# interactive benchmark, then create the settings trackbars.
# NOTE(review): ``handle``, ``loc``, ``coeff`` and ``loc50k`` are defined
# earlier in the file (outside this chunk).
coeff50k = pickle.load(handle)
# Benchmark image sizes as (width, height) pairs.
img_sizes = [(480,320),(640,480),(800,600),(1080,720),(1280,1024),(1920,1080)]
# Piotr's cortex tables for the 50k-field retina (needed for the
# create_cortex_from_fields_and_locs calls below).
L, R = cortex.LRsplit(loc50k)
L_loc, R_loc = cortex.cort_map(L, R)
L_loc, R_loc, G, cort_size = cortex.cort_prepare(L_loc, R_loc)
# Object grids indexed [colour-mode, retina-index, image-size]:
# axis 0: 0 = RGB (3-channel image shape), 1 = grayscale
# axis 1: retina parameter set (0-3 from loc/coeff, 4 = the 50k retina)
# axis 2: image size from img_sizes (retinas only; cortexes are size-independent)
retinas = np.empty((2,5,6), dtype=object)
cortexes = np.empty((2,5), dtype=object)
# Fill parameter sets 0-3; index 4 (the 50k retina) is filled separately
# below because it uses loc50k/coeff50k and the externally computed tables.
for i in range(0,4):
    for ind, img in enumerate(img_sizes):
        # (img[1], img[0], 3) / (img[1], img[0]) = numpy-style (rows, cols[, ch]).
        retinas[0,i,ind] = retina_cuda.create_retina(loc[i], coeff[i], (img[1],img[0],3), (int(img[0]/2), int(img[1]/2)))
        retinas[1,i,ind] = retina_cuda.create_retina(loc[i], coeff[i], (img[1], img[0]), (int(img[0]/2), int(img[1]/2)))
    cortexes[0,i] = cortex_cuda.create_cortex_from_fields(loc[i], rgb=True)
    cortexes[1,i] = cortex_cuda.create_cortex_from_fields(loc[i], rgb=False)
# The 50k-field retina/cortex pair (index 4), built from Piotr's tables.
for ind,img in enumerate(img_sizes):
    retinas[0,4,ind] = retina_cuda.create_retina(loc50k, coeff50k, (img[1],img[0],3), (int(img[0]/2), int(img[1]/2)))
    retinas[1,4,ind] = retina_cuda.create_retina(loc50k, coeff50k, (img[1], img[0]), (int(img[0]/2), int(img[1]/2)))
cortexes[0][4] = cortex_cuda.create_cortex_from_fields_and_locs(L, R, L_loc, R_loc, cort_size, gauss100=G, rgb=True)
cortexes[1][4] = cortex_cuda.create_cortex_from_fields_and_locs(L, R, L_loc, R_loc, cort_size, gauss100=G, rgb=False)


#### TRACKBAR
def nothing(x):
    # No-op trackbar callback (cv2.createTrackbar requires one).
    pass
# Interactive controls: pick retina index, image size and colour mode.
cv2.namedWindow('Settings', cv2.WINDOW_NORMAL)
cv2.createTrackbar('Retina','Settings',0,4,nothing)
cv2.createTrackbar('Image size', 'Settings',0,5,nothing)
cv2.createTrackbar('Color mode','Settings',0,1,nothing)