def showNonOpponency(C, theta):
    """Generate backprojected and cortical views for the magnocellular
    pathway retinal ganglion cells (non-opponent channel), still-image case.

    Args:
        C (vector): The sharp retina imagevector.
        theta (float): Threshold applied to the RGC response.

    Returns:
        merged: Merged backprojected view (+ / - side by side) as a numpy image array.
        mergecort: Merged cortical view (+ / - side by side) as a numpy image array.
    """
    # Gaussian normalisation image used by the backprojection
    GI = retina.gauss_norm_img(x, y, dcoeff[i], dloc[i], imsize=imgsize, rgb=False)
    # Sample with the other receptive field; a still image carries no
    # temporal response, so the same frame is re-sampled.
    S = retina.sample(img, x, y, dcoeff[i], dloc[i], rgb=True)

    # Rectified centre / surround responses of the non-opponent channel
    ncentreV, nsurrV = rgc.nonopponency(C, S, theta)

    # Backproject each imagevector and crop to the sampled region
    back_centre = retina.inverse(ncentreV, x, y, dcoeff[i], dloc[i], GI, imsize=imgsize, rgb=True)
    crop_centre = retina.crop(back_centre, x, y, dloc[i])
    back_surround = retina.inverse(nsurrV, x, y, dcoeff[i], dloc[i], GI, imsize=imgsize, rgb=True)
    crop_surround = retina.crop(back_surround, x, y, dloc[i])

    # Label the two halves, then stitch them left-to-right
    cv2.putText(crop_centre, "R+G + ", (xx, yy), font, 1, (255, 255, 255), 2)
    cv2.putText(crop_surround, "R+G - ", (xx, yy), font, 1, (255, 255, 255), 2)
    merged = np.concatenate((crop_centre, crop_surround), axis=1)

    # Cortical maps: transform each response, rotate the hemispheres into
    # place, and join them left-to-right
    lposnon, rposnon = cortex.cort_img(ncentreV, L, L_loc, R, R_loc, cort_size, G)
    lnegnon, rnegnon = cortex.cort_img(nsurrV, L, L_loc, R, R_loc, cort_size, G)
    pos_cort = np.concatenate((np.rot90(lposnon), np.rot90(rposnon, k=3)), axis=1)
    neg_cort = np.concatenate((np.rot90(lnegnon), np.rot90(rnegnon, k=3)), axis=1)
    mergecort = np.concatenate((pos_cort, neg_cort), axis=1)

    return merged, mergecort
def showNonOpponency(C, theta):
    """Generate backprojected and cortical views for the magnocellular
    pathway retinal ganglion cells, using the CUDA-accelerated retina.

    Args:
        C (vector): The sharp retina imagevector.
        theta (float): Threshold applied to the RGC response.

    Returns:
        merged: Merged backprojected view (+ / - side by side) as a numpy image array.
        mergedcortex: Merged cortical view (+ / - side by side) as a numpy image array.
    """
    # SURROUND: sample the later frame with the accelerated retina, then
    # convert to the layout the (Piotr) rgc routines expect
    S = retina_cuda.convert_to_Piotr(ret1.sample(lateimg))

    # Rectified centre / surround responses of the non-opponent channel
    ncentreV, nsurrV = rgc.nonopponency(C, S, theta)

    # Image centre used as the fixation point for cropping
    half_w = int(img.shape[1] / 2)
    half_h = int(img.shape[0] / 2)

    # Backproject: centre via ret0, surround via ret1.
    # NOTE(review): the crops use loc[0] vs dloc[0] — presumably matching the
    # two retinas' tessellations; confirm the asymmetry is intentional.
    back_centre = ret0.inverse(retina_cuda.convert_from_Piotr(ncentreV.astype(float)))
    crop_centre = retina.crop(back_centre, half_w, half_h, loc[0])
    back_surround = ret1.inverse(retina_cuda.convert_from_Piotr(nsurrV.astype(float)))
    crop_surround = retina.crop(back_surround, half_w, half_h, dloc[0])

    # Label the two halves, then stitch them left-to-right
    cv2.putText(crop_centre, "R+G + ", (1, 270), font, 1, (0, 255, 255), 2)
    cv2.putText(crop_surround, "R+G - ", (1, 270), font, 1, (0, 255, 255), 2)
    merged = np.concatenate((crop_centre, crop_surround), axis=1)

    # Cortical maps via the accelerated left/right hemisphere transforms
    lposnon = cort0.cort_image_left(retina_cuda.convert_from_Piotr(ncentreV.astype(float)))
    rposnon = cort0.cort_image_right(retina_cuda.convert_from_Piotr(ncentreV.astype(float)))
    lnegnon = cort1.cort_image_left(retina_cuda.convert_from_Piotr(nsurrV.astype(float)))
    rnegnon = cort1.cort_image_right(retina_cuda.convert_from_Piotr(nsurrV.astype(float)))

    # Rotate the hemispheres into place and join them left-to-right,
    # positive responses first, then negative
    pos_cort_img_non = np.concatenate((np.rot90(lposnon), np.rot90(rposnon, k=3)), axis=1)
    neg_cort_img_non = np.concatenate((np.rot90(lnegnon), np.rot90(rnegnon, k=3)), axis=1)
    mergedcortex = np.concatenate((pos_cort_img_non, neg_cort_img_non), axis=1)

    return merged, mergedcortex
def showNonOpponency(C, theta):
    """Generate backprojected and cortical views for the magnocellular
    pathway retinal ganglion cells, sampling a temporally later frame.

    Args:
        C (vector): The sharp retina imagevector.
        theta (float): Threshold applied to the RGC response.

    Returns:
        merged: Merged backprojected view (+ / - side by side) as a numpy image array.
        mergecort: Merged cortical view (+ / - side by side) as a numpy image array.
    """
    # Sample with the other receptive field, but from the temporally
    # different image, lateimg
    S = retina.sample(lateimg, x, y, dcoeff[i], dloc[i], rgb=True)

    # Rectified centre / surround responses of the non-opponent channel
    ncentreV, nsurrV = rgc.nonopponency(C, S, theta)

    # Backproject and crop each response.
    # NOTE(review): inverse() is called with rgb=False here although the
    # sample above used rgb=True — confirm this is intended.
    crops = []
    for vec in (ncentreV, nsurrV):
        back = retina.inverse(vec, x, y, dcoeff[i], dloc[i], GI, imsize=imgsize, rgb=False)
        crops.append(retina.crop(back, x, y, dloc[i]))
    merged = np.concatenate(crops, axis=1)

    # Cortical maps: transform each response, rotate the hemispheres into
    # place, and join them left-to-right
    lposnon, rposnon = cortex.cort_img(ncentreV, L, L_loc, R, R_loc, cort_size, G)
    lnegnon, rnegnon = cortex.cort_img(nsurrV, L, L_loc, R, R_loc, cort_size, G)
    pos_cort = np.concatenate((np.rot90(lposnon), np.rot90(rposnon, k=3)), axis=1)
    neg_cort = np.concatenate((np.rot90(lnegnon), np.rot90(rnegnon, k=3)), axis=1)
    mergecort = np.concatenate((pos_cort, neg_cort), axis=1)

    return merged, mergecort