Code example #1
0
    def __init__(self, width, height, rgb=True):
        """Initialise the GPU retina and cortex.

        Loads the 50k receptive-field location/coefficient tables from
        disk, prepares the cortical mapping on the host, then builds the
        CUDA retina and cortex objects.

        Args:
            width (int): width of image
            height (int): height of image
            rgb (bool): when True the retina is built for 3-channel
                images, otherwise for single-channel images
        """
        # Load pickled receptive-field data
        retina_path = '/home/lewis/RetinaCUDA-master/Retinas'
        with open(retina_path + '/ret50k_loc.pkl', 'rb') as handle:
            loc50k = pickle.load(handle)
        with open(retina_path + '/ret50k_coeff.pkl', 'rb') as handle:
            coeff50k = pickle.load(handle)

        # Host-side cortical mapping preparation
        L, R = cortex.LRsplit(loc50k)
        L_loc, R_loc = cortex.cort_map(L, R)
        L_loc, R_loc, G, cort_size = cortex.cort_prepare(L_loc, R_loc)

        # The rgb flag only changes the image shape handed to the retina
        # and the flag forwarded to the cortex; everything else is shared.
        img_shape = (height, width, 3) if rgb else (height, width)
        fixation = (int(width / 2), int(height / 2))
        self.ret = retina_cuda.create_retina(
            loc50k, coeff50k, img_shape, fixation)
        self.cort = cortex_cuda.create_cortex_from_fields_and_locs(
            L, R, L_loc, R_loc, cort_size, gauss100=G, rgb=rgb)
Code example #2
0
File: perf_test.py  Project: lorincbalog/RetinaCUDA
def speedup(loc, coeff, img, rgb, show_res):
    '''
    This test measures the performance of the two implementations
    (Piotr's host code vs. the CUDA port) from initialisation to the
    end of the cortical transform.

    Each phase is bracketed by time.time() calls; the statement order
    below is significant and the durations are printed as one CSV row
    at the end.

    Args:
        loc: receptive-field locations shared by both implementations
        coeff: receptive-field coefficients shared by both implementations
        img: input image (numpy array)
        rgb: whether the image carries colour channels
        show_res: if truthy, display the inverse and cortical images
    '''
    # --- host retina initialisation (gaussian normalisation image) ---
    init_p = time.time()
    GI = retina.gauss_norm_img(int(img.shape[1] / 2), int(img.shape[0] / 2),
                               coeff, loc, img.shape, rgb)

    # --- CUDA retina initialisation ---
    init_c = time.time()
    ret = retina_cuda.create_retina(
        loc, coeff, img.shape, (int(img.shape[1] / 2), int(img.shape[0] / 2)))

    # --- sampling: host, then CUDA ---
    sample_p = time.time()
    V_p = retina.sample(img, img.shape[1] / 2, img.shape[0] / 2, coeff, loc,
                        rgb)

    sample_c = time.time()
    V_c = ret.sample(img)

    # --- inverse transform: host, then CUDA ---
    invert_p = time.time()
    inv_p = retina.inverse(V_p, img.shape[1] / 2, img.shape[0] / 2, coeff, loc,
                           GI, img.shape, rgb)

    invert_c = time.time()
    inv_c = ret.inverse(V_c)
    retina_end = time.time()

    # --- cortex initialisation: host, then CUDA ---
    cort_init_p = time.time()
    L, R = cortex.LRsplit(loc)
    L_loc, R_loc = cortex.cort_map(L, R)
    L_loc, R_loc, G, cort_size = cortex.cort_prepare(L_loc, R_loc)

    cort_init_c = time.time()
    cort = cortex_cuda.create_cortex_from_fields(loc, rgb=rgb)

    # --- cortical image generation: host, then CUDA ---
    cort_img_p = time.time()
    l_p, r_p = cortex.cort_img(V_p, L, L_loc, R, R_loc, cort_size, G)

    cort_img_c = time.time()
    l_c = cort.cort_image_left(V_c)
    r_c = cort.cort_image_right(V_c)
    cort_end = time.time()

    # One CSV row of the ten phase durations (Python 2 print statement):
    # host-init, cuda-init, host-sample, cuda-sample, host-inverse,
    # cuda-inverse, host-cort-init, cuda-cort-init, host-cort-img, cuda-cort-img
    print '%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,' % (init_c - init_p, sample_p - init_c, sample_c - sample_p, \
                                  invert_p - sample_c, invert_c - invert_p, retina_end - invert_c,\
                                  cort_init_c - cort_init_p, cort_img_p - cort_init_c, cort_img_c - cort_img_p, cort_end - cort_img_c)

    if show_res:
        cv2.namedWindow("inverse CUDA", cv2.WINDOW_NORMAL)
        cv2.imshow("inverse CUDA", inv_c)
        cv2.namedWindow("inverse Piotr", cv2.WINDOW_NORMAL)
        cv2.imshow("inverse Piotr", inv_p)
        # left/right cortical halves are rotated and stitched into one view
        c_c = np.concatenate((np.rot90(l_c), np.rot90(r_c, k=3)), axis=1)
        c_p = np.concatenate((np.rot90(l_p), np.rot90(r_p, k=3)), axis=1)
        cv2.namedWindow("cortex CUDA", cv2.WINDOW_NORMAL)
        cv2.imshow("cortex CUDA", c_c)
        cv2.namedWindow("cortex Piotr", cv2.WINDOW_NORMAL)
        cv2.imshow("cortex Piotr", c_p)
Code example #3
0
File: mainCamDemo.py  Project: tomesparon/MScProj
def prepRF():
    """Pre-generate the cortical map locations before the main loop.

    Populates the module-level globals L, R, L_loc, R_loc, G, cort_size
    from the currently selected retina (index ``i``), then grabs one
    camera frame to derive the image centre and size for the gaussian
    normalisation image GI.
    """
    global L, R, L_loc, R_loc, G, cort_size
    # NOTE(review): the split uses loc[i] while the normalisation below
    # uses dcoeff[i]/dloc[i] — confirm loc and dloc are the same tables.
    L, R = cortex.LRsplit(loc[i])
    L_loc, R_loc = cortex.cort_map(L, R)
    L_loc, R_loc, G, cort_size = cortex.cort_prepare(L_loc, R_loc)

    # read a frame only to learn the camera geometry
    ret, img = cap.read()
    x = int(img.shape[1] / 2)  # image centre, x
    y = int(img.shape[0] / 2)  # image centre, y
    imgsize = (img.shape[0], img.shape[1])
    global GI
    GI = retina.gauss_norm_img(x,
                               y,
                               dcoeff[i],
                               dloc[i],
                               imsize=imgsize,
                               rgb=True)
Code example #4
0
def correctness_test(loc, coeff, cap, rgb=False):
    '''
    CUDA code uses the minimal initialisation from the host,
    all tractable values are computed on the GPU.
    Get an image from the camera, generate inverse and cortical image
    with both implementations and subtract the results.

    Args:
        loc: receptive-field locations
        coeff: receptive-field coefficients
        cap: an opened cv2.VideoCapture to read frames from
        rgb (bool): if False, frames are converted to grayscale first
    '''
    r, img = cap.read()
    if not rgb: img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # create CUDA objects to pass to evaluation
    ret = retina_cuda.create_retina(
        loc, coeff, img.shape, (int(img.shape[1] / 2), int(img.shape[0] / 2)),
        None)
    cort = cortex_cuda.create_cortex_from_fields(loc, rgb=rgb)

    # Piotr's normalisation image and cortical maps depend only on the
    # frame geometry (constant for a given capture), so compute them once
    # here instead of rebuilding them on every frame of the loop below.
    GI = retina.gauss_norm_img(int(img.shape[1] / 2),
                               int(img.shape[0] / 2), coeff, loc,
                               img.shape, rgb)
    L, R = cortex.LRsplit(loc)
    L_loc, R_loc = cortex.cort_map(L, R)
    L_loc, R_loc, G, cort_size = cortex.cort_prepare(L_loc, R_loc)

    while ord('q') != cv2.waitKey(10):
        r, img = cap.read()
        if r:
            # Convert only after checking the read succeeded; the original
            # converted first, which crashes cvtColor on a failed read.
            if not rgb: img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

            # Sample img with the CUDA retina, inverse transform it, and
            # build the cortical image with the CUDA cortex; then do the
            # same with Piotr's code and visually compare the results by
            # showing the subtraction of the generated images.
            V_c = ret.sample(img)  # sample with CUDA
            inv_c = ret.inverse(V_c)  # inverse with CUDA

            l_c = cort.cort_image_left(V_c)  # left cortical image CUDA
            r_c = cort.cort_image_right(V_c)  # right cortical image CUDA
            c_c = np.concatenate(
                (np.rot90(l_c), np.rot90(r_c, k=3)),
                axis=1)  # concatenate the results into one image

            # create Piotr's retina and cortical images
            V_p = retina.sample(img, img.shape[1] / 2, img.shape[0] / 2, coeff,
                                loc, rgb)
            inv_p = retina.inverse(V_p, img.shape[1] / 2, img.shape[0] / 2,
                                   coeff, loc, GI, img.shape, rgb)
            l_p, r_p = cortex.cort_img(V_p, L, L_loc, R, R_loc, cort_size, G)
            # crop Piotr's halves to the CUDA halves' height before stitching
            c_p = np.concatenate((np.rot90(
                l_p[:l_c.shape[0], :]), np.rot90(r_p[:r_c.shape[0], :], k=3)),
                                 axis=1)

            # show CUDA results
            cv2.namedWindow("inverse CUDA", cv2.WINDOW_NORMAL)
            cv2.imshow("inverse CUDA", inv_c)
            cv2.namedWindow("cortex CUDA", cv2.WINDOW_NORMAL)
            cv2.imshow("cortex CUDA", c_c)

            # show Piotr's results
            cv2.namedWindow("inverse Piotr", cv2.WINDOW_NORMAL)
            cv2.imshow("inverse Piotr", inv_p)
            cv2.namedWindow("cortex Piotr", cv2.WINDOW_NORMAL)
            cv2.imshow("cortex Piotr", c_p)

            # show the (squared, scaled) difference of the images
            cv2.namedWindow("inverse diff", cv2.WINDOW_NORMAL)
            cv2.imshow("inverse diff", np.power((inv_c - inv_p), 2) * 255)
            cv2.namedWindow("cortex diff", cv2.WINDOW_NORMAL)
            cv2.imshow("cortex diff", np.power((c_c - c_p), 2) * 255)
Code example #5
0
def compatibility_test(loc, coeff, cap, rgb=False):
    '''
    CUDA code uses different initialisations,
    passed parameters are the results of Piotr's code
    Initialise retina and cortex with external parameters
    Process camera stream
    '''
    r, img = cap.read()
    if not rgb: img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Get parameters calculated by Piotr's code
    GI = retina.gauss_norm_img(int(img.shape[1] / 2), int(img.shape[0] / 2),
                               coeff, loc, img.shape, rgb)
    L, R = cortex.LRsplit(loc)
    L_loc, R_loc = cortex.cort_map(L, R)
    L_loc, R_loc, G, cort_size = cortex.cort_prepare(L_loc, R_loc)
    # CUDA
    # first retina creates everything on the GPU, proved to be identical with Piotr's implementation
    ret0 = retina_cuda.create_retina(loc, coeff, img.shape,
                                     (img.shape[1] / 2, img.shape[0] / 2),
                                     None)
    # second retina uses the GI from Piotr
    ret1 = retina_cuda.create_retina(loc, coeff, img.shape,
                                     (img.shape[1] / 2, img.shape[0] / 2), GI)
    # first cortex creates everything on the GPU, proved to be identical with Piotr's implementation
    cort0 = cortex_cuda.create_cortex_from_fields(loc, rgb=rgb)
    # second cortex gets all the parameters from Piotr's code
    cort1 = cortex_cuda.create_cortex_from_fields_and_locs(
        L,
        R,
        L_loc,
        R_loc, (cort0.cort_image_size[0], cort_size[1]),
        gauss100=G,
        rgb=rgb)

    # read camera stream
    while ord('q') != cv2.waitKey(10):
        r, img = cap.read()
        if not rgb: img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        if r:
            '''
            Sample the image img with ret0 and ret1, inverse transform the image vectors
            create the cortical image with cort0 and cort1
            Visually compare the results by showing the subtraction of the generatd images
            '''
            V0 = ret0.sample(img)  # sample with reference ret
            inv0 = ret0.inverse(V0)  # inverse with reference ret

            l_c0 = cort0.cort_image_left(
                V0)  # left cortical image reference cort
            r_c0 = cort0.cort_image_right(
                V0)  # right cortical image refernce cort
            c_c0 = np.concatenate(
                (np.rot90(l_c0), np.rot90(r_c0, k=3)),
                axis=1)  #concatenate the results into one image

            V1 = ret0.sample(img)  # sample with reference ret
            inv1 = ret0.inverse(V0)  # inverse with reference ret

            l_c1 = cort1.cort_image_left(
                V1)  # left cortical image reference cort
            r_c1 = cort1.cort_image_right(
                V1)  # right cortical image refernce cort
            c_c1 = np.concatenate(
                (np.rot90(l_c1[:, :]), np.rot90(r_c1[:, :], k=3)),
                axis=1)  #concatenate the results into one image

            # sampling error between the two instance
            print('Sampling difference: %f' % np.sum(V1 - V0))
            # show CUDA results
            cv2.namedWindow("inverse ref", cv2.WINDOW_NORMAL)
            cv2.imshow("inverse ref", inv0)
            cv2.namedWindow("cortex ref", cv2.WINDOW_NORMAL)
            cv2.imshow("cortex ref", c_c0)

            # show Piotr's results
            cv2.namedWindow("inverse toprove", cv2.WINDOW_NORMAL)
            cv2.imshow("inverse toprove", inv1)
            cv2.namedWindow("cortex toprove", cv2.WINDOW_NORMAL)
            cv2.imshow("cortex toprove", c_c1)

            # show the difference of the images
            print len(
                np.nonzero(inv0 - inv1)[0]) / (img.shape[0] * img.shape[1])
            cv2.namedWindow("inverse diff", cv2.WINDOW_NORMAL)
            cv2.imshow("inverse diff", np.abs(inv0 - inv1) * 255)
            cv2.namedWindow("cortex diff", cv2.WINDOW_NORMAL)
            cv2.imshow("cortex diff", np.abs(c_c0 - c_c1) * 255)
Code example #6
0
# Load the receptive-field coefficient/location tables exported from MATLAB.
# NOTE(review): '\c' and '\l' are not Python escape sequences, so the
# backslash survives literally as a Windows path separator — fragile;
# raw strings or os.path.join would be safer (left unchanged here).
dcoeff[3] = scipy.io.loadmat(mat_data + '\coeff256_od.mat')['tess256']
dloc[0] = scipy.io.loadmat(mat_data + '\loc8k_od.mat')['loc8k']
dloc[1] = scipy.io.loadmat(mat_data + '\loc4k_od.mat')['loc4k']
dloc[2] = scipy.io.loadmat(mat_data + '\loc1k_od.mat')['loc1k']
dloc[3] = scipy.io.loadmat(mat_data + '\loc256_od.mat')['tess256']

i = 0  # index of the currently selected retina resolution

showInverse = True  # toggle: display the inverse (back-projected) image
showCortex = True   # toggle: display the cortical image

font = cv2.FONT_HERSHEY_PLAIN
# labels for the opponent-colour channel variants
types = ["RG", "GR", "RGinv", "GRinv", "BY", "YB", "BYinv", "YBinv"]

# Pre-compute the cortical mapping for the selected retina.
# NOTE(review): `global` at module level is a no-op, and the split uses
# loc[i] while GI below uses dloc[i] — confirm loc and dloc are the same.
global L, R, L_loc, R_loc, G, cort_size
L, R = cortex.LRsplit(loc[i])
L_loc, R_loc = cortex.cort_map(L, R)
L_loc, R_loc, G, cort_size = cortex.cort_prepare(L_loc, R_loc)

# read in an image file (first file found in the testimage directory)
stdimg_dir = os.getcwd() + os.sep + 'testimage\\'
print "Using " + os.listdir(stdimg_dir)[0]
name = os.listdir(stdimg_dir)[0]
img = cv2.imread(stdimg_dir + name, )
x, y = img.shape[1] / 2, img.shape[0] / 2  # image centre (Py2 int division)
xx, yy = 1, img.shape[0] / 10  # presumably a text-overlay anchor — verify
imgsize = img.shape

# generate gaussian normalised image
GI = retina.gauss_norm_img(x, y, dcoeff[i], dloc[i], imsize=imgsize, rgb=True)
Code example #7
0
    while not cap.isOpened():
        print 'retrying\n'
        cv2.VideoCapture(camid).release()
        cap = cv2.VideoCapture(camid)
        camid += 1

    retinas = np.empty((2, 5), dtype=object)
    cortexes = np.empty((2, 5), dtype=object)

    r, img = cap.read()
    while not r:
        r, img = cap.read()

    img = cv2.resize(img, img_sizes[img_ind])
    L, R = cortex.LRsplit(loc50k)
    L_loc, R_loc = cortex.cort_map(L, R)
    L_loc, R_loc, G, cort_size = cortex.cort_prepare(L_loc, R_loc)

    if rgb:
        ret = retina_cuda.create_retina(
            loc50k, coeff50k, img.shape,
            (int(img.shape[1] / 2), int(img.shape[0] / 2)))
    else:
        ret = retina_cuda.create_retina(
            loc50k, coeff50k, (img.shape[0], img.shape[1]),
            (int(img.shape[1] / 2), int(img.shape[0] / 2)))
    cort = cortex_cuda.create_cortex_from_fields_and_locs(L,
                                                          R,
                                                          L_loc,
                                                          R_loc,