def getAverageValues2(self, images):
        """ get the average values over all the images
            for every two images, divides the images by 2
            then adds them together
        """

        if len(images) == 0:
            return None
        if len(images) == 1:
            return images[0]

        width = images[0].width
        height = images[0].height
        # create image with only 2's
        # this will be used for division
        divisionImage = cv.CreateImage((width, height), cv.IPL_DEPTH_8U, 1)
        cv.Set(divisionImage, 2)
        image1 = cv.CreateImage((width, height), cv.IPL_DEPTH_8U, 1)
        image2 = cv.CreateImage((width, height), cv.IPL_DEPTH_8U, 1)

        avgImage = cv.CloneImage(images[0])
        for image in images[1:]:  # images[0] already seeds avgImage
            # divide images by 2
            cv.Div(avgImage, divisionImage, image1)
            cv.Div(image, divisionImage, image2)

            # add them to get result
            cv.Add(image1, image2, avgImage)

        return avgImage
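
# For comparison, a minimal NumPy sketch (not part of the original snippet) of the true
# per-pixel mean that the pairwise halve-and-add above only approximates; it assumes a
# non-empty list of equally sized single-channel uint8 arrays.
import numpy as np

def average_images_np(images):
    acc = np.zeros(images[0].shape, dtype=np.float64)  # accumulate in float to avoid uint8 overflow
    for img in images:
        acc += img
    return (acc / len(images)).astype(np.uint8)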
# Example #2
def get_normalized_rgb_planes(r, g, b):
    size = cv.GetSize(r)
    #    r,g,b = get_three_planes(img)

    nr_plane = cv.CreateImage(size, 8, 1)
    ng_plane = cv.CreateImage(size, 8, 1)
    nb_plane = cv.CreateImage(size, 8, 1)

    r32 = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
    g32 = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
    b32 = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
    sum = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
    cv.Zero(sum)
    cv.Convert(r, r32)
    cv.Convert(g, g32)
    cv.Convert(b, b32)

    cv.Add(r32, g32, sum)
    cv.Add(b32, sum, sum)

    tmp = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
    cv.Div(r32, sum, tmp)
    cv.ConvertScale(tmp, nr_plane, scale=255)
    cv.Div(g32, sum, tmp)
    cv.ConvertScale(tmp, ng_plane, scale=255)
    cv.Div(b32, sum, tmp)
    cv.ConvertScale(tmp, nb_plane, scale=255)

    #    res = image_empty_clone(img)
    #    cv.Merge(nr_plane,ng_plane,nb_plane,None,res)
    return nr_plane, ng_plane, nb_plane
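
# A hedged NumPy equivalent (an assumption, not from the original code) of the same
# chromaticity normalization: each plane is divided by the per-pixel sum R+G+B and
# rescaled to 0..255, with black pixels (sum == 0) left at 0.
import numpy as np

def normalized_rgb_np(r, g, b):
    r, g, b = (p.astype(np.float32) for p in (r, g, b))
    s = r + g + b
    with np.errstate(divide='ignore', invalid='ignore'):
        planes = [np.where(s > 0, p / s * 255.0, 0.0) for p in (r, g, b)]
    return [p.astype(np.uint8) for p in planes]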
def image_callback(data):
    global running
    if (running):
        image = bridge.imgmsg_to_cv(data, "bgr8")

        #normalize image
        cv.Split(image, rgb_r, rgb_g, rgb_b, None)
        red_mean = cv2.mean(np.asarray(rgb_r[:, :]))
        cv.Div(src2=cv.fromarray(np.ones((480, 640))),
               src1=rgb_r,
               dst=scaled_r,
               scale=128 / red_mean[0])
        green_mean = cv2.mean(np.asarray(rgb_g[:, :]))
        cv.Div(src2=cv.fromarray(np.ones((480, 640))),
               src1=rgb_g,
               dst=scaled_g,
               scale=128 / green_mean[0])
        blue_mean = cv2.mean(np.asarray(rgb_b[:, :]))
        cv.Div(src2=cv.fromarray(np.ones((480, 640))),
               src1=rgb_b,
               dst=scaled_b,
               scale=128 / blue_mean[0])
        cv.Merge(scaled_r, scaled_g, scaled_b, None, cv_image)

        cv.CvtColor(cv_image, hsv, cv.CV_BGR2HSV)  # --convert from BGR to HSV
        cv.CvtColor(cv_image, lab, cv.CV_BGR2Lab)

        cv.Split(hsv, hsv_h, hsv_s, hsv_v, None)
        cv.Split(cv_image, rgb_r, rgb_g, rgb_b, None)
        cv.Split(lab, lab_l, lab_a, lab_b, None)
        cv.Split(luv, luv_l, luv_u, luv_v, None)
        cv.Split(hls, hls_h, hls_l, hls_s, None)
        cv.Split(xyz, xyz_x, xyz_y, xyz_x, None)  # NOTE: the Z plane overwrites xyz_x here; a separate xyz_z buffer was presumably intended
        cv.Split(ycrcb, ycrcb_y, ycrcb_cr, ycrcb_cb, None)

        cv.Not(lab_a, a_not)
        cv.Sub(hsv_s, a_not, sa)
        cv.Sub(luv_u, hls_h, test)
        cv.Sub(hls_s, hls_h, sminh)

        threshold_red(sa)

        cv.ShowImage("red", red_dilated_image)

        red_contours, _ = cv2.findContours(image=np.asarray(
            red_dilated_image[:, :]),
                                           mode=cv.CV_RETR_EXTERNAL,
                                           method=cv.CV_CHAIN_APPROX_SIMPLE)

        print_lidar_projections(cv_image)

        circles = extract_circles(red_contours, [1, 0, 0])
        for x, y, radius in circles:
            cv.Circle(cv_image, (x, y), radius, [0, 0, 1], 3)

        cv.SetMouseCallback("camera feed", mouse_callback, hsv_image)
        cv.ShowImage("camera feed", cv_image)

        cv.WaitKey(3)
# Example #4
def main():
    root = "/Users/soswow/Documents/Face Detection/test/negative"
    #    root = "/Users/soswow/Documents/Face Detection/test/sets/negative"
    #    root = "/Users/soswow/Documents/Face Detection/test/edge_view/positive"
    #    root = "/Users/soswow/Documents/Face Detection/test/sobel/positive"
    #    root = "/Users/soswow/Documents/Face Detection/test/sets/positive"
    #    root = "/Users/soswow/Documents/Face Detection/test/falses"

    for folder in os.listdir(root):
        path = p.join(root, folder)
        if p.isdir(path):
            sum = cv.CreateMat(32, 32, cv.CV_32F)
            cv.Zero(sum)
            k = 0
            for path, _ in directory_files(path):
                try:
                    img = cv.LoadImage(path, iscolor=False)
                except IOError:
                    continue
                mat = cv.CreateMat(32, 32, cv.CV_32F)
                cv.Zero(mat)
                cv.Convert(cv.GetMat(img), mat)
                cv.Add(mat, sum, sum)
                k += 1
            avg = cv.CreateMat(32, 32, cv.CV_32F)
            cv.Zero(avg)
            count = cv.CreateMat(32, 32, cv.CV_32F)
            cv.Zero(count)
            cv.Set(count, k)
            cv.Div(sum, count, avg)
            new_img = cv.CreateImage((32, 32), 8, 1)  # single-channel 8-bit image (0 channels is invalid)
            cv.Zero(new_img)
            cv.Convert(avg, new_img)
            cv.SaveImage(p.join(root, "%s-avg.png" % folder), new_img)
def fade_edges(projection_pattern, fade_width):
    divisor = cv.CreateMat(projection_pattern.height, projection_pattern.width,
                           cv.CV_8UC1)
    cv.Set(divisor, 1)
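    # the nested rectangle outlines below write divisor values that ramp from 6 at the
    # outer border down to about 1 at fade_width pixels in, so the Div at the end
    # progressively fades (darkens) the pattern towards its edges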
    for i in range(fade_width):
        cv.Rectangle(divisor, (i, i), (projection_pattern.width - 1 - i,
                                       projection_pattern.height - 1 - i),
                     int(((-5.0 / fade_width) * i) + 6))
    cv.Div(projection_pattern, divisor, projection_pattern)
    return projection_pattern
# Example #6
 def sub_image(self, imagecurr, imageprev, divid=True):
     imagesize = (imagecurr.width, imagecurr.height)
     image = cv.CreateImage(imagesize, cv.IPL_DEPTH_8U, 1)
     cv.Sub(imagecurr, imageprev, image)
     # use pyramid/cone to ponderate the weight
     # ie. moves in corners are more important than in the center
     if divid:
         cv.Div(image, self.cone, image)
     cv.Flip(image, flipMode=1) # for webcam
     return image
# Example #7
def FormatImage(img, oimg, off, scale, correction):
    global i01

    #print img.height,img.width
    #print oimg.height,oimg.width
    cv.Transpose(img,oimg)
    cv.Flip(oimg,None,0)

    if(correction):
        cv.AddS(oimg, off, oimg)
        cv.Div(oimg, i01, oimg, scale)
# Example #8
	def crunch():
		size = cv.GetSize(band3)
		assert size == cv.GetSize(band4)
		numerator = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
		cv.Sub(band4, band3, numerator)
		denominator = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
		cv.Add(band4, band3, denominator)
		ndvi_img = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
		cv.Div(numerator, denominator, ndvi_img)

		# (NDVI + 1)
		cv.AddS(ndvi_img, 1, ndvi_img)
		return ndvi_img
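
# A minimal NumPy sketch of the same (NDVI + 1) computation; band3/band4 are assumed to be
# non-negative float arrays of the red and near-infrared bands (not part of the original code).
import numpy as np

def ndvi_plus_one_np(band3, band4):
    band3 = band3.astype(np.float32)
    band4 = band4.astype(np.float32)
    with np.errstate(divide='ignore', invalid='ignore'):
        ndvi = (band4 - band3) / (band4 + band3)
    return np.nan_to_num(ndvi) + 1.0  # pixels where both bands are zero map to 1.0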
# Example #9
def repeat():
    global CAM_CFG, lastframe, diffframe, camframe, camhist, camhist_img
    frame = cv.GetSubRect(cv.QueryFrame(CAM_CFG['handler']), CAM_CFG['roi'])

    # This takes 1% CPU:
    #framearr = cv2array(frame)
    # This takes 7% CPU:
    #framearr = framearr.astype(np.float64)*1.0/256
    # This takes 3% CPU:
    cv.ConvertScale(frame, camframe, scale=1.0 / 256)
    # This takes 2% CPU:
    #camframearr = cv2array(camframe)

    # Calculate (cam-dark)/flat. Without mask might be faster sometimes.
    cv.Sub(camframe, darkframe, camframe, mask=CAM_CFG['mask'])
    cv.Div(camframe, flatframe, camframe, 1)

    # # Calculate cam - last
    cv.Sub(camframe, lastframe, diffframe, mask=CAM_CFG['mask'])

    # Make histogram of camframe
    camhist, camhist_img = calc_1dhisto(camframe,
                                        nbin=ibins,
                                        scale=scale,
                                        histh=histh,
                                        hist=camhist,
                                        histimg=camhist_img)

    if (LIVE):
        cv.ShowImage("cam_live", camframe)
        cv.ShowImage("cam_other", diffframe)
        cv.ShowImage("cam_histo", camhist_img)

        c = cv.WaitKey(10)
        if (c == "n"
            ):  #in "n" key is pressed while the popup window is in focus
            pass

    CAM_CFG['buf'][(CAM_CFG['frameidx']) % len(CAM_CFG['buf'])] = camframe
    CAM_CFG['frameidx'] += 1
    lastframe = cv.CloneImage(camframe)
def ray_plane_intersections(rays, planes):
    rows = rays.height
    cols = rays.width

    rays_split = [None] * 3
    for i in range(3):
        rays_split[i] = cv.CreateMat(rows, cols, cv.CV_32FC1)
    cv.Split(rays, rays_split[0], rays_split[1], rays_split[2], None)

    planes_split = [None] * 4
    for i in range(4):
        planes_split[i] = cv.CreateMat(rows, cols, cv.CV_32FC1)
    cv.Split(planes, planes_split[0], planes_split[1], planes_split[2],
             planes_split[3])

    n_dot_v = cv.CreateMat(rows, cols, cv.CV_32FC1)
    cv.SetZero(n_dot_v)
    for i in range(3):
        temp = cv.CreateMat(rows, cols, cv.CV_32FC1)
        cv.Mul(planes_split[i], rays_split[i], temp)
        cv.Add(temp, n_dot_v, n_dot_v)
    depth = cv.CreateMat(rows, cols, cv.CV_32FC1)
    cv.Div(planes_split[3], n_dot_v, depth)

    intersection_points_split = [None] * 3
    for i in range(3):
        intersection_points_split[i] = cv.CreateMat(rows, cols, cv.CV_32FC1)

    for i in range(3):
        cv.Mul(depth, rays_split[i], intersection_points_split[i])

    intersection_points = cv.CreateMat(rows, cols, cv.CV_32FC3)
    cv.Merge(intersection_points_split[0], intersection_points_split[1],
             intersection_points_split[2], None, intersection_points)

    return intersection_points
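
# A hedged NumPy version of the same per-pixel ray/plane intersection, assuming rays is an
# HxWx3 array of direction vectors and planes an HxWx4 array of (nx, ny, nz, d) rows:
# depth = d / (n . v), point = depth * v (rays parallel to their plane yield inf).
import numpy as np

def ray_plane_intersections_np(rays, planes):
    n_dot_v = np.einsum('ijk,ijk->ij', planes[..., :3], rays)  # per-pixel dot product n . v
    depth = planes[..., 3] / n_dot_v                           # distance along each ray
    return depth[..., None] * rays                             # HxWx3 intersection points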
# Example #11
def DetectaSombra(frame, bg):

    dbg = 1

    if dbg:
        t1 = time.time()

    print 'Detecting shadows in the image...'

    # create the images for each RGB channel
    imgCinza = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)
    imgHSV = cv.CloneImage(frame)

    imgH = cv.CloneImage(imgCinza)
    imgS = cv.CloneImage(imgCinza)
    imgV = cv.CloneImage(imgCinza)

    imgR = cv.CloneImage(imgCinza)
    imgG = cv.CloneImage(imgCinza)
    imgB = cv.CloneImage(imgCinza)

    bgCinza = cv.CreateImage(cv.GetSize(bg), cv.IPL_DEPTH_8U, 1)
    bgHSV = cv.CloneImage(bg)

    bgH = cv.CloneImage(bgCinza)
    bgS = cv.CloneImage(bgCinza)
    bgV = cv.CloneImage(bgCinza)

    bgR = cv.CloneImage(bgCinza)
    bgG = cv.CloneImage(bgCinza)
    bgB = cv.CloneImage(bgCinza)

    # split the frame and the background into their HSV and RGB channel planes
    cv.CvtColor(frame, imgHSV, cv.CV_BGR2HSV)
    cv.Split(imgHSV, imgH, imgS, imgV, None)
    cv.Split(frame, imgR, imgG, imgB, None)

    cv.CvtColor(bg, bgHSV, cv.CV_BGR2HSV)
    cv.Split(bgHSV, bgH, bgS, bgV, None)
    cv.Split(bg, bgR, bgG, bgB, None)

    # start of the calculations for detecting shadows
    ivbv = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)
    cv.Div(imgV, bgV, ivbv, 255)

    isbs = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)
    cv.Sub(imgS, bgS, isbs)

    ihbh = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)
    cv.AbsDiff(imgH, bgH, ihbh)

    # shadow detection parameters (the second set below overrides the first)
    alfa = 190
    beta = 210

    thrSat = 20
    thrHue = 50

    alfa = 220
    beta = 240

    thrSat = 90
    thrHue = 90

    nErode = 0
    nDilate = 0

    # process ivbv
    imgThr_ivbv = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)
    # keep only the values smaller than beta
    cv.Threshold(ivbv, imgThr_ivbv, beta, 255, cv.CV_THRESH_TRUNC)
    # keep only the values greater than alfa
    cv.Threshold(imgThr_ivbv, imgThr_ivbv, alfa, 255, cv.CV_THRESH_TOZERO)
    # binarize
    cv.Threshold(imgThr_ivbv, imgThr_ivbv, alfa, 255, cv.CV_THRESH_BINARY)

    # process isbs
    imgThr_isbs = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)
    # keep only the values smaller than thrSat
    cv.Threshold(isbs, imgThr_isbs, thrSat, 255, cv.CV_THRESH_BINARY)

    # process ihbh
    imgThr_ihbh = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)
    # keep only the values smaller than thrHue
    cv.Threshold(ihbh, imgThr_ihbh, thrHue, 255, cv.CV_THRESH_BINARY_INV)

    # wherever all the images are black, it is shadow
    imgSombra = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)

    cv.Not(imgThr_ivbv, imgThr_ivbv)
    cv.Not(imgThr_isbs, imgThr_isbs)

    cv.And(imgThr_ivbv, imgThr_isbs, imgSombra)

    cv.Not(imgThr_ihbh, imgThr_ihbh)

    cv.And(imgSombra, imgThr_ihbh, imgSombra)

    for i in range(nErode):
        cv.Erode(imgSombra, imgSombra)

    for i in range(nDilate):
        cv.Dilate(imgSombra, imgSombra)

    if dbg:
        print 'Time to detect shadows: %.5f' % (time.time() - t1)
    # display the output frames

    # highlight the shadow in green on the frame
    frameDestacado = cv.CloneImage(frame)

    cv.Or(imgG, imgSombra, imgG)

    cv.Merge(imgR, imgG, imgB, None, frameDestacado)
    '''    
    cv.ShowImage('frameDestacado',frameDestacado)
    cv.WaitKey()
    '''

    retorno = {}
    retorno['sombra'] = imgSombra
    retorno['sombraDestacada'] = frameDestacado

    return retorno
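    # NOTE: everything below this return is unreachable debug/visualization code,
    # kept as in the original.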

    cv.ShowImage('ivbv', ivbv)
    cv.ShowImage('isbs', isbs)
    cv.ShowImage('ihbh', ihbh)

    cv.ShowImage('imgThr_isbs', imgThr_isbs)
    cv.ShowImage('imgThr_ivbv', imgThr_ivbv)
    cv.ShowImage('imgThr_ihbh', imgThr_ihbh)

    cv.ShowImage('imgSombra', imgSombra)

    cv.WaitKey()

    sys.exit()

    frameMerge = cv.CloneImage(frame)
    cv.Merge(imgR, imgR, imgR, None, frameMerge)

    cv.ShowImage('frame', frame)
    cv.ShowImage('frameMerge', frameMerge)

    cv.ShowImage('imgR', imgR)
    cv.ShowImage('imgG', imgG)
    cv.ShowImage('imgB', imgB)

    cv.ShowImage('imgH', imgH)
    cv.ShowImage('imgS', imgS)
    cv.ShowImage('imgV', imgV)

    cv.WaitKey()

    return 0
# Example #12
def doSSIM(frame1, frame2):
    '''
    The equivalent of Zhou Wang's SSIM matlab code using OpenCV.
    from http://www.cns.nyu.edu/~zwang/files/research/ssim/index.html
    The measure is described in :
    "Image quality assessment: From error measurement to structural similarity"
    C++ code by Rabah Mehdi. http://mehdi.rabah.free.fr/SSIM

    C++ to Python translation and adaptation by Iñaki Úcar
    '''
    def array2cv(a):
        dtype2depth = {
            'uint8': cv.IPL_DEPTH_8U,
            'int8': cv.IPL_DEPTH_8S,
            'uint16': cv.IPL_DEPTH_16U,
            'int16': cv.IPL_DEPTH_16S,
            'int32': cv.IPL_DEPTH_32S,
            'float32': cv.IPL_DEPTH_32F,
            'float64': cv.IPL_DEPTH_64F,
        }
        try:
            nChannels = a.shape[2]
        except IndexError:  # single-channel arrays have no third dimension
            nChannels = 1
        cv_im = cv.CreateImageHeader((a.shape[1], a.shape[0]),
                                     dtype2depth[str(a.dtype)], nChannels)
        cv.SetData(cv_im, a.tostring(),
                   a.dtype.itemsize * nChannels * a.shape[1])
        return cv_im

    C1 = 6.5025
    C2 = 58.5225
    img1_temp = array2cv(frame1)
    img2_temp = array2cv(frame2)
    nChan = img1_temp.nChannels
    d = cv.IPL_DEPTH_32F
    size = img1_temp.width, img1_temp.height
    img1 = cv.CreateImage(size, d, nChan)
    img2 = cv.CreateImage(size, d, nChan)
    cv.Convert(img1_temp, img1)
    cv.Convert(img2_temp, img2)
    img1_sq = cv.CreateImage(size, d, nChan)
    img2_sq = cv.CreateImage(size, d, nChan)
    img1_img2 = cv.CreateImage(size, d, nChan)
    cv.Pow(img1, img1_sq, 2)
    cv.Pow(img2, img2_sq, 2)
    cv.Mul(img1, img2, img1_img2, 1)
    mu1 = cv.CreateImage(size, d, nChan)
    mu2 = cv.CreateImage(size, d, nChan)
    mu1_sq = cv.CreateImage(size, d, nChan)
    mu2_sq = cv.CreateImage(size, d, nChan)
    mu1_mu2 = cv.CreateImage(size, d, nChan)
    sigma1_sq = cv.CreateImage(size, d, nChan)
    sigma2_sq = cv.CreateImage(size, d, nChan)
    sigma12 = cv.CreateImage(size, d, nChan)
    temp1 = cv.CreateImage(size, d, nChan)
    temp2 = cv.CreateImage(size, d, nChan)
    temp3 = cv.CreateImage(size, d, nChan)
    ssim_map = cv.CreateImage(size, d, nChan)
    #/*************************** END INITS **********************************/
    #// PRELIMINARY COMPUTING
    cv.Smooth(img1, mu1, cv.CV_GAUSSIAN, 11, 11, 1.5)
    cv.Smooth(img2, mu2, cv.CV_GAUSSIAN, 11, 11, 1.5)
    cv.Pow(mu1, mu1_sq, 2)
    cv.Pow(mu2, mu2_sq, 2)
    cv.Mul(mu1, mu2, mu1_mu2, 1)
    cv.Smooth(img1_sq, sigma1_sq, cv.CV_GAUSSIAN, 11, 11, 1.5)
    cv.AddWeighted(sigma1_sq, 1, mu1_sq, -1, 0, sigma1_sq)
    cv.Smooth(img2_sq, sigma2_sq, cv.CV_GAUSSIAN, 11, 11, 1.5)
    cv.AddWeighted(sigma2_sq, 1, mu2_sq, -1, 0, sigma2_sq)
    cv.Smooth(img1_img2, sigma12, cv.CV_GAUSSIAN, 11, 11, 1.5)
    cv.AddWeighted(sigma12, 1, mu1_mu2, -1, 0, sigma12)
    #//////////////////////////////////////////////////////////////////////////
    #// FORMULA
    #// (2*mu1_mu2 + C1)
    cv.Scale(mu1_mu2, temp1, 2)
    cv.AddS(temp1, C1, temp1)
    #// (2*sigma12 + C2)
    cv.Scale(sigma12, temp2, 2)
    cv.AddS(temp2, C2, temp2)
    #// ((2*mu1_mu2 + C1).*(2*sigma12 + C2))
    cv.Mul(temp1, temp2, temp3, 1)
    #// (mu1_sq + mu2_sq + C1)
    cv.Add(mu1_sq, mu2_sq, temp1)
    cv.AddS(temp1, C1, temp1)
    #// (sigma1_sq + sigma2_sq + C2)
    cv.Add(sigma1_sq, sigma2_sq, temp2)
    cv.AddS(temp2, C2, temp2)
    #// ((mu1_sq + mu2_sq + C1).*(sigma1_sq + sigma2_sq + C2))
    cv.Mul(temp1, temp2, temp1, 1)
    #// ((2*mu1_mu2 + C1).*(2*sigma12 + C2))./((mu1_sq + mu2_sq + C1).*(sigma1_sq + sigma2_sq + C2))
    cv.Div(temp3, temp1, ssim_map, 1)
    index_scalar = cv.Avg(ssim_map)
    #// through observation, there is approximately
    #// 1% error max with the original matlab program
    return index_scalar[0]
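
# For reference, the map computed above is the standard SSIM index,
#   SSIM(x, y) = ((2*mu1*mu2 + C1) * (2*sigma12 + C2))
#                / ((mu1^2 + mu2^2 + C1) * (sigma1^2 + sigma2^2 + C2)),
# and the returned value is its average over the whole image.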
def _div_and_sub(src_dst_image, other_image, threes_image):
    cv.Div(other_image, threes_image, other_image) 
    cv.Sub(src_dst_image, other_image, src_dst_image) 
# Example #14
def image_callback(data):
    global running, color
    if (running):
        #print "running"
        image = bridge.imgmsg_to_cv(data, "bgr8")

        #normalize image
        cv.Split(image, rgb_r, rgb_g, rgb_b, None)
        red_mean = cv2.mean(numpy.asarray(rgb_r[:, :]))
        cv.Div(src2=cv.fromarray(numpy.ones((480, 640))),
               src1=rgb_r,
               dst=scaled_r,
               scale=128 / red_mean[0])
        green_mean = cv2.mean(numpy.asarray(rgb_g[:, :]))
        cv.Div(src2=cv.fromarray(numpy.ones((480, 640))),
               src1=rgb_g,
               dst=scaled_g,
               scale=128 / green_mean[0])
        blue_mean = cv2.mean(numpy.asarray(rgb_b[:, :]))
        cv.Div(src2=cv.fromarray(numpy.ones((480, 640))),
               src1=rgb_b,
               dst=scaled_b,
               scale=128 / blue_mean[0])
        cv.Merge(scaled_r, scaled_g, scaled_b, None, cv_image)

        cv.CvtColor(cv_image, hsv, cv.CV_BGR2HSV)
        cv.CvtColor(cv_image, lab, cv.CV_BGR2Lab)
        cv.CvtColor(cv_image, luv, cv.CV_BGR2Luv)
        cv.CvtColor(cv_image, hls, cv.CV_BGR2HLS)
        cv.CvtColor(cv_image, xyz, cv.CV_BGR2XYZ)
        cv.CvtColor(cv_image, ycrcb, cv.CV_BGR2YCrCb)

        cv.Split(hsv, hsv_h, hsv_s, hsv_v, None)
        cv.Split(cv_image, rgb_r, rgb_g, rgb_b, None)
        cv.Split(lab, lab_l, lab_a, lab_b, None)
        cv.Split(luv, luv_l, luv_u, luv_v, None)
        cv.Split(hls, hls_h, hls_l, hls_s, None)
        cv.Split(xyz, xyz_x, xyz_y, xyz_x, None)  # NOTE: the Z plane overwrites xyz_x here; a separate xyz_z buffer was presumably intended
        cv.Split(ycrcb, ycrcb_y, ycrcb_cr, ycrcb_cb, None)

        cv.Not(lab_a, a_not)
        cv.Sub(hsv_s, a_not, sa)
        cv.Sub(luv_u, hls_h, test)
        '''
        cv.CvtColor(cv_image,gray,cv.CV_BGR2GRAY)
        cv.Smooth(gray,blurred_gray,cv.CV_GAUSSIAN,3,3)
        small = cv2.resize(numpy.asarray(ycrcb_cr[:,:]),(320,240)
        circles=cv2.HoughCircles(image=numpy.asarray(small[:,:]),method=cv.CV_HOUGH_GRADIENT,dp=1,minDist=1,param1=100,param2=60, minRadius=1,maxRadius=600)
        
        if not(circles is None):
            for i in circles:
                if (i[0][1] < 200):
                    print "found circles",i,len(i)
                    center = (i[0][0],i[0][1])
                    radius = i[0][2]
                    cv.Circle(cv_image,center,radius,(1,1,0),2)
        '''

        cv.Mul(test, red_adaptive, final)

        if (color == 'red'):
            threshold_red(sa)
            #threshold_red(ycrcb_cr)
            red_contours, _ = cv2.findContours(
                image=numpy.asarray(red_dilated_image[:, :]),
                mode=cv.CV_RETR_EXTERNAL,
                method=cv.CV_CHAIN_APPROX_SIMPLE)
            circles = extract_circles(red_contours, [1, 0, 0])
            for x, y, radius in circles:
                cv.Circle(cv_image, (x, y), radius, [0, 0, 255], 3)
        elif (color == 'purple'):
            threshold_purple(lab_a)
            purple_contours, _ = cv2.findContours(
                image=numpy.asarray(purple_dilated_image[:, :]),
                mode=cv.CV_RETR_EXTERNAL,
                method=cv.CV_CHAIN_APPROX_SIMPLE)
            circles = extract_circles(purple_contours, [1, 0, 1])
            for x, y, radius in circles:
                cv.Circle(cv_image, (x, y), radius, [255, 0, 255], 3)

        cv.ShowImage("red", red_adaptive)
        cv.ShowImage("purple", purple_adaptive)
        cv.ShowImage("camera feed", cv_image)

        cv.WaitKey(3)
# Example #15
def cam_getimage(show=False,
                 dfcorr=True,
                 raw=False,
                 showhisto=True,
                 waitkey=25):
    """
	Get image from the camera, convert, scale, dark-flat correct,
	optionally show this and return as numpy.ndarray.

	If **raw** is set, return the (scaled/ROI'd) image as CvImage

	If CAM_CFG['flat'] or CAM_CFG['dark'] are set, use these to dark-flat 
	correct the image.

	@param [in] show Show image after acquisition
	@param [in] dfcorr Do dark-flat correction
	@param [in] raw Return raw IplImage (scaled and ROI'd, w/o DF correction)
	@param [in] showhisto Show histogram as well (only with **show**)
	@param [in] waitkey Wait time for cv.WaitKey(). If 0, don't call. (only with **show**)
	@return Image data as numpy.ndarray
	"""

    if (not CAM_CFG['handle']): return

    rawframe = cv.CloneImage(cv.QueryFrame(CAM_CFG['handle']))

    # Downscale color images
    if (rawframe.channels > 1):
        rawsz = cv.GetSize(rawframe)
        if (not CAM_CFG.has_key('rawyuv') or not CAM_CFG['rawyuv']):
            CAM_CFG['rawyuv'] = cv.CreateImage(rawsz, rawframe.depth, 3)
            CAM_CFG['rawgray'] = cv.CreateImage(rawsz, rawframe.depth, 1)
        cv.CvtColor(rawframe, CAM_CFG['rawyuv'], cv.CV_BGR2YCrCb)
        cv.Split(CAM_CFG['rawyuv'], CAM_CFG['rawgray'], None, None, None)
        rawframe = CAM_CFG['rawgray']

    if (CAM_CFG['roi']):
        rawframe = cv.GetSubRect(rawframe, tuple(CAM_CFG['roi']))

    procf = CAM_CFG['frame']
    cv.ConvertScale(rawframe, procf, scale=1.0 / 256)

    if (raw):
        return cv.CloneImage(procf)

    if (CAM_CFG.has_key('dark') and dfcorr):
        cv.Sub(procf, CAM_CFG['dark'], procf)
    if (CAM_CFG.has_key('flat') and dfcorr):
        cv.Div(procf, CAM_CFG['flat'], procf)
    # We *don't* apply the aperture mask here because we might need the data

    if (show):
        cv.ShowImage(CAM_CFG['window'], procf)
        if (showhisto):
            global camhist, camhistimg
            camhist, camhistimg = calc_1dhisto(procf,
                                               hist=camhist,
                                               histimg=camhistimg)
            cv.ShowImage("cam_histogram", camhistimg)
        if (waitkey):
            cv.WaitKey(waitkey)

    depth2dtype = {
        cv.IPL_DEPTH_32F: 'float32',
        cv.IPL_DEPTH_64F: 'float64',
    }

    framearr = np.fromstring(procf.tostring(),
                             dtype=depth2dtype[procf.depth],
                             count=procf.width * procf.height *
                             procf.nChannels)
    framearr.shape = (procf.height, procf.width, procf.nChannels)

    return framearr[:, :, 0]
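
# Minimal NumPy sketch of the dark/flat correction applied above, using hypothetical float
# arrays frame/dark/flat already scaled to the 0..1 range (an illustration, not the original API).
import numpy as np

def dark_flat_correct_np(frame, dark, flat):
    corrected = (frame - dark) / np.where(flat > 0, flat, 1.0)  # guard against division by zero
    return np.clip(corrected, 0.0, 1.0)                         # keep the result in display range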
# Example #16
def image_callback(data):
    global running
    #print "running"
    if (running):

        image = bridge.imgmsg_to_cv(data, "bgr8")

        #normalize image
        cv.Split(image, rgb_r, rgb_g, rgb_b, None)
        red_mean = cv2.mean(np.asarray(rgb_r[:, :]))
        cv.Div(src2=cv.fromarray(np.ones((480, 640))),
               src1=rgb_r,
               dst=scaled_r,
               scale=128 / red_mean[0])
        green_mean = cv2.mean(np.asarray(rgb_g[:, :]))
        cv.Div(src2=cv.fromarray(np.ones((480, 640))),
               src1=rgb_g,
               dst=scaled_g,
               scale=128 / green_mean[0])
        blue_mean = cv2.mean(np.asarray(rgb_b[:, :]))
        cv.Div(src2=cv.fromarray(np.ones((480, 640))),
               src1=rgb_b,
               dst=scaled_b,
               scale=128 / blue_mean[0])
        cv.Merge(scaled_r, scaled_g, scaled_b, None, cv_image)

        cv.CvtColor(cv_image, hsv_image,
                    cv.CV_BGR2HSV)  # --convert from BGR to HSV
        cv.CvtColor(cv_image, lab_image_, cv.CV_BGR2Lab)
        cv.CvtColor(cv_image, ycrcb, cv.CV_BGR2YCrCb)

        cv.Smooth(cv_image, blurred_bgr_image, cv.CV_GAUSSIAN, 9, 9)
        cv.Smooth(hsv_image, blurred_image, cv.CV_GAUSSIAN, 5, 5)
        cv.Split(blurred_image, h_channel, s_channel, v_channel, None)
        cv.Split(lab_image_, l_channel, a_channel, b_channel, None)
        cv.Split(ycrcb, ycrcb_y, ycrcb_cr, ycrcb_cb, None)

        cv.Sub(s_channel, h_channel, sminh)
        cv.Sub(s_channel, a_channel, hmina)
        cv.Sub(ycrcb_cr, hmina, final)
        #cv.Sub(s_channel,v_channel,sminv) #maybe use for blue

        threshold_red(final)
        threshold_green(final)
        #threshold_yellow(blurred_image)
        #threshold_blue(h_channel)

        #cv.ShowImage("test",sminv)
        cv.ShowImage("red", red_dilated_image)
        #cv.ShowImage("yellow",yellow_adaptive)
        #cv.ShowImage("blue",blue_dilated_image)
        cv.ShowImage("green", green_dilated_image)

        red_contours, _ = cv2.findContours(image=np.asarray(
            red_dilated_image[:, :]),
                                           mode=cv.CV_RETR_EXTERNAL,
                                           method=cv.CV_CHAIN_APPROX_SIMPLE)
        green_contours, _ = cv2.findContours(image=np.asarray(
            green_dilated_image[:, :]),
                                             mode=cv.CV_RETR_EXTERNAL,
                                             method=cv.CV_CHAIN_APPROX_SIMPLE)
        yellow_contours, _ = cv2.findContours(image=np.asarray(
            yellow_dilated_image[:, :]),
                                              mode=cv.CV_RETR_EXTERNAL,
                                              method=cv.CV_CHAIN_APPROX_SIMPLE)
        blue_contours, _ = cv2.findContours(image=np.asarray(
            blue_dilated_image[:, :]),
                                            mode=cv.CV_RETR_EXTERNAL,
                                            method=cv.CV_CHAIN_APPROX_SIMPLE)
        #cv2.drawContours(np.asarray(cv_image[:,:]),red_contours,-1,(0,0,255),3)

        print_lidar_projections(cv_image)

        for i in [
            (green_contours, [0, 1, 0]), (red_contours, [1, 0, 0])
        ]:  #(yellow_contours,[1,1,0]) , ]:# , (blue_contours,[0,0,1])]:
            circles = extract_circles(i[0], i[1])
            rgb = i[1]
            bgr = (255 * np.array(rgb[::-1])).tolist(
            )  #invert list and multiply by 255 for cv.Circle color argument
            for x, y, radius in circles:
                cv.Circle(cv_image, (x, y), radius, bgr, 3)

        cv.SetMouseCallback("camera feed", mouse_callback, hsv_image)
        #cv.ShowImage("l channel",l_channel)
        #cv.ShowImage("a channel",a_channel)
        #cv.ShowImage("b channel",b_channel)
        cv.ShowImage("camera feed", cv_image)

        cv.WaitKey(3)
# Example #17
def line_line_intersections(P_0, u, Q_0, v):
    rows = P_0.height
    cols = P_0.width
    w_0 = cv.CreateMat(rows, cols, cv.CV_32FC3)

    cv.Sub(P_0, Q_0, w_0)

    a = element_wise_dot_product(u, u)
    b = element_wise_dot_product(u, v)
    c = element_wise_dot_product(v, v)
    d = element_wise_dot_product(u, w_0)
    e = element_wise_dot_product(v, w_0)

    a_mul_c = cv.CreateMat(rows, cols, cv.CV_32FC1)
    cv.Mul(a, c, a_mul_c)

    b_squared = cv.CreateMat(rows, cols, cv.CV_32FC1)
    cv.Pow(b, b_squared, 2)

    denominator = cv.CreateMat(rows, cols, cv.CV_32FC1)
    cv.Sub(a_mul_c, b_squared, denominator)

    b_mul_e = cv.CreateMat(rows, cols, cv.CV_32FC1)
    cv.Mul(b, e, b_mul_e)

    c_mul_d = cv.CreateMat(rows, cols, cv.CV_32FC1)
    cv.Mul(c, d, c_mul_d)

    b_mul_d = cv.CreateMat(rows, cols, cv.CV_32FC1)
    cv.Mul(b, d, b_mul_d)

    a_mul_e = cv.CreateMat(rows, cols, cv.CV_32FC1)
    cv.Mul(a, e, a_mul_e)

    s_c = cv.CreateMat(rows, cols, cv.CV_32FC1)
    cv.Sub(b_mul_e, c_mul_d, s_c)
    cv.Div(s_c, denominator, s_c)

    t_c = cv.CreateMat(rows, cols, cv.CV_32FC1)
    cv.Sub(a_mul_e, b_mul_d, t_c)
    cv.Div(t_c, denominator, t_c)

    u_x = cv.CreateMat(rows, cols, cv.CV_32FC1)
    u_y = cv.CreateMat(rows, cols, cv.CV_32FC1)
    u_z = cv.CreateMat(rows, cols, cv.CV_32FC1)

    cv.Split(u, u_x, u_y, u_z, None)

    su_x = cv.CreateMat(rows, cols, cv.CV_32FC1)
    su_y = cv.CreateMat(rows, cols, cv.CV_32FC1)
    su_z = cv.CreateMat(rows, cols, cv.CV_32FC1)

    cv.Mul(s_c, u_x, su_x)
    cv.Mul(s_c, u_y, su_y)
    cv.Mul(s_c, u_z, su_z)

    su = cv.CreateMat(rows, cols, cv.CV_32FC3)
    cv.Merge(su_x, su_y, su_z, None, su)

    tu_x = cv.CreateMat(rows, cols, cv.CV_32FC1)
    tu_y = cv.CreateMat(rows, cols, cv.CV_32FC1)
    tu_z = cv.CreateMat(rows, cols, cv.CV_32FC1)

    cv.Mul(t_c, u_x, tu_x)
    cv.Mul(t_c, u_y, tu_y)
    cv.Mul(t_c, u_z, tu_z)

    tu = cv.CreateMat(rows, cols, cv.CV_32FC3)
    cv.Merge(tu_x, tu_y, tu_z, None, tu)

    closest_point = cv.CreateMat(rows, cols, cv.CV_32FC3)
    cv.Add(P_0, su, closest_point)
    return closest_point
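
# The per-pixel solve above is the standard closest-approach formula for two lines
# P(s) = P_0 + s*u and Q(t) = Q_0 + t*v with w_0 = P_0 - Q_0:
#   s_c = (b*e - c*d) / (a*c - b^2),   t_c = (a*e - b*d) / (a*c - b^2),
# and the returned point is P_0 + s_c * u; note that t_c (and tu) is computed but unused here.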
# Example #18
def image_callback(data):
    
    image = bridge.imgmsg_to_cv(data,"bgr8")
    
    #normalize image
    cv.Split(image,rgb_r,rgb_g,rgb_b,None)
    red_mean = cv2.mean(numpy.asarray(rgb_r[:,:]))
    cv.Div(src2 = cv.fromarray(numpy.ones((480,640))),src1 = rgb_r,dst = scaled_r, scale = 128/red_mean[0])
    green_mean = cv2.mean(numpy.asarray(rgb_g[:,:]))
    cv.Div(src2 = cv.fromarray(numpy.ones((480,640))),src1 = rgb_g,dst = scaled_g, scale = 128/green_mean[0])
    blue_mean = cv2.mean(numpy.asarray(rgb_b[:,:]))
    cv.Div(src2 = cv.fromarray(numpy.ones((480,640))),src1 = rgb_b,dst = scaled_b, scale = 128/blue_mean[0])
    cv.Merge(scaled_r,scaled_g,scaled_b,None,cv_image)
    
    
    #try all the color spaces
    cv.CvtColor(cv_image,hsv,cv.CV_BGR2HSV)
    cv.CvtColor(cv_image,lab,cv.CV_BGR2Lab)
    cv.CvtColor(cv_image,luv,cv.CV_BGR2Luv)
    cv.CvtColor(cv_image,hls,cv.CV_BGR2HLS)
    cv.CvtColor(cv_image,xyz,cv.CV_BGR2XYZ)
    cv.CvtColor(cv_image,ycrcb,cv.CV_BGR2YCrCb)
    
    cv.Split(hsv,hsv_h,hsv_s,hsv_v,None)
    cv.Split(cv_image,rgb_r,rgb_g,rgb_b,None)
    cv.Split(lab,lab_l,lab_a,lab_b,None)
    cv.Split(luv,luv_l,luv_u,luv_v,None)
    cv.Split(hls,hls_h,hls_l,hls_s,None)
    cv.Split(xyz,xyz_x,xyz_y,xyz_x,None)  # NOTE: the Z plane overwrites xyz_x here; a separate xyz_z buffer was presumably intended
    cv.Split(ycrcb,ycrcb_y,ycrcb_cr,ycrcb_cb,None)
    
    #cv.Not(lab_a,a_not)
    #cv.Sub(hsv_s,a_not,sa)
    cv.Not(hsv_s,s_not)
    cv.Not(hls_h,rock)
    
    #cv.Sub(hsv_s,hsv_h,rock)
    #cv.Sub(rock,hls_h,test)        #MAYBE LIZARD
    
    #cv.Sub(ycrcb_cr,hsv_h,rock)
    
    cv.Sub(luv_u,hls_h,scissors)
    cv.Sub(luv_v,luv_u,lizard)
    
    
    threshold_scissors(hls_h)     #works
    threshold_lizard(lizard)      # finicky (gets confused with paper)
    threshold_spock(hls_s)        #works (noisy)
    threshold_paper(ycrcb_cb)
    threshold_rock(hls_s)         #not working
    
    
    cv.ShowImage("paper",paper_dilated)
    #cv.ShowImage("lizard",lizard_dilated)
    #cv.ShowImage("scissors",scissors_dilated)
    #cv.ShowImage("spock",spock_dilated)
    #cv.ShowImage("rock",rock)
    
    #threshold_red(ycrcb_cr)
    scissors_contours,_ = cv2.findContours(image=numpy.asarray(scissors_dilated[:,:]),mode=cv.CV_RETR_EXTERNAL,method=cv.CV_CHAIN_APPROX_SIMPLE)
    lizard_contours,_ = cv2.findContours(image=numpy.asarray(lizard_dilated[:,:]),mode=cv.CV_RETR_EXTERNAL,method=cv.CV_CHAIN_APPROX_SIMPLE)
    spock_contours,_ = cv2.findContours(image=numpy.asarray(spock_dilated[:,:]),mode=cv.CV_RETR_EXTERNAL,method=cv.CV_CHAIN_APPROX_SIMPLE)
    rock_contours,_ = cv2.findContours(image=numpy.asarray(rock_dilated[:,:]),mode=cv.CV_RETR_EXTERNAL,method=cv.CV_CHAIN_APPROX_SIMPLE)
    paper_contours,_ = cv2.findContours(image=numpy.asarray(paper[:,:]),mode=cv.CV_RETR_EXTERNAL,method=cv.CV_CHAIN_APPROX_SIMPLE)
    
    
    find_squares(scissors_contours,"scissors")
    find_squares(lizard_contours,"lizard")
    find_squares(spock_contours,"spock")
    find_squares(paper_contours,"paper")
    '''
    cv.ShowImage("HSV_H",hsv_h)
    cv.ShowImage("HSV_S",hsv_s)
    cv.ShowImage("HSV_V",hsv_v)
    cv.ShowImage("LAB_L",lab_l)
    cv.ShowImage("LAB_A",lab_a)
    cv.ShowImage("LAB_B",lab_b)
    cv.ShowImage("RGB_R",scaled_r)
    cv.ShowImage("RGB_G",scaled_g)
    cv.ShowImage("RGB_B",scaled_b)
    cv.ShowImage("LUV_L",luv_l)
    cv.ShowImage("LUV_U",luv_u)
    cv.ShowImage("LUV_V",luv_v)
    cv.ShowImage("HLS_H",hls_h)
    cv.ShowImage("HLS_L",hls_l)
    cv.ShowImage("HLS_S",hls_s)
    cv.ShowImage("YCrCb_Y",ycrcb_y)
    cv.ShowImage("YCrCb_Cr",ycrcb_cr)
    cv.ShowImage("YCrCb_Cb",ycrcb_cb)
    '''
    cv.ShowImage("normalized",cv_image)
    cv.WaitKey(3)
# Example #19
 def __SSIM(self, frame1, frame2):
     """
         The equivalent of Zhou Wang's SSIM matlab code using OpenCV.
         from http://www.cns.nyu.edu/~zwang/files/research/ssim/index.html
         The measure is described in :
         "Image quality assessment: From error measurement to structural similarity"
         C++ code by Rabah Mehdi. http://mehdi.rabah.free.fr/SSIM
         
         C++ to Python translation and adaptation by Iñaki Úcar
     """
     C1 = 6.5025
     C2 = 58.5225
     img1_temp = self.__array2cv(frame1)
     img2_temp = self.__array2cv(frame2)
     nChan = img1_temp.nChannels
     d = cv.IPL_DEPTH_32F
     size = img1_temp.width, img1_temp.height
     img1 = cv.CreateImage(size, d, nChan)
     img2 = cv.CreateImage(size, d, nChan)
     cv.Convert(img1_temp, img1)
     cv.Convert(img2_temp, img2)
     img1_sq = cv.CreateImage(size, d, nChan)
     img2_sq = cv.CreateImage(size, d, nChan)
     img1_img2 = cv.CreateImage(size, d, nChan)
     cv.Pow(img1, img1_sq, 2)
     cv.Pow(img2, img2_sq, 2)
     cv.Mul(img1, img2, img1_img2, 1)
     mu1 = cv.CreateImage(size, d, nChan)
     mu2 = cv.CreateImage(size, d, nChan)
     mu1_sq = cv.CreateImage(size, d, nChan)
     mu2_sq = cv.CreateImage(size, d, nChan)
     mu1_mu2 = cv.CreateImage(size, d, nChan)
     sigma1_sq = cv.CreateImage(size, d, nChan)
     sigma2_sq = cv.CreateImage(size, d, nChan)
     sigma12 = cv.CreateImage(size, d, nChan)
     temp1 = cv.CreateImage(size, d, nChan)
     temp2 = cv.CreateImage(size, d, nChan)
     temp3 = cv.CreateImage(size, d, nChan)
     ssim_map = cv.CreateImage(size, d, nChan)
     #/*************************** END INITS **********************************/
     #// PRELIMINARY COMPUTING
     cv.Smooth(img1, mu1, cv.CV_GAUSSIAN, 11, 11, 1.5)
     cv.Smooth(img2, mu2, cv.CV_GAUSSIAN, 11, 11, 1.5)
     cv.Pow(mu1, mu1_sq, 2)
     cv.Pow(mu2, mu2_sq, 2)
     cv.Mul(mu1, mu2, mu1_mu2, 1)
     cv.Smooth(img1_sq, sigma1_sq, cv.CV_GAUSSIAN, 11, 11, 1.5)
     cv.AddWeighted(sigma1_sq, 1, mu1_sq, -1, 0, sigma1_sq)
     cv.Smooth(img2_sq, sigma2_sq, cv.CV_GAUSSIAN, 11, 11, 1.5)
     cv.AddWeighted(sigma2_sq, 1, mu2_sq, -1, 0, sigma2_sq)
     cv.Smooth(img1_img2, sigma12, cv.CV_GAUSSIAN, 11, 11, 1.5)
     cv.AddWeighted(sigma12, 1, mu1_mu2, -1, 0, sigma12)
     #//////////////////////////////////////////////////////////////////////////
     #// FORMULA
     #// (2*mu1_mu2 + C1)
     cv.Scale(mu1_mu2, temp1, 2)
     cv.AddS(temp1, C1, temp1)
     #// (2*sigma12 + C2)
     cv.Scale(sigma12, temp2, 2)
     cv.AddS(temp2, C2, temp2)
     #// ((2*mu1_mu2 + C1).*(2*sigma12 + C2))
     cv.Mul(temp1, temp2, temp3, 1)
     #// (mu1_sq + mu2_sq + C1)
     cv.Add(mu1_sq, mu2_sq, temp1)
     cv.AddS(temp1, C1, temp1)
     #// (sigma1_sq + sigma2_sq + C2)
     cv.Add(sigma1_sq, sigma2_sq, temp2)
     cv.AddS(temp2, C2, temp2)
     #// ((mu1_sq + mu2_sq + C1).*(sigma1_sq + sigma2_sq + C2))
     cv.Mul(temp1, temp2, temp1, 1)
     #// ((2*mu1_mu2 + C1).*(2*sigma12 + C2))./((mu1_sq + mu2_sq + C1).*(sigma1_sq + sigma2_sq + C2))
     cv.Div(temp3, temp1, ssim_map, 1)
     index_scalar = cv.Avg(ssim_map)
     #// through observation, there is approximately
     #// 1% error max with the original matlab program
     return index_scalar[0]