Code example #1
0
def rgb2gray(img, grayscale):
    '''
    Convert a color image to grayscale in place.

    @param img: 3-channel source image (IplImage)
    @param grayscale: pre-allocated single-channel destination image,
        written in place; nothing is returned
    '''
    # BUG FIX: the original had three statements after a bare `return`
    # (a SetImageCOI/Copy/SetImageCOI sequence) that could never execute;
    # that unreachable dead code has been removed.
    cv.CvtColor(img, grayscale, cv.CV_RGB2GRAY)
Code example #2
0
    def test_divider(self):
        image = cv.LoadImage(
            os.path.abspath(os.environ['BORG'] +
                            '/Brain/data/hog_test/noface.jpg'))
        subRect = (0, 0, 250, 187)
        subimage = self.divider.crop(image, subRect)
        subimage1 = image

        result = self.divider.divide(image, 2, 2)

        if cv.GetSize(subimage) != cv.GetSize(result[0]):
            self.fail(
                "The subimage sizes are not correct. Either correctly crop the image manually or check divider function"
            )

        dif = cv.CreateImage(cv.GetSize(subimage), cv.IPL_DEPTH_8U, 3)
        dif2 = cv.CreateImage(cv.GetSize(subimage), cv.IPL_DEPTH_8U, 3)

        cv.AbsDiff(subimage, result[0], dif)
        cv.Threshold(dif, dif2, 50, 255, cv.CV_THRESH_TOZERO)
        for i in range(3):
            cv.SetImageCOI(dif2, i + 1)
            n_nonzero = cv.CountNonZero(dif2)

        if n_nonzero < 400:
            threshold = 0
        else:
            threshold = 1
            self.assertEqual(threshold, 0, "The subimages are different")

        result = self.divider.divide(image, 4, 4, option="pro")
        print len(result)
        print result

        dif = cv.CreateImage(cv.GetSize(subimage1), cv.IPL_DEPTH_8U, 3)
        dif2 = cv.CreateImage(cv.GetSize(subimage1), cv.IPL_DEPTH_8U, 3)

        cv.AbsDiff(subimage1, result[0], dif)
        cv.Threshold(dif, dif2, 50, 255, cv.CV_THRESH_TOZERO)
        for i in range(3):
            cv.SetImageCOI(dif2, i + 1)
            n_nonzero = cv.CountNonZero(dif2)

        if n_nonzero < 400:
            threshold = 0
        else:
            threshold = 1
            self.assertEqual(threshold, 0, "The subimages are different")

        result = self.divider.divide(image, 4, 4, option="pro", overlap=10)
Code example #3
0
def detect_and_draw(img, cascade):
    # allocate temporary images
    gray = cv.CreateImage((img.width, img.height), 8, 1)
    small_img = cv.CreateImage((cv.Round(
        img.width / image_scale), cv.Round(img.height / image_scale)), 8, 1)

    # convert color input image to grayscale
    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)

    # scale input image for faster processing
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)

    #cv.EqualizeHist(small_img, small_img)
    image = img
    cv.SetImageCOI(image, 1)
    cv.SetImageCOI(image, 0)

    hsv = cv.CreateImage(cv.GetSize(image), 8, 3)
    h_plane = cv.CreateImage(cv.GetSize(image), 8, 1)
    s_plane = cv.CreateImage(cv.GetSize(image), 8, 1)
    cv.CvtColor(image, hsv, cv.CV_RGB2HSV)
    cv.Split(hsv, h_plane, s_plane, None, None)
    #img=s_plane;

    mergi(0, 0)
    if (cascade):
        t = cv.GetTickCount()
        faces = cv.HaarDetectObjects(small_img, cascade,
                                     cv.CreateMemStorage(0), haar_scale,
                                     min_neighbors, haar_flags, min_size)
        t = cv.GetTickCount() - t
        print "detection time = %gms" % (t / (cv.GetTickFrequency() * 1000.))
        if faces:
            mergi(-100, -100)
            mergi(-100, 100)
            for ((x, y, w, h), n) in faces:
                # the input to cv.HaarDetectObjects was resized, so scale the
                # bounding box of each face and convert it to two CvPoints
                pt1 = (int(x * image_scale), int(y * image_scale))
                pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
                cv.Rectangle(img, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)


#        else
#            mergi(0,0)

    cv.ShowImage("result", img)
Code example #4
0
File: cv20squares.py  Project: vfn/opencv
def find_squares4(color_img):
    """Locate candidate squares in *color_img*.

    Pipeline:
      * suppress fine noise with a pyramid down/up pass,
      * for each colour plane, run Canny (lower threshold 0 to force edge
        merging) followed by dilation, then several plain binary
        thresholds, and
      * hand every binary map to find_squares_from_binary().

    Returns all square contours in one flat list, 4 (x, y) points each.
    """
    # Clamp to even dimensions so the half-size pyramid image lines up.
    even_w = color_img.width & -2
    even_h = color_img.height & -2

    work = cv.CloneImage(color_img)  # working copy of the input
    edges = cv.CreateImage((even_w, even_h), 8, 1)

    # Restrict the copy to its largest even-sized ROI.
    cv.SetImageROI(work, (0, 0, even_w, even_h))

    # Down-scale then up-scale to filter out noise.
    half = cv.CreateImage((even_w / 2, even_h / 2), 8, 3)
    cv.PyrDown(work, half, 7)
    cv.PyrUp(half, work, 7)

    plane = cv.CreateImage((even_w, even_h), 8, 1)
    found = []

    LEVELS = 11
    for channel in (1, 2, 3):
        # Extract one colour plane via the channel-of-interest mechanism.
        cv.SetImageCOI(work, channel)
        cv.Copy(work, plane, None)

        # Method 1: Canny to catch squares with gradient shading; dilate
        # to close potential holes between edge segments.
        cv.Canny(plane, edges, 0, 50, 5)
        cv.Dilate(edges, edges)
        found.extend(find_squares_from_binary(edges))

        # Method 2: binary thresholding at several levels.
        for level in range(1, LEVELS):
            cv.Threshold(plane, edges, (level + 1) * 255 / LEVELS, 255,
                         cv.CV_THRESH_BINARY)
            found.extend(find_squares_from_binary(edges))

    return found
Code example #5
0
    def extraction_laser(image, disp=False):
        """Extract laser spots from *image* and return their label centers.

        Note: defined without ``self`` although it sits inside a class --
        presumably intended as a static method; confirm against callers.

        @param image: BGR color image (IplImage) -- assumed, given the
            CV_BGR2Lab conversion below
        @param disp: if True, show a matplotlib figure of the result
        @return: Nx2 array of (row, col) coordinates of the accepted
            laser-spot centers
        """
        ## Convert to Lab and pull out the 'a' channel (the red/green
        ## axis), where a red laser dot stands out.
        Lab = cv.CloneImage(image)
        cv.Zero(Lab)
        a = cv.CreateImage(cv.GetSize(image), cv.IPL_DEPTH_8U, 1)
        cv.CvtColor(image, Lab, cv.CV_BGR2Lab)
        cv.SetImageCOI(Lab, 2)
        cv.Copy(Lab, a)

        ## Threshold 'a' at the laser level: lower T one step at a time
        ## until at least 4 connected components appear.
        ## NOTE(review): if the image never yields 4 components this loop
        ## never terminates (T can go below 0) -- confirm inputs.
        BW = cv.CloneImage(a)
        cv.Zero(BW)
        T = 0 + 128  # start thresholding at 0+delta with delta = 128
        Nb_lbl = 0
        while Nb_lbl < 4:
            cv.Threshold(a, BW, T, 255, cv.CV_THRESH_BINARY)
            A_BW = cvnumpy.cv2array(BW)[:, :, 0]
            A_lbl, Nb_lbl = ndimage.label(A_BW)
            T -= 1

        ## Match the right labels to the laser spots
        center = np.floor(np.array(A_BW.shape) / 2)
        # Proximity-to-center constraint: assumes the photo was taken from
        # more than 37 cm away, which corresponds to a laser diagonal of
        # about 17% of the image diagonal.
        proportion = 0.2
        diag_constraint = proportion * (np.sqrt(np.sum(
            np.array(A_BW.shape)**2)))
        # Accept components whose doubled distance-to-center is within the
        # constraint; relabel them 1..k and zero out the rest.
        new_lbl = 1
        A_center_lbl = A_lbl * 0
        while new_lbl < Nb_lbl:
            for lbl in xrange(1, Nb_lbl + 1):
                coord_current_lbl = np.unravel_index(pl.find(A_lbl == lbl),
                                                     A_lbl.shape)
                coord_center = np.sum(coord_current_lbl, 1) / len(
                    coord_current_lbl[0])
                diag_current_lbl = 2 * np.sqrt(
                    np.sum((coord_center - center)**2))
                if diag_current_lbl < diag_constraint:
                    A_center_lbl[coord_center[0], coord_center[1]] = new_lbl
                    A_lbl[A_lbl == lbl] = new_lbl
                    new_lbl += 1
                else:
                    A_lbl[A_lbl == lbl] = 0
            # NOTE(review): decrementing `proportion` has no effect --
            # diag_constraint is never recomputed from it, so the outer
            # while can only exit through new_lbl reaching Nb_lbl (or loop
            # forever).  Probably diag_constraint should be refreshed here;
            # confirm intent before changing.
            proportion -= 0.5

        if disp:
            #A_a = self.cv2array(a)[:,:,0]

            #fig1 = pl.figure()
            #pl.imshow(A_a, cmap = pl.cm.gray)
            #pl.title('Composante a de Lab')
            #fig1.show()

            fig2 = pl.figure()
            #pl.imshow(A_lbl)
            pl.title('Image label:Seuillage a ' + repr(T))
            fig2.show()
        return np.transpose(
            np.unravel_index(pl.find(A_center_lbl > 0), A_lbl.shape))

        # NOTE(review): this nested `def` is unreachable -- it follows the
        # return above and is never bound before the function exits; it was
        # probably meant to be a class-level method (it takes `self`).
        def rectif(self, image, cadreIn, cadreOut):
            # Warp *image* in place so that the quad cadreIn maps onto
            # cadreOut (perspective rectification).
            prev_image = cv.CloneImage(image)
            cv.Zero(image)
            mmat = cv.CreateMat(3, 3, cv.CV_32FC1)
            print("mmat= %s" % repr(mmat))
            cv.GetPerspectiveTransform(cadreIn, cadreOut, mmat)
            cv.WarpPerspective(prev_image, image,
                               mmat)  #, flags=cv.CV_WARP_INVERSE_MAP )