def __produce_gradient_image(self, i, scale):
    """Return a gradient (Sobel) image of *i*, smoothed by down/up-scaling.

    The grey-scale gradient image is shrunk by *scale* and blown back up,
    which acts as a cheap low-pass filter on the gradient response.

    :param i: input colour image (legacy ``cv`` IplImage, RGB order).
    :param scale: shrink factor for the intermediate image.
    :return: single-channel 8-bit gradient image, same size as *i*.
    """
    size = cv.GetSize(i)
    grey_image = cv.CreateImage(size, 8, 1)

    # int(): CreateImage needs integer dimensions; plain '/' yields floats
    # under Python 3 (or with a non-integer scale).  Using a new name also
    # keeps 'size' valid for the full-size buffers below.
    small_size = [int(s / scale) for s in size]
    grey_image_small = cv.CreateImage(small_size, 8, 1)

    cv.CvtColor(i, grey_image, cv.CV_RGB2GRAY)

    # 16-bit signed buffer: Sobel responses can be negative or exceed 255.
    df_dx = cv.CreateImage(size, cv.IPL_DEPTH_16S, 1)
    cv.Sobel(grey_image, df_dx, 1, 1)
    cv.Convert(df_dx, grey_image)
    # Down- then up-sample to smooth the gradient response.
    cv.Resize(grey_image, grey_image_small)  # , interpolation=cv.CV_INTER_NN)
    cv.Resize(grey_image_small, grey_image)  # , interpolation=cv.CV_INTER_NN)
    return grey_image
Esempio n. 2
0
def DetectFace(image, faceCascade):
    """Detect faces in *image* and draw a red box around each one.

    Modified from: http://www.lucaamore.com/?p=638

    :param image: colour input image (legacy ``cv`` IplImage).
    :param faceCascade: Haar cascade loaded with ``cv.Load``.
    :return: *image*, with face rectangles drawn in place.
    """
    min_size = (20, 20)
    image_scale = 1
    haar_scale = 1.1
    min_neighbors = 3
    haar_flags = 0

    # Allocate the temporary images.
    # NOTE: these legacy constructors/functions live in the old ``cv``
    # module -- cv2.CreateImage / cv2.Resize etc. do not exist, so the
    # original cv2-prefixed calls could never run.
    gray = cv.CreateImage((image.width, image.height), 8, 1)
    smallImage = cv.CreateImage(
        (cv.Round(image.width / image_scale),
         cv.Round(image.height / image_scale)), 8, 1)

    # cv.Resize cannot change the channel count, so convert to grayscale
    # before shrinking (the original resized the colour image straight
    # into a single-channel buffer).
    cv.CvtColor(image, gray, cv.CV_BGR2GRAY)

    # Scale input image for faster processing
    cv.Resize(gray, smallImage, cv.CV_INTER_LINEAR)

    # Equalize the histogram
    cv.EqualizeHist(smallImage, smallImage)

    # Detect the faces
    faces = cv.HaarDetectObjects(smallImage, faceCascade,
                                 cv.CreateMemStorage(0), haar_scale,
                                 min_neighbors, haar_flags, min_size)

    # If faces are found
    if faces:
        for ((x, y, w, h), n) in faces:
            # the detection ran on the shrunk image, so scale the bounding
            # box of each face back up and convert it to two CvPoints
            pt1 = (int(x * image_scale), int(y * image_scale))
            pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
            cv.Rectangle(image, pt1, pt2, cv.RGB(255, 0, 0), 5, 8, 0)

    return image
Esempio n. 3
0
    def detect_no_draw(self, img):
        """Return True if at least one face is detected in *img*.

        :param img: colour input image (legacy ``cv`` IplImage, BGR order).
        :return: bool -- whether the Haar cascade found any face.
        """
        # allocate temporary images
        gray = cv.CreateImage((img.width, img.height), 8, 1)
        small_img = cv.CreateImage((cv.Round(img.width / self.image_scale),
                                    cv.Round(img.height / self.image_scale)),
                                   8, 1)

        # convert color input image to grayscale
        cv.CvtColor(img, gray, cv.CV_BGR2GRAY)

        # scale input image for faster processing
        cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)
        cv.EqualizeHist(small_img, small_img)

        # BUG FIX: 'faces' was only assigned inside the 'if self.cascade'
        # branch, so a missing cascade raised NameError at the final test
        # instead of returning False.
        faces = None
        if self.cascade:
            t = cv.GetTickCount()
            faces = cv.HaarDetectObjects(small_img, self.cascade,
                                         cv.CreateMemStorage(0),
                                         self.haar_scale, self.min_neighbors,
                                         self.haar_flags, self.min_size)
            t = cv.GetTickCount() - t
        return bool(faces)
Esempio n. 4
0
def DetectEyes(imageCV, faceCascade, eyeCascade):
    """Detect faces in *imageCV* and draw a red box around each one.

    :param imageCV: colour input image (legacy ``cv`` IplImage, BGR order).
    :param faceCascade: Haar cascade for faces (``cv.Load``).
    :param eyeCascade: eye cascade -- currently unused; kept so the
        signature stays backward compatible.
    :return: *imageCV*, with face rectangles drawn in place.
    """
    minSize = (20, 20)
    imageScale = 2
    haarScale = 1.2
    minNeighbors = 2
    haarFlags = 0

    # Allocate the temporary images.  The original left these commented
    # out and then used the undefined names 'gray' and 'smallImage'.
    gray = cv.CreateImage((imageCV.width, imageCV.height), 8, 1)
    smallImage = cv.CreateImage((cv.Round(imageCV.width / imageScale),
                                 cv.Round(imageCV.height / imageScale)), 8, 1)

    # Convert color input image to grayscale (the original referenced the
    # undefined name 'image' here).
    cv.CvtColor(imageCV, gray, cv.CV_BGR2GRAY)

    # Scale input image for faster processing
    cv.Resize(gray, smallImage, cv.CV_INTER_LINEAR)

    # Equalize the histogram
    cv.EqualizeHist(smallImage, smallImage)

    # Detect the faces.  The original used snake_case names
    # (haar_scale, min_neighbors, ...) that were never defined in this
    # function -- only the camelCase constants above exist.
    faces = cv.HaarDetectObjects(smallImage, faceCascade,
                                 cv.CreateMemStorage(0), haarScale,
                                 minNeighbors, haarFlags, minSize)

    # If faces are found
    if faces:
        for ((x, y, w, h), n) in faces:
            # the detection ran on the shrunk image, so scale the bounding
            # box of each face back up and convert it to two CvPoints
            pt1 = (int(x * imageScale), int(y * imageScale))
            pt2 = (int((x + w) * imageScale), int((y + h) * imageScale))
            cv.Rectangle(imageCV, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)

    return imageCV
def meanshiftUsingILM(path):
    """Cluster an image with mean shift over Leung-Malik filter responses.

    Each pixel gets a 4-dimensional feature vector: the maximum response
    per filter *type* (see getFilterTypeIndex) over the 48-filter LM bank.

    :param path: path of the image to segment.
    :return: the image repainted with its cluster labels.
    """
    # Load original image given the image path.
    # NOTE: these legacy functions live in the old ``cv`` module --
    # cv2.LoadImageM / CreateMat / Resize etc. do not exist.
    im = cv.LoadImageM(path)
    # Load bank of filters
    filterBank = lmfilters.loadLMFilters()
    # Resize image to decrease dimensions during clustering
    resize_factor = 1
    # int(): CreateMat needs integer dimensions; plain '/' yields floats
    # under Python 3.
    thumbnail = cv.CreateMat(int(im.height / resize_factor),
                             int(im.width / resize_factor), cv.CV_8UC3)
    cv.Resize(im, thumbnail)
    # now work with resized thumbnail image
    response = np.zeros(shape=((thumbnail.height) * (thumbnail.width), 4),
                        dtype=float)
    for f in range(0, 48):
        # 'lm_filter' rather than 'filter' -- avoid shadowing the builtin.
        lm_filter = filterBank[f]
        # Resize the filter with the same factor for the resized image
        dst = cv.CreateImage(cv.GetSize(thumbnail), cv.IPL_DEPTH_32F, 3)
        resizedFilter = cv.CreateMat(int(lm_filter.height / resize_factor),
                                     int(lm_filter.width / resize_factor),
                                     lm_filter.type)
        cv.Resize(lm_filter, resizedFilter)
        # Apply the current filter
        cv.Filter2D(thumbnail, dst, resizedFilter)
        featureIndex = getFilterTypeIndex(f)
        for j in range(0, thumbnail.height):
            for i in range(0, thumbnail.width):
                # Select the max. along the three channels
                maxRes = max(dst[j, i])
                if math.isnan(maxRes):
                    maxRes = 0.0
                if maxRes > response[thumbnail.width * j + i, featureIndex]:
                    # Store the max. response for the given feature index
                    response[thumbnail.width * j + i, featureIndex] = maxRes

    # Create new mean shift instance
    ms = MeanShift(bandwidth=10, bin_seeding=True)
    # Apply the mean shift clustering algorithm
    ms.fit(response)
    labels = ms.labels_
    n_clusters_ = np.unique(labels)
    print("Number of clusters: ", len(n_clusters_))
    repaintImage(thumbnail, labels)
    cv.Resize(thumbnail, im)
    return im
Esempio n. 6
0
def fastResize(I, rszFac, sig=-1):
    """Rescale the 2-D array *I* by factor *rszFac*, optionally smoothing.

    A factor of 1 returns *I* untouched.  Otherwise the array is resized
    via the legacy OpenCV ``cv.Resize`` and, when *sig* > 0, smoothed
    with a Gaussian filter of that sigma.
    """
    if rszFac == 1:
        return I

    src_mat = cv.fromarray(np.copy(I))
    new_h = int(math.floor(I.shape[0] * rszFac))
    new_w = int(math.floor(I.shape[1] * rszFac))
    dst_mat = cv.CreateMat(new_h, new_w, src_mat.type)
    cv.Resize(src_mat, dst_mat)

    result = np.asarray(dst_mat)
    if sig > 0:
        result = gaussian_filter(result, sig)
    return result
Esempio n. 7
0
def enlarge_image(image, factor):
    """Enlarge the image by the given factor.

    Image must be of type cv.cvmat.

    :param image: legacy ``cv`` matrix to scale up.
    :param factor: multiplicative scale factor.
    :return: the enlarged matrix, or None if *image* is not a cvmat.
    """
    if type(image).__name__ == 'cvmat':
        new_image = cv.CreateMat(int(round(image.height * factor)),
                                 int(round(image.width * factor)),
                                 cv.GetElemType(image))
        cv.Resize(image, new_image)
        # Lazy %-args: the message is only formatted when the DEBUG
        # level is actually enabled.
        logging.debug(
            'Image has been enlarged with factor %.3f (face-detector.py)',
            factor)
        return new_image
    else:
        # Typo fixed: 'Unkown' -> 'Unknown'.
        logging.error('Unknown Image Type (tools.py)')
Esempio n. 8
0
def downsize_image(image):
    """Shrink the image so it fits within parameter.max_facesize.

    Image must be of type cv.cvmat.

    :param image: legacy ``cv`` matrix to shrink.
    :return: tuple (resized matrix, downsize factor that was applied).
    """
    # Divide by a float so the factor is fractional under Python 2 too:
    # the original wrapped float() around an *integer* division, which
    # had already truncated the ratio.
    height_factor = image.height / float(parameter.max_facesize[0])
    width_factor = image.width / float(parameter.max_facesize[1])
    if height_factor > width_factor:
        # int(): CreateMat needs integer dimensions -- the original
        # passed floats in this branch (unlike the else branch below).
        new_face = cv.CreateMat(int(image.height / height_factor),
                                int(image.width / height_factor),
                                cv.GetElemType(image))
        downsize_factor = height_factor
    else:
        new_face = cv.CreateMat(int(image.height / width_factor),
                                int(image.width / width_factor),
                                cv.GetElemType(image))
        downsize_factor = width_factor
    cv.Resize(image, new_face)
    return new_face, downsize_factor
Esempio n. 9
0
    def detect_and_draw(self, img):
        """Detect faces in *img*, draw boxes, show the frame, return flag.

        :param img: colour input image (legacy ``cv`` IplImage, BGR order).
        :return: True if at least one face was detected, else False.
        """
        # allocate temporary images
        gray = cv.CreateImage((img.width, img.height), 8, 1)
        small_img = cv.CreateImage((cv.Round(img.width / self.image_scale),
                                    cv.Round(img.height / self.image_scale)),
                                   8, 1)

        # convert color input image to grayscale
        cv.CvtColor(img, gray, cv.CV_BGR2GRAY)

        # scale input image for faster processing
        cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)
        cv.EqualizeHist(small_img, small_img)

        # BUG FIX: 'face_found' was only assigned inside the
        # 'if self.cascade' branch, so a missing cascade raised NameError
        # at the return statement.
        face_found = False
        if self.cascade:
            t = cv.GetTickCount()
            faces = cv.HaarDetectObjects(small_img, self.cascade,
                                         cv.CreateMemStorage(0),
                                         self.haar_scale, self.min_neighbors,
                                         self.haar_flags, self.min_size)
            t = cv.GetTickCount() - t
            # print "time taken for detection = %gms" % (t/(cv.GetTickFrequency()*1000.))
            if faces:
                face_found = True

                for ((x, y, w, h), n) in faces:
                    # the input to cv.HaarDetectObjects was resized, so scale the
                    # bounding box of each face and convert it to two CvPoints
                    pt1 = (int(x * self.image_scale),
                           int(y * self.image_scale))
                    pt2 = (int((x + w) * self.image_scale),
                           int((y + h) * self.image_scale))
                    cv.Rectangle(img, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)

        cv.ShowImage("video", img)
        return face_found
Esempio n. 10
0
# @Description: createImage.py
# @Author: 孤烟逐云zjy
# @Date: 2020/5/3 9:49
# @SoftWare: PyCharm
# @CSDN: https://blog.csdn.net/zjy123078_zjy
# @Blog (cnblogs): https://www.cnblogs.com/guyan-2020/

import cv2 as cv

# Modern cv2 API: the legacy cv.LoadImage / cv.CreateImage / cv.SaveImage
# functions do not exist in the cv2 module this script imports.
im = cv.imread('./images/photo01.jpg')  # get the img

# Half-size thumbnail; ndarray shape is (height, width, channels) while
# cv.resize expects a (width, height) target.
height, width = im.shape[:2]
thum = cv.resize(im, (width // 2, height // 2))
cv.imwrite('thum.jpg', thum)
Esempio n. 11
0
    def detectFace(self, cam_img, faceCascade, eyeCascade, mouthCascade):  # cam_img should be cv2.cv.iplcam_img
        # Grab a frame from *cam_img* (a VideoCapture-like object), detect
        # faces with *faceCascade*, draw blue boxes around them, then search
        # the upper 70% of the LAST detected face for eyes with *eyeCascade*
        # and draw red boxes around those.  Stores the last face's x/y in
        # self.faceX / self.faceY.  Returns the annotated IplImage.
        # NOTE(review): *mouthCascade* is accepted but never used here.
        min_size = (20, 20)
        image_scale = 2
        haar_scale = 1.2
        min_neighbors = 2
        haar_flags = 0
        image_width = int(cam_img.get(cv.CV_CAP_PROP_FRAME_WIDTH))
        image_height = int(cam_img.get(cv.CV_CAP_PROP_FRAME_HEIGHT))
        # Allocate the temporary images
        gray = cv.CreateImage((image_width, image_height), 8, 1)  # tuple as the first arg
        smallImage = cv.CreateImage((cv.Round(image_width / image_scale), cv.Round(image_height / image_scale)), 8, 1)

        (ok, img) = cam_img.read()
        # print 'gray is of ',type(gray) >>> gray is of  <type 'cv2.cv.iplimage'>
        # print type(smallImage)  >>> <type 'cv2.cv.iplimage'>
        # print type(image) >>> <type 'cv2.VideoCapture'>
        # print type(img) >>> <type 'numpy.ndarray'>

        # convert numpy.ndarray to iplimage
        ipl_img = cv2.cv.CreateImageHeader((img.shape[1], img.shape[0]), cv.IPL_DEPTH_8U, 3)
        cv2.cv.SetData(ipl_img, img.tostring(), img.dtype.itemsize * 3 * img.shape[1])

        # Convert color input image to grayscale
        cv.CvtColor(ipl_img, gray, cv.CV_BGR2GRAY)

        # Scale input image for faster processing
        cv.Resize(gray, smallImage, cv.CV_INTER_LINEAR)

        # Equalize the histogram
        cv.EqualizeHist(smallImage, smallImage)

        # Detect the faces
        faces = cv.HaarDetectObjects(smallImage, faceCascade, cv.CreateMemStorage(0),
                                     haar_scale, min_neighbors, haar_flags, min_size)
        # => The function returns a list of tuples, (rect, neighbors) , where rect is a CvRect specifying the object’s extents and neighbors is a number of neighbors.
        # => CvRect cvRect(int x, int y, int width, int height)
        # If faces are found
        if faces:
            # Only the FIRST face's position is stored on self...
            face = faces[0]
            self.faceX = face[0][0]
            self.faceY = face[0][1]

            for ((x, y, w, h), n) in faces:
                # the input to cv.HaarDetectObjects was resized, so scale the
                # bounding box of each face and convert it to two CvPoints
                pt1 = (int(x * image_scale), int(y * image_scale))
                pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
                cv.Rectangle(ipl_img, pt1, pt2, cv.RGB(0, 0, 255), 3, 8, 0)
                # face_region = cv.GetSubRect(ipl_img,(x,int(y + (h/4)),w,int(h/2)))

            # ...but pt1/pt2 keep the values from the LAST loop iteration,
            # so the eye search ROI below covers only the last face found.
            # ROI = upper 70% of that face (eyes sit in the top portion).
            cv.SetImageROI(ipl_img, (pt1[0],
                                     pt1[1],
                                     pt2[0] - pt1[0],
                                     int((pt2[1] - pt1[1]) * 0.7)))

            # Eye detection runs on the ROI-restricted image, so the eye
            # coordinates below are relative to that ROI.
            eyes = cv.HaarDetectObjects(ipl_img, eyeCascade,
                                        cv.CreateMemStorage(0),
                                        haar_scale, min_neighbors,
                                        haar_flags, (15, 15))

            if eyes:
                # For each eye found
                for eye in eyes:
                    # Draw a rectangle around the eye
                    cv.Rectangle(ipl_img,  # image
                                 (eye[0][0],  # vertex pt1
                                  eye[0][1]),
                                 (eye[0][0] + eye[0][2],  # vertex pt2 opposite to pt1
                                  eye[0][1] + eye[0][3]),
                                 cv.RGB(255, 0, 0), 1, 4, 0)  # color,thickness,lineType(8,4,cv.CV_AA),shift

        # Harmless when no ROI was set; restores the full image otherwise.
        cv.ResetImageROI(ipl_img)

        return ipl_img
Esempio n. 12
0
def detect_and_draw(img, cascade):
    """Detect faces in *img*, track them across frames, and draw overlays.

    Relies on module-level globals: gray, small_img, storage, trackedFaces,
    image_scale, scale, haar_scale, min_neighbors, haar_flags, min_size,
    FACE_MAX_MOVEMENT, the colour constants (RED, MAGENTA, ORANGE),
    CAPTURING and osName.

    NOTE(review): the CvtColor/Resize/Rectangle-style calls belong to the
    old ``cv`` API, not ``cv2`` -- confirm which module the surrounding
    file actually imports before running.
    """
    t = cv2.GetTickCount()  ## start counter
    cv2.CvtColor(img, gray, cv2.CV_BGR2GRAY)
    cv2.Resize(gray, small_img, cv2.CV_INTER_LINEAR)

    # Ages all trackedFaces
    for f in trackedFaces:
        f.updateLife()
    # Remove expired faces.
    # BUG FIX: iterate over a copy -- removing from the list while
    # iterating it skips the element right after each removal.
    for f in trackedFaces[:]:
        if (f.isTooOld()):
            trackedFaces.remove(f)

    faces = cv2.HaarDetectObjects(small_img, cascade, storage, haar_scale,
                                  min_neighbors, haar_flags, min_size)
    drawline = 0
    if faces:
        # found a face
        for ((x, y, w, h), n) in faces:
            matchedFace = False
            # detection ran on the shrunk image: scale coordinates back up
            pt1 = (int(x * image_scale), int(y * image_scale))
            pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
            # pt3/pt4 outline the middle third / forehead band of the face
            pt3 = (int(x * image_scale) + int(
                ((x + w) * image_scale - x * image_scale) / 3),
                   int(y * image_scale))
            pt4 = (int((x + w) * image_scale) - int(
                ((x + w) * image_scale - x * image_scale) / 3),
                   int((y * image_scale) + int((
                       (y + h) * image_scale) - int(y * image_scale)) / 3))

            # check if there are trackedFaces
            if (len(trackedFaces) > 0):
                # each face being tracked
                for f in trackedFaces:
                    # the face is found (small movement)
                    if ((abs(f.xpt - pt1[0]) < FACE_MAX_MOVEMENT)
                            and (abs(f.ypt - pt1[1]) < FACE_MAX_MOVEMENT)):
                        matchedFace = True
                        f.updateFace(int(w * image_scale),
                                     int(h * image_scale), pt1[0], pt1[1])
                        mf = f
                        break

                # if face not found, add a new face
                if (matchedFace == False):
                    f = Face(0, int(w * image_scale), int(h * image_scale),
                             pt1[0], pt1[1], 0)
                    trackedFaces.append(f)
                    mf = f
            # No tracked faces: adding one
            else:
                f = Face(0, int(w * image_scale), int(h * image_scale), pt1[0],
                         pt1[1], 0)
                trackedFaces.append(f)
                mf = f
            # where to draw face and properties: only faces tracked for
            # more than 5 frames get overlays (filters out flickers)
            if (mf.age > 5):

                # draw attention line (grows with the face's tracked age,
                # capped at the face width)
                lnpt1 = (int(mf.xpt * scale), int(mf.ypt * scale - 5) - 5)
                if (mf.age > mf.width):
                    lnpt2 = (int(mf.xpt * scale + mf.width),
                             int(mf.ypt * scale - 5))
                else:
                    lnpt2 = (int(mf.xpt * scale + mf.age),
                             int(mf.ypt * scale - 5))

                cv2.Rectangle(img, lnpt1, lnpt2, RED, 4, 8,
                              0)  ## drawing bolded attention line

                ### draw eyes
                cv2.Rectangle(img, mf.eyeLeft1, mf.eyeLeft2, MAGENTA, 3, 8, 0)
                cv2.Rectangle(img, mf.eyeRight1, mf.eyeRight2, MAGENTA, 3, 8,
                              0)
                #
                ### draw mouth
                cv2.Rectangle(img, mf.mouthTopLeft, mf.mouthBotRight, ORANGE,
                              3, 8, 0)
                #
                ### draw face
                cv2.Rectangle(img, pt1, pt2, getColor(mf), 3, 8, 0)
                #cv2.Rectangle( img, pt3, pt4, MAGENTA, 1, 8, 0 ) #forehead
                drawline = mf.age

    if (CAPTURING): saveAsJPG(img)
    if (osName == "nt"): cv2.Flip(img, img, 0)
    cv2.ShowImage('Camera', img)
    t = cv2.GetTickCount() - t  ## counter for FPS
    print("%i fps." % (cv2.GetTickFrequency() * 1000000. / t))  ## print FPS
# NOTE(review): this "example" is MATLAB/Octave code (vl_sift, single,
# rgb2gray, parenthesised indexing, semicolons, `for a < len(...)`) pasted
# under a MATLAB-style function header re-spelled with `def`.  It is NOT
# valid Python and cannot run as-is; it needs a full port (e.g. cv2 SIFT)
# or removal.  Preserved verbatim below for reference.
def imgresult = imgmatch(imgfile):
import numpy as np
import cv2 as cv
s1 = 'DeepLearning.JPG';s2 = 'ImageVideo.JPG';
s3 = 'ModenPhoto.JPG'; s4 = 'PatternRecognition.JPG'; 
booklist = [s1,s2,s3,s4];
s11 = 'deep_goodfellow';s22 = 'handbook_bovik';
s33 = 'modern_mikhail'; s44 = 'pattern_bishop'; 
newbook = [s11,s22,s33,s44];
   
for a < len(booklist)
imgRGB = cv.imread(booklist(a));
imgRGB = cv.Resize(imgRGB, [1000,NaN]);
img = single(rgb2gray(imgRGB));
[fcover,dcover] = vl_sift(img);

#imgfile = imread('IMG_0212.JPG');
imgfile = cv.resize(imgfile, [1000,NaN]);
Ia = single(rgb2gray(imgfile));
[fa, da] = vl_sift(Ia) ;

[matches, scores] = vl_ubcmatch(dcover, da) ;

#figure(1) ; clf ;
#imagesc(cat(2, Ia, img)) ;

xa = fa(1,matches(2,:)) ;
xcover = fcover(1,matches(1,:)) + size(Ia,2) ;
ya = fa(2,matches(2,:)) ;
ycover = fcover(2,matches(1,:)) ;
Esempio n. 14
0
def resizeImage(im, width, height):
    """Return a copy of *im* resized to *width* x *height*.

    Resizing an image can be significant for the OCR engine's ability
    to detect characters.
    """
    resized = cv.CreateImage((width, height), im.depth, im.channels)
    cv.Resize(im, resized)
    return resized
def meanshiftUsingPCA(path):
    """Cluster an image with mean shift over PCA-reduced LM features.

    Builds a 51-dim feature vector per pixel (48 LM filter responses plus
    the three YUV channel values), projects it to 4 dims with PCA, then
    runs mean shift on the projected features.

    :param path: path of the image to segment.
    :return: the image repainted with its cluster labels.
    """
    # Load original image given the image path.
    # NOTE: the legacy functions live in the old ``cv`` module -- the
    # original mixed the ``cv`` and ``cv2`` namespaces; unified on ``cv``.
    im = cv.LoadImageM(path)
    # convert image to YUV color space (in place)
    cv.CvtColor(im, im, cv.CV_BGR2YCrCb)
    # Load bank of filters
    filterBank = lmfilters.loadLMFilters()
    # Resize image to decrease dimensions during clustering
    resize_factor = 1
    # int(): CreateMat needs integer dimensions; plain '/' yields floats
    # under Python 3.
    thumbnail = cv.CreateMat(int(im.height / resize_factor),
                             int(im.width / resize_factor), cv.CV_8UC3)
    cv.Resize(im, thumbnail)
    # now work with resized thumbnail image
    response = np.zeros(shape=((thumbnail.height) * (thumbnail.width), 51),
                        dtype=float)
    for f in range(0, 48):
        # 'lm_filter' rather than 'filter' -- avoid shadowing the builtin.
        lm_filter = filterBank[f]
        # Resize the filter with the same factor for the resized image
        dst = cv.CreateImage(cv.GetSize(thumbnail), cv.IPL_DEPTH_32F, 3)
        resizedFilter = cv.CreateMat(int(lm_filter.height / resize_factor),
                                     int(lm_filter.width / resize_factor),
                                     lm_filter.type)
        cv.Resize(lm_filter, resizedFilter)
        # Apply the current filter
        cv.Filter2D(thumbnail, dst, resizedFilter)
        for j in range(0, thumbnail.height):
            for i in range(0, thumbnail.width):
                # Select the max. along the three channels
                maxRes = max(dst[j, i])
                if math.isnan(maxRes):
                    maxRes = 0.0
                if maxRes > response[thumbnail.width * j + i, f]:
                    # Store the max. response for the given filter
                    response[thumbnail.width * j + i, f] = maxRes

    # YUV features: columns 48-50 carry the raw channel values
    count = 0
    for j in range(0, thumbnail.height):
        for i in range(0, thumbnail.width):
            response[count, 48] = thumbnail[j, i][0]
            response[count, 49] = thumbnail[j, i][1]
            response[count, 50] = thumbnail[j, i][2]
            count += 1

    # get the first 4 primary components using pca
    pca = PCA(response)
    # np.zeros: the bare name 'zeros' is not defined here (sibling code
    # consistently uses np.zeros).
    pcaResponse = np.zeros([thumbnail.height * thumbnail.width, 4])

    for i in range(0, thumbnail.height * thumbnail.width):
        pcaResponse[i] = pca.getPCA(response[i], 4)

    # Create new mean shift instance
    ms = MeanShift(bandwidth=10, bin_seeding=True)
    # Apply the mean shift clustering algorithm
    ms.fit(pcaResponse)
    labels = ms.labels_
    n_clusters_ = np.unique(labels)
    print("Number of clusters: ", len(n_clusters_))
    repaintImage(thumbnail, labels)
    cv.Resize(thumbnail, im)
    return im
Esempio n. 16
0
        # NOTE(review): fragment of a larger capture loop -- the enclosing
        # function and the 'if' this first statement belongs to are outside
        # this chunk, so the fragment is not runnable on its own.
        frame_copy = cv.CreateImage((frame.width, frame.height),
                                    cv.IPL_DEPTH_8U, frame.nChannels)
    if frame.origin == cv.IPL_ORIGIN_TL:
        # flip both axes so the frame is top-left-origin upright
        cv.Flip(frame, frame, -1)

    # Our operations on the frame come here
    gray = cv.CreateImage((frame.width, frame.height), 8, 1)
    small_img = cv.CreateImage((cv.Round(
        frame.width / image_scale), cv.Round(frame.height / image_scale)), 8,
                               1)

    # convert color input image to grayscale
    cv.CvtColor(frame, gray, cv.CV_BGR2GRAY)

    # scale input image for faster processing
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)

    cv.EqualizeHist(small_img, small_img)

    midFace = None

    if (cascade):
        t = cv.GetTickCount()
        # HaarDetectObjects takes 0.02s
        faces = cv.HaarDetectObjects(small_img, cascade,
                                     cv.CreateMemStorage(0), haar_scale,
                                     min_neighbors, haar_flags, min_size)
        t = cv.GetTickCount() - t
        if faces:
            # NOTE(review): inside 'if faces' len(faces) is always > 0, so
            # this always calls lights(0, 50, 0, 50) -- presumably a
            # "face seen" signal; confirm against the lights() contract.
            lights(50 if len(faces) == 0 else 0, 50 if len(faces) > 0 else 0,
                   0, 50)
Esempio n. 17
0
def resize_image(image, height = 240, width = 320):
    """Return *image* resized to *width* x *height*, cubic interpolation."""
    shrunk = cv.CreateImage((width, height), image.depth, image.nChannels)
    cv.Resize(image, shrunk, interpolation=cv.CV_INTER_CUBIC)
    return shrunk
Esempio n. 18
0

import cv2

# Modern cv2 API: the legacy cv2.LoadImage / CreateImage / Resize /
# SaveImage functions do not exist in the cv2 module.
imageBuffer = cv2.imread('images/digits_sudoku2.png')
nW = 468
nH = 99
# cv2.resize takes a (width, height) target.  The original passed
# (nH, nW) to CreateImage -- which also expects (width, height) -- so the
# two dimensions were swapped and the output came out 99x468 instead of
# the intended 468x99.
smallerImage = cv2.resize(imageBuffer, (nW, nH),
                          interpolation=cv2.INTER_CUBIC)
cv2.imwrite('images/digits_sudoku3.png', smallerImage)