Example #1
def main():
    parser = argparse.ArgumentParser(
        description='Runs text detector on relevant images')
    parser.add_argument('classifier_file', help='Path to classifier CLF')
    parser.add_argument('-l',
                        '--limit',
                        type=int,
                        metavar='COUNT',
                        required=False,
                        help='Maximum number of images to use')
    parser.add_argument(
        '-r',
        '--random',
        action="store_true",
        default=False,
        required=False,
        help='Fetch images ordered randomly if limit is active')
    parser.add_argument('database', help='Database to use')
    args = parser.parse_args()
    parameters["classifier_file"] = args.classifier_file
    i = rigor.runner.Runner('text',
                            parameters,
                            limit=args.limit,
                            random=args.random)
    database_mapper = DatabaseMapper(Database.instance(args.database))
    for result in i.run():
        detected = result[1]
        expected = result[2]
        image = database_mapper.get_image_by_id(result[0])
        cv_image = rigor.imageops.fetch(image)
        cv2.polylines(cv_image, expected, True, (0, 255, 0))  # expected regions in green (BGR)
        cv2.polylines(cv_image, detected, True, (0, 255, 255))  # detected regions in yellow (BGR)
        cv2.imwrite(".".join((str(image["id"]), image["format"])), cv_image)
Example #2
def draw_circles(storage, output):
    # Draw each detected circle: a small green center dot plus a red outline (BGR colors).
    circles = np.asarray(storage)
    print len(circles), 'circles found'
    for circle in circles:
        Radius, x, y = int(circle[0][2]), int(circle[0][0]), int(circle[0][1])
        cv2.circle(output, (x, y), 1, (0, 255, 0), -1, 8, 0)
        cv2.circle(output, (x, y), Radius, (0, 0, 255), 3, 8, 0)
Example #3
def DetectEyes(imageCV, faceCascade, eyeCascade):
    # Note: despite its name, this snippet only detects and outlines faces;
    # eyeCascade is accepted but not used here.
    min_size = (20, 20)
    image_scale = 2
    haar_scale = 1.2
    min_neighbors = 2
    haar_flags = 0

    # Allocate the temporary images
    gray = cv.CreateImage((imageCV.width, imageCV.height), 8, 1)
    smallImage = cv.CreateImage((cv.Round(imageCV.width / image_scale),
                                 cv.Round(imageCV.height / image_scale)), 8, 1)

    # Convert color input image to grayscale
    cv.CvtColor(imageCV, gray, cv.CV_BGR2GRAY)

    # Scale input image for faster processing
    cv.Resize(gray, smallImage, cv.CV_INTER_LINEAR)

    # Equalize the histogram
    cv.EqualizeHist(smallImage, smallImage)

    # Detect the faces
    faces = cv.HaarDetectObjects(smallImage, faceCascade,
                                 cv.CreateMemStorage(0), haar_scale,
                                 min_neighbors, haar_flags, min_size)

    # If faces are found
    if faces:
        for ((x, y, w, h), n) in faces:
            # the input to cv.HaarDetectObjects was resized, so scale the
            # bounding box of each face and convert it to two CvPoints
            pt1 = (int(x * image_scale), int(y * image_scale))
            pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
            cv.Rectangle(imageCV, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)
Example #4
def DetectFace(image, faceCascade, returnImage=False):
    # This function takes a grey scale cv image and finds
    # the patterns defined in the haarcascade function
    # modified from: http://www.lucaamore.com/?p=638

    # variables
    min_size = (20, 20)
    haar_scale = 1.1
    min_neighbors = 3
    haar_flags = 0

    # Equalize the histogram
    cv.EqualizeHist(image, image)

    # Detect the faces
    faces = cv.HaarDetectObjects(image, faceCascade, cv.CreateMemStorage(0),
                                 haar_scale, min_neighbors, haar_flags,
                                 min_size)

    # If faces are found
    if faces and returnImage:
        for ((x, y, w, h), n) in faces:
            # Convert bounding box to two CvPoints
            pt1 = (int(x), int(y))
            pt2 = (int(x + w), int(y + h))
            cv.Rectangle(image, pt1, pt2, cv.RGB(255, 0, 0), 5, 8, 0)

    if returnImage:
        return image
    else:
        return faces
Example #5
def DetectFace(image, faceCascade, returnImage=False):
    min_size = (20, 20)
    haar_scale = 1.1
    min_neighbors = 3
    haar_flags = 0

    # Equalize the histogram
    cv.EqualizeHist(image, image)

    # Detect the faces
    faces = cv.HaarDetectObjects(image, faceCascade, cv.CreateMemStorage(0),
                                 haar_scale, min_neighbors, haar_flags,
                                 min_size)

    # If faces are found
    if faces and returnImage:
        for ((x, y, w, h), n) in faces:
            # Convert bounding box to two CvPoints
            pt1 = (int(x), int(y))
            pt2 = (int(x + w), int(y + h))
            cv.Rectangle(image, pt1, pt2, cv.RGB(255, 0, 0), 5, 8, 0)

    if returnImage:
        return image
    else:
        return faces
Example #6
def DetectFace(image, faceCascade):
    #modified from: http://www.lucaamore.com/?p=638

    min_size = (20, 20)
    image_scale = 1
    haar_scale = 1.1
    min_neighbors = 3
    haar_flags = 0

    # Allocate the temporary images
    smallImage = cv.CreateImage((cv.Round(image.width / image_scale),
                                 cv.Round(image.height / image_scale)), 8, 1)

    # Scale input image for faster processing
    cv.Resize(image, smallImage, cv.CV_INTER_LINEAR)

    # Equalize the histogram
    cv.EqualizeHist(smallImage, smallImage)

    # Detect the faces
    faces = cv.HaarDetectObjects(smallImage, faceCascade,
                                 cv.CreateMemStorage(0), haar_scale,
                                 min_neighbors, haar_flags, min_size)

    # If faces are found
    if faces:
        for ((x, y, w, h), n) in faces:
            # the input to cv.HaarDetectObjects was resized, so scale the
            # bounding box of each face and convert it to two CvPoints
            pt1 = (int(x * image_scale), int(y * image_scale))
            pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
            cv.Rectangle(image, pt1, pt2, cv.RGB(255, 0, 0), 5, 8, 0)

    return image
Example #7
    def blob_proc(self, img):
        # don't use me, im bad
        d_red = (65, 55, 150)  # dark red in BGR
        l_red = (200, 200, 250)  # light red in BGR

        detector = cv2.FeatureDetector_create('MSER')
        fs = detector.detect(img)
        fs.sort(key=lambda x: -x.size)

        sfs = [x for x in fs if not self.supress(x, fs)]

        # Keep an unmodified copy of the input for the side-by-side view below
        # (assumes a 3-channel BGR input image)
        orig = img.copy()

        for f in sfs:
            cv2.circle(img, (int(f.pt[0]), int(f.pt[1])), int(f.size / 2),
                       d_red, 2, cv2.CV_AA)
            cv2.circle(img, (int(f.pt[0]), int(f.pt[1])), int(f.size / 2),
                       l_red, 1, cv2.CV_AA)

        h, w = orig.shape[:2]
        vis = np.zeros((h, w * 2 + 5), np.uint8)
        vis = cv2.cvtColor(vis, cv2.COLOR_GRAY2BGR)
        vis[:h, :w] = orig
        vis[:h, w + 5:w * 2 + 5] = img

        return vis
Example #8
def Hist(image):
    # Build a 256x256 image showing the intensity histogram of a grayscale input.
    a = [0] * 256
    w = image.width
    h = image.height
    iHist = cv.CreateImage((256, 256), 8, 3)
    for i in range(h):
        for j in range(w):
            iGray = int(image[i, j])
            a[iGray] = a[iGray] + 1

    S = max(a)
    c = cv.RGB(200, 150, 255)

    for k in range(256):
        a[k] = a[k] * 200 / S
        x = (k, 255)
        y = (k, 255 - a[k])
        cv.Line(iHist, x, y, c)

    return iHist
Example #9
def DetectFace(image, faceCascade, returnImage=False):
    # This function takes a grey scale cv image and finds
    # the patterns defined in the haarcascade function
    # modified from: http://www.lucaamore.com/?p=638

    # variables
    min_size = (20, 20)
    haar_scale = 1.1
    min_neighbors = 3
    haar_flags = 0

    # Equalize the histogram
    # cv2.EqualizeHist(image, image)
    #
    # # Detect the faces
    # faces = cv2.HaarDetectObjects(
    #         image, faceCascade, cv2.createMemStorage(0),
    #         haar_scale, min_neighbors, haar_flags, min_size
    #     )

    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    # Detect faces in the image
    faces = faceCascade.detectMultiScale(gray,
                                         scaleFactor=1.5,
                                         minNeighbors=7,
                                         minSize=(30, 30),
                                         flags=cv2.CASCADE_SCALE_IMAGE)

    # If faces are found (detectMultiScale returns an array of (x, y, w, h) boxes)
    if len(faces) > 0 and returnImage:
        for (x, y, w, h) in faces:
            # Convert bounding box to two corner points
            pt1 = (int(x), int(y))
            pt2 = (int(x + w), int(y + h))
            cv2.rectangle(image, pt1, pt2, (0, 0, 255), 5, 8, 0)

    if returnImage:
        return image
    else:
        return faces
Example #10
    def detect_and_draw(self, img):

        # allocate temporary images
        gray = cv.CreateImage((img.width, img.height), 8, 1)
        small_img = cv.CreateImage((cv.Round(img.width / self.image_scale),
                                    cv.Round(img.height / self.image_scale)),
                                   8, 1)

        # convert color input image to grayscale
        cv.CvtColor(img, gray, cv.CV_BGR2GRAY)

        # scale input image for faster processing
        cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)
        cv.EqualizeHist(small_img, small_img)

        face_found = False
        if self.cascade:
            t = cv.GetTickCount()
            faces = cv.HaarDetectObjects(small_img, self.cascade,
                                         cv.CreateMemStorage(0),
                                         self.haar_scale, self.min_neighbors,
                                         self.haar_flags, self.min_size)
            t = cv.GetTickCount() - t
            # print "time taken for detection = %gms" % (t / (cv.GetTickFrequency() * 1000.))
            if faces:
                face_found = True

                for ((x, y, w, h), n) in faces:
                    # the input to cv.HaarDetectObjects was resized, so scale the
                    # bounding box of each face and convert it to two CvPoints
                    pt1 = (int(x * self.image_scale),
                           int(y * self.image_scale))
                    pt2 = (int((x + w) * self.image_scale),
                           int((y + h) * self.image_scale))
                    cv.Rectangle(img, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)
            else:
                face_found = False

        cv.ShowImage("video", img)
        return face_found
Example #11
class Camera:
    """
    ImageTk.PhotoImage(image) => PhotoImage instance
    Creates a Tkinter-compatible photo image, which can be used everywhere Tkinter
    expects an image object. If the image is an RGBA image, pixels having alpha 0
    are treated as transparent.

    create_image(x0, y0, options...) => id
    Creates an image item placed relative to the given position.
    Note that the image itself is given by the image option.
    image: The image object (a Tkinter PhotoImage or BitmapImage instance,
    or an instance of the corresponding Python Imaging Library classes).
    anchor: Specifies which part of the image should be placed at the given position.
    Use one of N, NE, E, SE, S, SW, W, NW, or CENTER. Default is CENTER.

    Warning
    There is a bug in the current version of the Python Imaging Library that can cause
    your images not to display properly. When you create an object of class PhotoImage,
    the reference count for that object does not get properly incremented, so unless
    you keep a reference to that object somewhere else, the PhotoImage object may be
    garbage-collected, leaving the graphic blank in the application. (A minimal
    reference-keeping sketch follows this example.)
    """

    def __init__(self, cam, root, canvas, histCanvas, frame, position):
        self.cam = cam
        self.root = root
        self.canvas = canvas
        self.histCanvas = histCanvas
        self.frame = frame
        self.onoff = False
        self.detect = False
        self.effect = 'none'
        self.image = None
        self.a = None
        self.b = None
        self.contrast = 0.0
        self.brightness = 0.0
        self.position = position
        self.faceX = 0
        self.faceY = 0

    ##==============================================================================
    def getFaceX(self):
        return str(self.faceX)

    def getFaceY(self):
        return str(self.faceY)

    ##==============================================================================
    def setContrast(self, value):
        self.contrast = value
        self.cam.set(cv.CV_CAP_PROP_CONTRAST, self.contrast)

    ##==============================================================================
    def setBrightness(self, value):
        self.brightness = value
        self.cam.set(cv.CV_CAP_PROP_BRIGHTNESS, self.brightness)

    ##==============================================================================
    def setEffect(self, effect):
        self.effect = effect

    ##==============================================================================
    def draw_str(self, dst, (x, y), s):
        cv2.putText(dst, s, (x + 2, y + 2), cv2.FONT_HERSHEY_COMPLEX, 1.0,
                    cv.RGB(0, 255, 0), thickness=2, lineType=cv2.CV_AA, bottomLeftOrigin=False)
        cv2.putText(dst, s, (x, y), cv2.FONT_HERSHEY_COMPLEX, 1.0,
                    cv.RGB(255, 0, 0), lineType=cv2.CV_AA, bottomLeftOrigin=False)
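The reference-counting warning in the class docstring above is easy to trip over in practice. The following is a minimal, hypothetical sketch of the fix it describes: it assumes a Python 2 Tkinter environment with Pillow/PIL installed, and "frame.png" is a placeholder file name, not something from the original code.
import Tkinter as tk
from PIL import Image, ImageTk

root = tk.Tk()
canvas = tk.Canvas(root, width=640, height=480)
canvas.pack()

# "frame.png" is a placeholder; any readable image file works for the demo.
photo = ImageTk.PhotoImage(Image.open("frame.png"))
canvas.create_image(0, 0, image=photo, anchor=tk.NW)

# Keep a reference on a long-lived object; without this line the PhotoImage
# can be garbage-collected and the canvas item shows up blank.
canvas.image = photo

root.mainloop()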
Example #12
    def detectFace(self, cam_img, faceCascade, eyeCascade, mouthCascade):  # cam_img is a cv2.VideoCapture (see the type notes below)
        min_size = (20, 20)
        image_scale = 2
        haar_scale = 1.2
        min_neighbors = 2
        haar_flags = 0
        image_width = int(cam_img.get(cv.CV_CAP_PROP_FRAME_WIDTH))
        image_height = int(cam_img.get(cv.CV_CAP_PROP_FRAME_HEIGHT))
        # Allocate the temporary images
        gray = cv.CreateImage((image_width, image_height), 8, 1)  # tuple as the first arg
        smallImage = cv.CreateImage((cv.Round(image_width / image_scale), cv.Round(image_height / image_scale)), 8, 1)

        (ok, img) = cam_img.read()
        # print 'gray is of ',type(gray) >>> gray is of  <type 'cv2.cv.iplimage'>
        # print type(smallImage)  >>> <type 'cv2.cv.iplimage'>
        # print type(image) >>> <type 'cv2.VideoCapture'>
        # print type(img) >>> <type 'numpy.ndarray'>

        # convert numpy.ndarray to iplimage
        ipl_img = cv2.cv.CreateImageHeader((img.shape[1], img.shape[0]), cv.IPL_DEPTH_8U, 3)
        cv2.cv.SetData(ipl_img, img.tostring(), img.dtype.itemsize * 3 * img.shape[1])

        # Convert color input image to grayscale
        cv.CvtColor(ipl_img, gray, cv.CV_BGR2GRAY)

        # Scale input image for faster processing
        cv.Resize(gray, smallImage, cv.CV_INTER_LINEAR)

        # Equalize the histogram
        cv.EqualizeHist(smallImage, smallImage)

        # Detect the faces
        faces = cv.HaarDetectObjects(smallImage, faceCascade, cv.CreateMemStorage(0),
                                     haar_scale, min_neighbors, haar_flags, min_size)
        # => The function returns a list of tuples, (rect, neighbors) , where rect is a CvRect specifying the object’s extents and neighbors is a number of neighbors.
        # => CvRect cvRect(int x, int y, int width, int height)
        # If faces are found
        if faces:
            face = faces[0]
            self.faceX = face[0][0]
            self.faceY = face[0][1]

            for ((x, y, w, h), n) in faces:
                # the input to cv.HaarDetectObjects was resized, so scale the
                # bounding box of each face and convert it to two CvPoints
                pt1 = (int(x * image_scale), int(y * image_scale))
                pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
                cv.Rectangle(ipl_img, pt1, pt2, cv.RGB(0, 0, 255), 3, 8, 0)
                # face_region = cv.GetSubRect(ipl_img,(x,int(y + (h/4)),w,int(h/2)))

            cv.SetImageROI(ipl_img, (pt1[0],
                                     pt1[1],
                                     pt2[0] - pt1[0],
                                     int((pt2[1] - pt1[1]) * 0.7)))

            eyes = cv.HaarDetectObjects(ipl_img, eyeCascade,
                                        cv.CreateMemStorage(0),
                                        haar_scale, min_neighbors,
                                        haar_flags, (15, 15))

            if eyes:
                # For each eye found
                for eye in eyes:
                    # Draw a rectangle around the eye
                    cv.Rectangle(ipl_img,  # image
                                 (eye[0][0],  # vertex pt1
                                  eye[0][1]),
                                 (eye[0][0] + eye[0][2],  # vertex pt2 opposite to pt1
                                  eye[0][1] + eye[0][3]),
                                 cv.RGB(255, 0, 0), 1, 4, 0)  # color,thickness,lineType(8,4,cv.CV_AA),shift

        cv.ResetImageROI(ipl_img)

        return ipl_img
Example #13
FACE_UD_STATE_CHANGE_THRESH = 1
FACE_ALTERNATION_THRESH = 2
FACE_ONE_DIMENSION_THRESH = 2
FACE_STILL_THRESHOLD = 3
FACE_ALTERNATIONS_EXPIRE = 6

#Face movement enumeration
OTHER = 0
STILL = 1
LEFT = 2
RIGHT = 3
UP = 4
DOWN = 5

#Color constant definitions
RED = cv.RGB(255, 0, 0)
GREEN = cv.RGB(0, 220, 0)
BLUE = cv.RGB(0, 0, 255)
YELLOW = cv.RGB(255, 255, 0)
ORANGE = cv.RGB(255, 127, 0)
MAGENTA = cv.RGB(255, 0, 255)

# other constants
scale = 1
cascade = None
storage = cv.CreateMemStorage(0)
cascade_name = "xml/haarcascade_frontalface_alt.xml"
min_size = (FACE_MIN_SIZE, FACE_MIN_SIZE)
image_scale = 1.3
haar_scale = 1.2
min_neighbors = 2
Example #14
    def add_keypoints(self, track_box):
        # Look for any new keypoints around the current keypoints

        # Begin with a mask of all black pixels
        mask = np.zeros_like(self.grey)

        # Get the coordinates and dimensions of the current track box
        try:
            ((x, y), (w, h), a) = track_box
        except:
            try:
                x, y, w, h = track_box
                x = x + w / 2
                y = y + h / 2
                a = 0
            except:
                rospy.loginfo("Track box has shrunk to zero...")
                return

        x = int(x)
        y = int(y)

        # Expand the track box to look for new keypoints
        w_new = int(self.expand_roi * w)
        h_new = int(self.expand_roi * h)

        pt1 = (x - int(w_new / 2), y - int(h_new / 2))
        pt2 = (x + int(w_new / 2), y + int(h_new / 2))

        mask_box = ((x, y), (w_new, h_new), a)

        # Display the expanded ROI with a yellow rectangle
        if self.show_add_drop:
            cv2.rectangle(self.marker_image, pt1, pt2, cv.RGB(255, 255, 0))

        # Create a filled white ellipse within the track_box to define the ROI
        cv2.ellipse(mask, mask_box, 255, -1)  # white, thickness -1 = filled

        if self.keypoints is not None:
            # Mask the current keypoints
            for x, y in [np.int32(p) for p in self.keypoints]:
                cv2.circle(mask, (x, y), 5, 0, -1)

        new_keypoints = cv2.goodFeaturesToTrack(self.grey,
                                                mask=mask,
                                                **self.gf_params)

        # Append new keypoints to the current list if they are not
        # too far from the current cluster
        if new_keypoints is not None:
            for x, y in np.float32(new_keypoints).reshape(-1, 2):
                distance = self.distance_to_cluster((x, y), self.keypoints)
                if distance > self.add_keypoint_distance:
                    self.keypoints.append((x, y))
                    # Briefly display a blue disc where the new point is added
                    if self.show_add_drop:
                        cv2.circle(self.marker_image, (int(x), int(y)), 3,
                                   (255, 255, 0, 0), cv.CV_FILLED, 2, 0)

            # Remove duplicate keypoints
            self.keypoints = list(set(self.keypoints))
    midFace = None

    if (cascade):
        # HaarDetectObjects takes 0.02s
        faces = cascade.detectMultiScale(gray, scaleFactor=1.5, minNeighbors=5)
        if len(faces) > 0:
            lights(50 if len(faces) == 0 else 0, 50 if len(faces) > 0 else 0,
                   0, 50)

            for (x, y, w, h) in faces:
                # the input to detectMultiScale was resized, so scale the
                # bounding box of each face and convert it to two points
                pt1 = (int(x * image_scale), int(y * image_scale))
                pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
                cv2.rectangle(frame, pt1, pt2, (255, 220, 100), 1, 8, 0)
                # get the xy corner co-ords, calc the midFace location
                x1 = pt1[0]
                x2 = pt2[0]
                y1 = pt1[1]
                y2 = pt2[1]

                midFaceX = x1 + ((x2 - x1) / 2)
                midFaceY = y1 + ((y2 - y1) / 2)
                midFace = (midFaceX, midFaceY)

                offsetX = midFaceX / float(frame.shape[1] / 2)
                offsetY = midFaceY / float(frame.shape[0] / 2)
                offsetX -= 1
                offsetY -= 1
def draw_circles(storage, output):
    # Same drawing helper as in Example #2; indices 0, 1 and 2 hold x, y and radius.
    circles = np.asarray(storage)
    for circle in circles:
        Radius, x, y = int(circle[0][2]), int(circle[0][0]), int(circle[0][1])
        cv2.circle(output, (x, y), 1, (0, 255, 0), -1, 8, 0)
        cv2.circle(output, (x, y), Radius, (0, 0, 255), 3, 8, 0)