def __init__(self, frame, rect, time_makes_difference=False):
        self.rect = rect
        self.time_makes_difference = time_makes_difference
        self.face_photo_valid = False
        self.face_size = [rect[2], rect[3]]
        self.personID = -1
        self.white_level = 6
        try:
            sub_image = cv.GetSubRect(frame, rect)
        except:
            return None
        self.frame_copy = cv.CreateImage((rect[2], rect[3]), cv.IPL_DEPTH_8U,
                                         frame.nChannels)

        if frame.origin == cv.IPL_ORIGIN_TL:
            cv.Copy(sub_image, self.frame_copy)
        else:
            cv.Flip(sub_image, self.frame_copy, 0)
        self.find_eyes()

        if self.is_a_valid_face_photo():
            self.find_wrinkles()
            self.find_avarage_colors()
            self.find_gender_age_emotions()
            #self.count_face_vector()
            self.create_face_vector()
def decode(imgpath, mark, bits):
    """
    Given an ES&S-style ballot, returns the LHS barcode as a bitstring.
    Will try to detect and report flipped ballots.
    Input:
        imgpath : path to ballot image
        mark    : image of mark
        bits    : number of bits expected in barcode
    Output:
        bitstring     : string for detected barcode
        is_flipped    : boolean indicating whether ballot was flipped
        bit_locations : {str bit_value: [(x1,y1,x2,y2), ...]}
    """

    is_flipped = False
    img = cv.LoadImage(imgpath, cv.CV_LOAD_IMAGE_GRAYSCALE)
    bitstring, bit_locations = decode_patch(img, mark, bits)
    if not bitstring:
        is_flipped = True
        w, h = cv.GetSize(img)
        tmp = cv.CreateImage((w, h), img.depth, img.channels)
        cv.Flip(img, tmp, flipMode=-1)
        img = tmp
        bitstring, bit_locations = decode_patch(img, mark, bits)
    return bitstring, is_flipped, bit_locations
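
A minimal usage sketch for decode(); the template path, ballot path, and bit count below are hypothetical placeholders rather than values from the original project.

mark = cv.LoadImage("mark_template.png", cv.CV_LOAD_IMAGE_GRAYSCALE)  # hypothetical template
bitstring, is_flipped, bit_locations = decode("ballot_side0.png", mark, bits=36)
if bitstring:
    print "decoded:", bitstring, "flipped:", is_flipped
else:
    print "barcode could not be decoded"
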
Example #3
  def step(self, show):
    r_image = cv.QueryFrame(self.capture)

    if Camera.EXTERNAL:
      self.image = cv.CreateImage((r_image.height, r_image.width), r_image.depth, r_image.channels)
      cv.Transpose(r_image, self.image)
      cv.Flip(self.image, self.image, flipMode=0)
    else:
      self.image = r_image


    self.detect_faces()

    for (x,y,w,h) in self.faces:
      cv.PutText(self.image, "LOOK AT THIS IDIOT", (0, 30), Camera.FONT, Camera.RED)
      cv.PutText(self.image, str(time.time()), (self.image.width - 275, self.image.height - 20), Camera.FONT, Camera.RED)
      cv.Line(self.image, (0, y+h/2), (self.image.width, y+h/2), Camera.RED)
      cv.Line(self.image, (x+w/2, 0), (x+w/2, self.image.height), Camera.RED)
      cv.Circle(self.image, (x+w/2, y+h/2), min(h/2, w/2), Camera.RED, thickness=2)
      cv.Circle(self.image, (x+w/2, y+h/2), min(h/8, w/8), Camera.RED)
      #arrow(self.image, (200, 40), (x, y), Camera.WHITE)

    if show:
      print self.faces
      cv.ShowImage("w1", self.image)
      cv.WaitKey(1)

    return self.image
Example #4
    def run(self):
        # check if capture device is OK
        if not self.capture:
            print "Error opening capture device"
            sys.exit(1)

        self.frame = cv.QueryFrame(self.capture)
        self.image_size = cv.GetSize(self.frame)

        # create grayscale version
        self.grayscale = cv.CreateImage(self.image_size, 8, 1)

        # create storage
        self.storage = cv.CreateMemStorage(128)
        self.cascade = cv.Load(haarfile)

        while 1:
            # do forever
            # capture the current frame
            self.frame = cv.QueryFrame(self.capture)
            if self.frame is None:
                break

            # mirror
            cv.Flip(self.frame, None, 1)

            # face detection
            self.detect()

            # display webcam image
            if showvideo:
                cv.ShowImage('CamShiftDemo', self.frame)

            # handle events
            k = cv.WaitKey(10)
Example #5
 def grab_frame(self):
     self.frame = cv.QueryFrame(self.capture)
     if not self.frame:
         print "can't grab frame"
         sys.exit(2)
     cv.Flip(self.frame, None, 1)
     return self.frame
Example #6
  def run(self):
    k = cv.WaitKey(self.msdelay)
    k = chr(k) if k > 0 else 0
    if handle_keyboard(k) < 0:
        return False
    bgrimg = cv.QueryFrame(self.cam)
    if not bgrimg:
        return False
    bgrimg = im.resize(bgrimg, width=400)
    cv.Flip(bgrimg, None, 1)

    contours = self.session.process(bgrimg)

    max_contours = None
    if contours:
        max_contours = im.top_two_max_contours(contours)

    if max_contours:
        img = bgrimg
        #cv.Clone(bgrimg)
        cts = []
        for ct in max_contours:
            if ct[1]: cts.append(ct[1])
        finger_tips = im.get_finger_tips(cts, img)

        self.session.translate(finger_tips, img)
        if self.debug == 1: cv.ShowImage(self.proc_win_name, img)

        self.dumpScreen(img)

    return True
Example #7
def setup(flipped, capture, thehandcolor):
    """Initializes camera and finds initial skin tone"""

    #creates initial window and prepares text
    color = (40, 0, 0)
    font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 1.0, 1.0)
    textsize1 = (cv.GetSize(flipped)[0] / 2 - 150,
                 cv.GetSize(flipped)[1] / 2 - 140)
    textsize2 = (cv.GetSize(flipped)[0] / 2 - 150,
                 cv.GetSize(flipped)[1] / 2 - 110)
    point1 = (cv.GetSize(flipped)[0] / 2 - 25, cv.GetSize(flipped)[1] / 2 - 25)
    point2 = (cv.GetSize(flipped)[0] / 2 + 25, cv.GetSize(flipped)[1] / 2 + 25)

    #until Enter is pressed
    while (cv.WaitKey(10) != 10):

        #captures live video, and draws sub-box and text
        frame = cv.QueryFrame(capture)
        cv.Copy(frame, flipped)
        cv.Flip(flipped, flipped, 1)
        cv.Rectangle(flipped, point1, point2, color, 2)
        cv.PutText(flipped, "Put your hand in the box ", textsize1, font,
                   color)
        cv.PutText(flipped, "and press enter", textsize2, font, color)
        cv.ShowImage("w2", flipped)

    #Creates sub-image inside box, and returns average color in box
    sub = cv.GetSubRect(flipped, (cv.GetSize(flipped)[0] / 2 - 25,
                                  cv.GetSize(flipped)[1] / 2 - 25, 50, 50))
    cv.Set(thehandcolor, cv.Avg(sub))
    return cv.Avg(sub)
def Color_callibration(capture):
    vals = []
    bgr = []
    mini = [255, 255, 255]
    maxi = [0, 0, 0]
    cv.NamedWindow("BGR", 0)
    print 'Please put your color in the circular area. Press ESC to start calibration:'
    while 1:
        image = cv.QueryFrame(capture)
        cv.Flip(image, image, 1)
        cv.Circle(image, (int(200), int(300)), 20, cv.CV_RGB(255, 255, 255), 6)
        cv.ShowImage("BGR", image)
        c = cv.WaitKey(33)
        if c == 27:
            break
    for i in range(0, 10):
        image = cv.QueryFrame(capture)
        cv.Flip(image, image, 1)
        cv.Smooth(image, image, cv.CV_GAUSSIAN, 3, 0)
        imagehsv = cv.CreateImage(cv.GetSize(image), 8, 3)
        cv.CvtColor(image, imagehsv, cv.CV_BGR2HSV)
        vals = cv.Get2D(imagehsv, 300, 200)
        print 'hue:', vals[0], 'sat:', vals[1], 'val:', vals[2]  # values are HSV after CvtColor
        for j in range(0, 3):
            if (vals[j] < mini[j]): mini[j] = vals[j]
            if (vals[j] > maxi[j]): maxi[j] = vals[j]
        cv.Circle(image, (int(200), int(300)), 20, cv.CV_RGB(255, 255, 255), 6)
        cv.ShowImage("BGR", image)
        c = cv.WaitKey(33)
        if c == 27:
            break
    mini[0] -= 10
    mini[1] -= 20
    mini[2] -= 50
    maxi[0] += 10
    maxi[1] += 20
    maxi[2] += 50
    for i in range(0, 3):
        if (mini[i] < 0):
            mini[i] = 0
        if (maxi[i] > 255):
            maxi[i] = 255
    cv.DestroyWindow("BGR")
    print mini
    print maxi
    bgr = (mini, maxi)
    return bgr
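
A hedged usage sketch: the capture handle here is a hypothetical cv.CaptureFromCAM device, and note that the returned bounds are HSV triples (despite the bgr variable name), suitable for a later cv.InRangeS mask.

capture = cv.CaptureFromCAM(0)  # hypothetical camera index
lower, upper = Color_callibration(capture)
# lower/upper are HSV bounds; a mask could later be built with, e.g.,
# cv.InRangeS(hsv_frame, cv.Scalar(*lower), cv.Scalar(*upper), mask)
print "HSV lower bound:", lower
print "HSV upper bound:", upper
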
Example #9
 def grab_frame(self):
     self.frame = cv.QueryFrame(self.capture)
     if not self.frame:
         print "can't grap frame, or end of movie. Bye bye."
         sys.exit(2)
     if self.flip:
         cv.Flip(self.frame, None, 1)
     return self.frame
Example #10
def rotateImage(image):
	# transposed image
	timg = cv.CreateImage((image.height, image.width), image.depth, image.channels)

	# rotate clockwise
	cv.Transpose(image, timg)
	cv.Flip(timg, timg, flipMode=1)
	return timg
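
For comparison, a sketch of the same clockwise rotation with the modern cv2 array API; this is an assumed equivalent, not part of the original code.

import cv2

def rotate_image_cw(image):
    # transpose followed by a horizontal flip equals a 90-degree clockwise rotation
    return cv2.flip(cv2.transpose(image), 1)
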
Example #11
 def _rotate_code(self, cod, ori):
     '''
     Rotate the code matrix in place according to the detected orientation.
     @param cod: code matrix to rotate
     @param ori: detected orientation (TOP_LEFT, TOP_RIGHT, BOT_RIGHT, or bottom-left)
     '''
     if ori == self.TOP_LEFT:
         pass
     elif ori == self.TOP_RIGHT:
         cv.Transpose(cod, cod)
         cv.Flip(cod)
         return cod
     elif ori == self.BOT_RIGHT:
         cv.Flip(cod, cod, -1)
     else:
         cv.Transpose(cod, cod)
         cv.Flip(cod, cod, 1)
Example #12
    def runVideo(self):
        frameCount = 0
        x = self.xFont
        y = self.yFont
        font = self.videoFont
        tom2Coords = self.tom2Coords
        tom1Coords = self.tom1Coords
        snareCoords = self.snareCoords
        hihatCoords = self.hihatCoords

        i = 2

        while True:
            if i % 2 == 0:
                frame = cv.QueryFrame(capture)

                self.redStick.drawBoundingCircle(frame)
                self.blueStick.drawBoundingCircle(frame)

                self.redStick.appendCentersList()
                self.blueStick.appendCentersList()

                self.redStick.findDelta()
                self.blueStick.findDelta()

                self.redStick.playSounds()
                self.blueStick.playSounds()

                cv.Rectangle(frame, (tom1Coords[0], tom1Coords[1]),
                             (tom1Coords[2], tom1Coords[3]), (255, 0, 0), 0)

                cv.Rectangle(frame, (tom2Coords[0], tom2Coords[1]),
                             (tom2Coords[2], tom2Coords[3]), (0, 255, 0), 0)

                cv.Rectangle(frame, (snareCoords[0], snareCoords[1]),
                             (snareCoords[2], snareCoords[3]), (0, 0, 255), 0)

                cv.Rectangle(frame, (hihatCoords[0], hihatCoords[1]),
                             (hihatCoords[2], hihatCoords[3]), (125, 125, 0),
                             0)

                cv.Flip(frame, frame, 1)
                cv.PutText(frame, "Press q or esc to terminate.", (x, y), font,
                           (0, 255, 0))
                cv.PutText(frame, "Current Velocities:", (x, y + 50), font,
                           (0, 255, 255))
                cv.PutText(frame, str(self.redStick.delta), (x, y + 100), font,
                           (0, 0, 255))
                cv.PutText(frame, str(self.blueStick.delta),
                           (x + 100, y + 100), font, (255, 0, 0))
                cv.ShowImage("DrumMaster 9000!", frame)
                key = cv.WaitKey(7)
                if key == 27 or key == 113:
                    cv.DestroyAllWindows()
                    pygame.quit()
                    exit()
                    break
                frameCount += 1
Example #13
    def bodies_from_image(self, image, flip=True):
        if flip:
            flipped = cv.CreateImage(
                (image.width, image.height), image.depth, image.channels)
            cv.Flip(image, flipped, flipMode=0)
            image = flipped

        contours = self.detect_outline(image)
        return self.objects_from_contours(contours)
Example #14
 def _build_image(self, frame):
     if not self._frame:
         self._frame = cv.CreateImage((frame.width, frame.height),
                                      cv.IPL_DEPTH_8U, frame.nChannels)
     if frame.origin == cv.IPL_ORIGIN_TL:
         cv.Copy(frame, self._frame)
     else:
         cv.Flip(frame, self._frame, 0)
     return IplQImage(self._frame)
def draw_bbs(imgpath, decoding, bbs, isflip=False):
    Icolor = cv.LoadImage(imgpath, cv.CV_LOAD_IMAGE_COLOR)
    if isflip:
        cv.Flip(Icolor, Icolor, -1)
    for i, (x1, y1, x2, y2) in enumerate(bbs):
        color = cv.CV_RGB(255, 0, 0) if decoding[i] == '0' else cv.CV_RGB(
            0, 0, 255)
        cv.Rectangle(Icolor, (x1, y1), (x2, y2), color, thickness=2)
    return Icolor
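
A hedged glue sketch showing how decode()'s bit_locations dictionary could be flattened into the parallel lists draw_bbs() expects; imgpath, mark, and the bit count are assumed to come from the surrounding project.

bitstring, is_flipped, bit_locations = decode(imgpath, mark, bits=36)
decoding, bbs = [], []
for bit_value, boxes in bit_locations.iteritems():
    for box in boxes:
        decoding.append(bit_value)
        bbs.append(box)
Icolor = draw_bbs(imgpath, decoding, bbs, isflip=is_flipped)
cv.SaveImage("_decoded_bbs.png", Icolor)
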
    def find_avarage_colors(self):
        width, height = self.frame_copy.width, self.frame_copy.height
        frame = self.frame_copy
        self.av_color, self.stddev = cv.AvgSdv(self.frame_copy)

        half_width, half_height = width / 3, height / 3

        upper_rect = (0, 0, half_width, half_height)
        lower_rect = (half_width, half_height, half_width - 1, half_height - 1)
        upper_sub_image = cv.GetSubRect(self.frame_copy, upper_rect)
        upper_frame = cv.CreateImage((upper_rect[2], upper_rect[3]),
                                     cv.IPL_DEPTH_8U, frame.nChannels)

        if frame.origin == cv.IPL_ORIGIN_TL:
            cv.Copy(upper_sub_image, upper_frame)
        else:
            cv.Flip(upper_sub_image, upper_frame, 0)
        u_av, u_sd = cv.AvgSdv(upper_frame)

        lower_sub_image = cv.GetSubRect(self.frame_copy, lower_rect)
        lower_frame = cv.CreateImage((lower_rect[2], lower_rect[3]),
                                     cv.IPL_DEPTH_8U, frame.nChannels)
        if frame.origin == cv.IPL_ORIGIN_TL:
            cv.Copy(lower_sub_image, lower_frame)
        else:
            cv.Flip(lower_sub_image, lower_frame, 0)
        l_av, l_sd = cv.AvgSdv(lower_frame)

        l_av = [l_av[0], l_av[1], l_av[2]]
        l_sd = [l_sd[0], l_sd[1], l_sd[2]]
        if l_av[0] < 0.00001: l_av[0] = 0.0001
        if l_av[1] < 0.00001: l_av[1] = 0.0001
        if l_av[2] < 0.00001: l_av[2] = 0.0001
        if l_sd[0] < 0.00001: l_sd[0] = 0.0001
        if l_sd[1] < 0.00001: l_sd[1] = 0.0001
        if l_sd[2] < 0.00001: l_sd[2] = 0.0001

        self.u2l_ratio = (u_av[0] / l_av[0], u_av[1] / l_av[1],
                          u_av[2] / l_av[2])
        self.u2l_sd_ratio = (u_sd[0] / l_sd[0], u_sd[1] / l_sd[1],
                             u_sd[2] / l_sd[2])

        del upper_frame
        del lower_frame
Example #17
 def sub_image(self, imagecurr, imageprev, divid=True):
     imagesize = (imagecurr.width, imagecurr.height)
     image = cv.CreateImage(imagesize, cv.IPL_DEPTH_8U, 1)
     cv.Sub(imagecurr, imageprev, image)
     # use pyramid/cone to ponderate the weight
     # ie. moves in corners are more important than in the center
     if divid:
         cv.Div(image, self.cone, image)
     cv.Flip(image, flipMode=1) # for webcam
     return image
Example #18
def showImage(svs, img):
    # copy of the image for drawing the particles
    dst = cv.CloneImage(img.image)
    # draw the particles
    for sv in svs:
        # NOTE: particle positions are cast to int here
        cv.Circle(dst, (int(sv[0]), int(sv[1])), 3, cv.CV_RGB(0, 0, 255))
    # display
    cv.Flip(dst, flipMode=1)
    cv.ShowImage("Capture", dst)
Example #19
 def get_frame(self):
     self.raw_image = self.get_raw_frame()
     im = self.undistort_frame()
     if 'upside_down' in self.config:
         if self.config['upside_down']:
             if self.corrected_orientation is None:
                 self.corrected_orientation = cv.CloneImage(im)
             cv.Flip(im, self.corrected_orientation, -1)
             im = self.corrected_orientation
     return im
def drawit(I, bbs, imgpath, isflip=False):
    Icolor = cv.LoadImage(imgpath, cv.CV_LOAD_IMAGE_COLOR)
    if isflip:
        cv.Flip(Icolor, Icolor, -1)
    cv.SetImageROI(Icolor, cv.GetImageROI(I))
    for (x1, y1, x2, y2) in bbs:
        cv.Rectangle(Icolor, (x1, y1), (x2, y2), cv.CV_RGB(255, 0, 0))
    print "<><><><> Saving '_Icolor.png' <><><><>"
    cv.SaveImage("_Icolor.png", Icolor)
    pdb.set_trace()
Example #21
def FPV_thread():
    global camera_index
    global capture
    global WINDOW_NAME
    global latest_frame
    global FPV_thread_stop
    global overlay_message  # shared with application return results
    global face_position    # shared with application return results

    FPV_init()

    cv.NamedWindow(WINDOW_NAME, cv.CV_WINDOW_NORMAL)
    cv.MoveWindow(WINDOW_NAME, 0, 0)

    width_scale = 1.0
    height_scale = 1.0
    while True:
        frame = cv.QueryFrame(capture)
        cv.Flip(frame, None, 1)

        #copy to buffer
        frame_lock.acquire()
        original_imagesize = (0,0)
        resized_imagesize = (0,0)
        if not latest_frame:
            latest_frame = cv.CreateImage((640, 480), frame.depth, frame.nChannels)
            original_imagesize = cv.GetSize(frame)
            resized_imagesize = cv.GetSize(latest_frame)
            width_scale = original_imagesize[0]*1.0/resized_imagesize[0]
            height_scale = original_imagesize[1]*1.0/resized_imagesize[1]
        cv.Resize(frame, latest_frame)
        frame_lock.release()


        #Display Result
        text_start_point = (10, 50)
        cv.PutText(frame, overlay_message, text_start_point, font, cv.Scalar(255,255,255))
        cv.Rectangle(frame, text_start_point, (original_imagesize[0], 100), cv.Scalar(0,0,0), thickness=cv.CV_FILLED)
        if face_position[0] > 0.0:
            point1 = (int(face_position[0]*width_scale), int(face_position[1]*height_scale))
            point2 = (int((face_position[0] + face_position[2])*width_scale), \
                    int((face_position[1]+face_position[3])*height_scale))
            cv.Rectangle(frame, point1, point2, \
                    cv.Scalar(255, 255, 255), thickness=2)
        cv.ShowImage(WINDOW_NAME, frame)
        cv.ResizeWindow(WINDOW_NAME, 200, 100)
        cv.NamedWindow(WINDOW_NAME, cv.CV_WINDOW_NORMAL)
        cv.SetWindowProperty(WINDOW_NAME, 0, cv.CV_WINDOW_FULLSCREEN)
        c = cv.WaitKey(10)
        if c == ord('q'):
            break

    print "[INFO] FPV Thread is finished"
    FPV_thread_stop = True
    FPV_close()
Example #22
    def show(self, flip=True):
        """
		Grab an image from the webcam and show it in a :func:`cv.NamedWindow` called 'webcam'.
		
		**Parameters:**
			* flip (bool) - Used to rectify the image if using a webcam which is facing you.  
		"""
        frame = cv.QueryFrame(self.cap)
        if flip:
            cv.Flip(frame, flipMode=1)
        show(frame, 'webcam')
Example #23
def runVideo():
    frameCount = 0
    snareHit = tom1Hit = tom2Hit = False
    redStick = Stick("red")
    blueStick = Stick("blue")

    i = 2

    while True:
        if i % 2 == 0:
            frame = cv.QueryFrame(capture)

            redStick.drawBoundingCircle(frame)
            blueStick.drawBoundingCircle(frame)

            redStick.appendCentersList()
            blueStick.appendCentersList()

            redStick.playSounds()
            blueStick.playSounds()

            cv.Rectangle(frame, (tom1Coords[0], tom1Coords[1]),
                         (tom1Coords[2], tom1Coords[3]), (255, 0, 0), 0)

            cv.Rectangle(frame, (tom2Coords[0], tom2Coords[1]),
                         (tom2Coords[2], tom2Coords[3]), (0, 255, 0), 0)

            cv.Rectangle(frame, (snareCoords[0], snareCoords[1]),
                         (snareCoords[2], snareCoords[3]), (0, 0, 255), 0)

            cv.Rectangle(frame, (hihatCoords[0], hihatCoords[1]),
                         (hihatCoords[2], hihatCoords[3]), (125, 125, 0), 0)

            if redStick.hasBeenStill():
                synchNoise.play()

            #print redStick.stillness
            #print redStick.last50Centers
            #print blueStick.last50Centers

            #snapshot = cam.get_image(snapshot)
            #screen.blit(snapshot, (0,0))

    ##        for event in pygame.event.get():
    ##            if event.type == pygame.KEYDOWN:
    ##                if event.key == pygame.K_h:
    ##                    runHelpScreen()
    ##
            cv.Flip(frame, frame, 1)
            cv.ShowImage("DrumMaster 9000!", frame)
            key = cv.WaitKey(7)
            if key == 27:
                break
            frameCount += 1
Example #24
def FormatImage(img, oimg, off, scale, correction):
    global i01

    #print img.height,img.width
    #print oimg.height,oimg.width
    cv.Transpose(img,oimg)
    cv.Flip(oimg,None,0)

    if(correction):
        cv.AddS(oimg, off, oimg)
        cv.Div(oimg, i01, oimg, scale)
Example #25
def get_next_frame(video):
    """Captures next frame in the video
	@param video : opencv video handle
	"""

    image = cv.QueryFrame(video)
    cv.Flip(image, None, 0)
    # cv.CvtColor(image, image, cv.CV_BGR2RGB)
    image_arr = ipl2tex(image)

    return image_arr
Example #26
    def depth_callback(self, data):
        depth_image = self.convert_depth_image(data)

        if self.flip_image:
            cv.Flip(depth_image)

        if not self.depth_image:
            (cols, rows) = cv.GetSize(depth_image)
            self.depth_image = cv.CreateMat(rows, cols, cv.CV_32FC1)

        cv.Copy(depth_image, self.depth_image)
    def frame(self):
        if self.capture_started:
            frame = cv.QueryFrame(self.capture)

            if not self.frame_copy:
                self.frame_copy = cv.CreateImage((frame.width, frame.height),
                                                 cv.IPL_DEPTH_8U,
                                                 frame.nChannels)

            if frame.origin == cv.IPL_ORIGIN_TL:
                cv.Copy(frame, self.frame_copy)
            else:
                cv.Flip(frame, self.frame_copy, 0)
Example #28
    def get(self, flip=True):
        """
		Grab an image from the webcam.
		
		**Parameters:**
			* flip (bool) - Used to rectify the image if using a webcam which is facing you.  
		
		**Returns:**
			The webcam image.
		"""
        frame = cv.QueryFrame(self.cap)
        if flip:
            cv.Flip(frame, flipMode=1)
        return frame
Example #29
    def __init__(self, frame, rect, verify_detected_faces, run_face_analysis):
        self.rect = rect
        self.face_photo_valid = False
        self.face_size = [rect[2], rect[3]]
        self.personID = -1
        self.white_level = 6
        self.size = (92, 112)  #180,180

        # crop only the face region
        try:
            sub_image = cv.GetSubRect(frame, rect)
        except:
            return None
        self.frame_copy = cv.CreateImage((rect[2], rect[3]), cv.IPL_DEPTH_8U,
                                         frame.nChannels)

        if frame.origin == cv.IPL_ORIGIN_TL:
            cv.Copy(sub_image, self.frame_copy)
        else:
            cv.Flip(sub_image, self.frame_copy, 0)
        # downscale the face photo so it is suitable for analysis
        self.path = self.prepare_face_picture()
        if verify_detected_faces:
            # check whether a nose is present in the face photo; if it is, the photo really shows a face
            self.validate_face_photo()
        else:
            self.face_photo_valid = 1
        if self.is_a_valid_face_photo():
            self.time = time.time()
            self.wrinkles = self.find_wrinkles()
            self.A2B_ratio, self.A_B_color, self.C_color = self.find_avarage_colors(
            )
            if run_face_analysis:
                self.gender, self.age, self.emotions = self.find_gender_age_emotions(
                )
                gender_ratio, age_ratio, emotions_ratio = round(
                    self.gender / 3.0,
                    3), round(self.age / 18.0,
                              3), round(self.emotions / 3.0, 3)
            else:
                self.gender, self.age, self.emotions, self.position = 0, 0, 0, 1
                gender_ratio, age_ratio, emotions_ratio = 0, 0, 0

            self.x_position_ratio = self.count_position_ratio(
                rect[0], frame.width)
            self.create_face_vector([
                self.time, self.wrinkles, self.A2B_ratio, self.A_B_color,
                self.C_color, gender_ratio, age_ratio, emotions_ratio,
                self.x_position_ratio
            ])
def extract_digitpatch(jobs):
    for (imgpath, (x1,y1,x2,y2), outpath, isflip) in jobs:
        try:
            os.makedirs(os.path.split(outpath)[0])
        except:
            pass
        I = cv.LoadImage(imgpath, cv.CV_LOAD_IMAGE_UNCHANGED)

        if isflip:
            cv.Flip(I, I, flipMode=-1)

        cv.SetImageROI(I, tuple(map(int, (x1,y1,x2-x1, y2-y1))))
        cv.SaveImage(outpath, I)
    return True
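
A hedged sketch of the jobs layout extract_digitpatch() expects; every path and coordinate below is a hypothetical placeholder.

jobs = [
    # (image path, (x1, y1, x2, y2) patch box, output path, flip flag)
    ("ballots/0001.png", (120, 340, 180, 380), "out/0001_patch.png", False),
    ("ballots/0002.png", (118, 338, 178, 378), "out/0002_patch.png", True),
]
extract_digitpatch(jobs)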