Example #1
 def update_histogram(self, region):
     cv.SetImageROI(self.hue, region)
     cv.SetImageROI(self.sat, region)
     cv.CalcArrHist([self.hue, self.sat], self.face_hist, 1)
     #cv.NormalizeHist(self.face_hist, 255)
     cv.ResetImageROI(self.hue)
     cv.ResetImageROI(self.sat)
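The legacy SetImageROI/ResetImageROI pair above restricts the histogram to a face region by mutating image state. With the modern cv2 API an ROI is just a NumPy slice, so no reset step is needed. A minimal sketch of the same hue/saturation histogram pattern, assuming hue and sat are single-channel uint8 arrays and region is (x, y, w, h); the function name and bin counts are illustrative:

import cv2

def update_histogram_cv2(hue, sat, region):
    # crop by slicing instead of SetImageROI/ResetImageROI
    x, y, w, h = region
    hue_roi = hue[y:y + h, x:x + w]
    sat_roi = sat[y:y + h, x:x + w]
    # joint H/S histogram; hue range is 0-179 for 8-bit HSV images
    hist = cv2.calcHist([hue_roi, sat_roi], [0, 1], None,
                        [30, 32], [0, 180, 0, 256])
    cv2.normalize(hist, hist, 0, 255, cv2.NORM_MINMAX)
    return hist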
Example #2
def SubImage(img, region):
    '''Return a subimage as a new image. The region may extend past the
    image edges; areas outside the source image are left zero-filled.
    region is of the form (x1, y1, width, height).'''
    (x1, y1, width, height) = region
    zeros = numpy.zeros((height, width, 3), dtype='uint8')
    ret = cv.GetImage(cv.fromarray(zeros))
    (img_width, img_height) = cv.GetSize(img)
    if x1 < 0:
        sx1 = 0
        xofs = -x1
    else:
        sx1 = x1
        xofs = 0
    if y1 < 0:
        sy1 = 0
        yofs = -y1
    else:
        sy1 = y1
        yofs = 0
    if sx1 + width <= img_width:
        w = width
    else:
        w = img_width - sx1
    if sy1 + height <= img_height:
        h = height
    else:
        h = img_height - sy1
    cv.SetImageROI(img, (sx1, sy1, w - xofs, h - yofs))
    cv.SetImageROI(ret, (xofs, yofs, w - xofs, h - yofs))
    cv.Copy(img, ret)
    cv.ResetImageROI(img)
    cv.ResetImageROI(ret)
    return ret
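The same edge-tolerant crop is simpler with NumPy slicing, since out-of-range coordinates can be clamped and the rest of the output stays zero. A sketch under the assumption that img is an HxWx3 uint8 array; sub_image_np is an illustrative name, not part of the original module:

import numpy as np

def sub_image_np(img, region):
    # region = (x1, y1, width, height); parts outside img stay zero-filled
    x1, y1, width, height = region
    out = np.zeros((height, width, 3), dtype=np.uint8)
    sx1, sy1 = max(x1, 0), max(y1, 0)
    sx2 = min(x1 + width, img.shape[1])
    sy2 = min(y1 + height, img.shape[0])
    if sx2 > sx1 and sy2 > sy1:
        out[sy1 - y1:sy2 - y1, sx1 - x1:sx2 - x1] = img[sy1:sy2, sx1:sx2]
    return out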
Example #3
def matchsize(A, B):
    """ Given two cvMats A, B, returns a cropped/padded version of
    A that has the same dimensions as B.
    """
    wA, hA = cv.GetSize(A)
    wB, hB = cv.GetSize(B)
    if wA == wB and hA == hB:
        return A
    SetImageROI = cv.SetImageROI
    out = cv.CreateImage((wB, hB), A.depth, A.channels)
    wOut, hOut = cv.GetSize(out)
    if wA < wOut and hA < hOut:
        SetImageROI(out, (0, 0, wA, hA))
    elif wA >= wOut and hA < hOut:
        SetImageROI(out, (0, 0, wOut, hA))
        SetImageROI(A, (0, 0, wOut, hA))
    elif wA < wOut and hA >= hOut:
        SetImageROI(out, (0, 0, wA, hOut))
        SetImageROI(A, (0, 0, wA, hOut))
    else:  # wA >= wOut and hA >= hOut:
        SetImageROI(A, (0, 0, wOut, hOut))
    cv.Copy(A, out)
    cv.ResetImageROI(out)
    cv.ResetImageROI(A)
    return out
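A NumPy version of the crop-or-pad behaviour is shorter because one zero array plus a single slice assignment covers all four branches. A sketch assuming A and B are arrays with the same dtype and channel count; matchsize_np is an illustrative name:

import numpy as np

def matchsize_np(A, B):
    # return A cropped and/or zero-padded to B's height and width
    hB, wB = B.shape[:2]
    out = np.zeros((hB, wB) + A.shape[2:], dtype=A.dtype)
    h, w = min(A.shape[0], hB), min(A.shape[1], wB)
    out[:h, :w] = A[:h, :w]
    return out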
Example #4
def repaintCCs(image, doRepaint=None, returnMask=False, resizeMask=True, doFillBackground=True, bgPoint=(0, 0), newcol=255, connectivity=4):
    if doRepaint is None:
        doRepaint = lambda comp, col: False
    resultMask = cv.CreateImage((image.width + 2, image.height + 2), image.depth, image.nChannels)
    tempMask = cv.CreateImage((image.width + 2, image.height + 2), image.depth, image.nChannels)
    visitMask = cv.CreateImage((image.width + 2, image.height + 2), image.depth, image.nChannels)
    cv.Zero(resultMask)
    cv.Zero(tempMask)
    cv.Zero(visitMask)
    if doFillBackground:
        cv.FloodFill(image, bgPoint, 0, 0, 0, connectivity + cv.CV_FLOODFILL_MASK_ONLY + (255 << 8), visitMask)
    for x in xrange(image.width):
        for y in xrange(image.height):
            if visitMask[y + 1, x + 1] == 255:
                continue
            comp = cv.FloodFill(image, (x, y), 0, 0, 0, connectivity + cv.CV_FLOODFILL_MASK_ONLY + (255 << 8), tempMask)
            region = shiftRect(comp[2], 1, 1)
            cv.SetImageROI(tempMask, region)
            cv.SetImageROI(visitMask, region)
            cv.Or(tempMask, visitMask, visitMask)
            if doRepaint(comp, image[y, x]):
                cv.SetImageROI(resultMask, region)
                cv.Or(tempMask, resultMask, resultMask)
                cv.ResetImageROI(resultMask)
            cv.Zero(tempMask)
            cv.ResetImageROI(tempMask)
            cv.ResetImageROI(visitMask)
    if returnMask:
        if resizeMask: return cap.getSubImage(resultMask, (1, 1, image.width, image.height))
        else: return resultMask
    else:    
        cv.SetImageROI(resultMask, (1, 1, image.width, image.height))
        cv.Set(image, newcol, resultMask)
        return image
Example #5
def findCCs(image, erasecol=0, doContinue=None, doSkip=None, bRange=0, connectivity=8):
    """
    Finds all connected components in the image.
    doContinue is a function applied to the color of every new pixel in the image.
    If it is true, this pixel is ignored. Default: <= 128
    doSkip is a function applied to every new connected component found by the
    function. If it is true, this component will not be included in the result.
    Default: do not skip anything.
    """
    if doContinue is None:
        doContinue = lambda col: col <= 128
    if doSkip is None:
        doSkip = lambda comp: False
    mask = cv.CreateImage((image.width + 2, image.height + 2), cv.IPL_DEPTH_8U, 1)
    cv.Zero(mask)
    components = []
    for x in range(image.width):
        for y in range(image.height):
            if doContinue(image[y, x]):
                continue
            comp = cv.FloodFill(image, (x, y), 0, bRange, bRange, connectivity + cv.CV_FLOODFILL_MASK_ONLY + (255 << 8), mask) # here 3rd argument is ignored
            region = shiftRect(comp[2], 1, 1)
            if not doSkip(comp):
                seg = cvext.getSubImage(mask, region)
                components.append((comp[0], comp[1], comp[2], seg))
            cv.SetImageROI(image, comp[2])
            cv.SetImageROI(mask, region)
            cv.Set(image, erasecol, mask)
            cv.Zero(mask)
            cv.ResetImageROI(image)
            cv.ResetImageROI(mask)
    return components
Example #6
    def draw(self, img, pixmapper):
        '''draw the icon on the image'''

        if self.trail is not None:
            self.trail.draw(img, pixmapper)

        icon = self.img()
        (px, py) = pixmapper(self.latlon)

        # find top left
        px -= icon.width / 2
        py -= icon.height / 2
        w = icon.width
        h = icon.height

        (px, py, sx, sy, w, h) = self.clip(px, py, w, h, img)

        cv.SetImageROI(icon, (sx, sy, w, h))
        cv.SetImageROI(img, (px, py, w, h))
        cv.Add(icon, img, img)
        cv.ResetImageROI(img)
        cv.ResetImageROI(icon)

        # remember where we placed it for clicked()
        self.posx = px + w / 2
        self.posy = py + h / 2
Example #7
def removeBadBackground(seg):
    threshUp = cv.CreateImage(cv.GetSize(seg), cv.IPL_DEPTH_8U, 1)
    comparison = cv.CreateImage(cv.GetSize(seg), cv.IPL_DEPTH_8U, 1)
    visitMask = cv.CreateImage(cv.GetSize(seg), cv.IPL_DEPTH_8U, 1)
    ffMask = cv.CreateImage((seg.width + 2, seg.height + 2), cv.IPL_DEPTH_8U,
                            1)
    cv.Threshold(seg, threshUp, 1, 255, cv.CV_THRESH_BINARY)
    cv.Zero(visitMask)
    cv.Zero(ffMask)
    for x in xrange(seg.width):
        for y in xrange(seg.height):
            if seg[y, x] != 96 or visitMask[y, x] == 255: continue
            comp = cv.FloodFill(threshUp, (x, y), 0, 0, 0,
                                4 + cv.CV_FLOODFILL_MASK_ONLY + (255 << 8),
                                ffMask)
            rect = comp[2]
            cv.SetImageROI(ffMask, cap.shiftRect(rect, 1, 1))
            cv.OrS(ffMask, 1, ffMask)
            cv.SetImageROI(seg, rect)
            cv.SetImageROI(comparison, rect)
            cv.Cmp(
                seg, ffMask, comparison,
                cv.CV_CMP_EQ)  # 'comparison' does not need to be zeroed later
            intersect = cv.CountNonZero(comparison)
            cv.SetImageROI(visitMask, rect)
            cv.Or(visitMask, ffMask, visitMask)
            cv.ResetImageROI(visitMask)
            if intersect == 0:
                cv.Set(seg, 0, ffMask)
            cv.Zero(ffMask)
            cv.ResetImageROI(seg)
            cv.ResetImageROI(ffMask)
    return seg
Example #8
 def threshold(self):
     for x in range(0, self.size[0], 30):
         for y in range(0, self.size[1], 30):
             cv.SetImageROI(self.gray_img, (x, y, 30, 30))
             cv.SetImageROI(self.bw_img, (x, y, 30, 30))
             cv.Threshold(
                 self.gray_img, self.bw_img, 127, 255, cv.CV_THRESH_OTSU)
     cv.ResetImageROI(self.gray_img)
     cv.ResetImageROI(self.bw_img)
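The block-wise Otsu idea above (binarise each 30x30 tile with its own threshold) maps directly onto cv2.threshold with THRESH_OTSU applied per slice. A sketch assuming gray is a single-channel uint8 array; threshold_tiles and the tile size are illustrative:

import cv2
import numpy as np

def threshold_tiles(gray, tile=30):
    bw = np.empty_like(gray)
    h, w = gray.shape
    for y in range(0, h, tile):
        for x in range(0, w, tile):
            block = gray[y:y + tile, x:x + tile]
            # Otsu picks the threshold per tile; the 127 value is ignored
            _, bw[y:y + tile, x:x + tile] = cv2.threshold(
                block, 127, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    return bw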
Example #9
def capture_draw():
    img = cv.QueryFrame(capture)
    # scale your big ole face down to something small
    thumb = cv.CreateMat(img.height / SCALE, img.width / SCALE, cv.CV_8UC3)
    cv.Resize(img, thumb)
    faces = get_face_roi(thumb)
    for (x, y, w, h), n in faces:
        temp_offset = (x * SCALE, y * SCALE)
        cv.SetImageROI(img,
                       ((x) * SCALE, (y) * SCALE, (w) * SCALE, (h) * SCALE))
        roi_image = cv.CreateImage(cv.GetSize(img), img.depth, img.nChannels)
        cv.Copy(img, roi_image)
        cv.ResetImageROI(img)

        cv.Rectangle(img, (x * SCALE, y * SCALE),
                     (x * SCALE + w * SCALE, y * SCALE + h * SCALE),
                     (255, 0, 0))
        cv.PutText(img, 'face', (x * SCALE, y * SCALE), font, (200, 200, 200))

        FEATURE_SCALE = (float(roi_image.width) / ROI_TARGET_SIZE[0],
                         float(roi_image.height) / ROI_TARGET_SIZE[1])
        roi_thumb = cv.CreateImage((int(roi_image.width / FEATURE_SCALE[0]),
                                    int(roi_image.height / FEATURE_SCALE[1])),
                                   cv.IPL_DEPTH_8U, 3)
        cv.Resize(roi_image, roi_thumb)

        features = get_features(roi_thumb)
        cv.ShowImage("ROI", roi_image)
        for name in features:
            if features[name] is not None:
                for (x1, y1, w1, h1), n1 in features[name]:
                    cv.SetImageROI(
                        roi_image,
                        (x1 * FEATURE_SCALE[0], y1 * FEATURE_SCALE[1],
                         w1 * FEATURE_SCALE[0], h1 * FEATURE_SCALE[1]))
                    feature_image = cv.CreateImage(cv.GetSize(roi_image),
                                                   roi_image.depth,
                                                   roi_image.nChannels)
                    cv.Copy(roi_image, feature_image)
                    cv.ResetImageROI(feature_image)
                    cv.ShowImage(name, feature_image)
                    cv.PutText(img, name,
                               (temp_offset[0] + x1 * FEATURE_SCALE[0],
                                temp_offset[1] + y1 * FEATURE_SCALE[1]), font,
                               (200, 200, 200))
                    cv.Rectangle(
                        img, (temp_offset[0] + x1 * FEATURE_SCALE[0],
                              temp_offset[1] + y1 * FEATURE_SCALE[1]),
                        (temp_offset[0] +
                         (x1 + w1) * FEATURE_SCALE[0], temp_offset[1] +
                         (y1 + h1) * FEATURE_SCALE[1]), (0, 255, 255))
    cv.ShowImage("Whole Image", img)
Example #10
 def acquire(self, img):
     self.found_data = False
     cv.SetImageROI(img, self.rect.ToCvRect())
     self.internal_img = cv.CreateImage(cv.GetSize(img), img.depth,
                                        img.nChannels)
     cv.Copy(img, self.internal_img)
     cv.ResetImageROI(img)
Example #11
 def do1Image(self, image, prevpoints):
     #http://www.aishack.in/2010/07/tracking-colored-objects-in-opencv/
     #http://nashruddin.com/OpenCV_Region_of_Interest_(ROI)
     #http://opencv-users.1802565.n2.nabble.com/Python-cv-Moments-Need-Help-td6044177.html
     #http://stackoverflow.com/questions/5132874/change-elements-in-a-cvseq-in-python
     img = self.getThreshold(image)
     points = []
     for i in range(4):
         cv.SetImageROI(img, (int(
             self.RectanglePoints[i][0]), int(self.RectanglePoints[i][1]),
                              int(self.RectanglePoints[i][2]),
                              int(self.RectanglePoints[i][3])))
         storage = cv.CreateMemStorage(0)
         contours = cv.FindContours(img, storage)
         moments = cv.Moments(contours)
         moment10 = cv.GetSpatialMoment(moments, 1, 0)
         moment01 = cv.GetSpatialMoment(moments, 0, 1)
         area = cv.GetCentralMoment(moments, 0, 0)
         cv.ResetImageROI(img)
         if (area != 0):
             x = self.RectanglePoints[i][0] + (moment10 / area)
             y = self.RectanglePoints[i][1] + (moment01 / area)
         else:
             if (prevpoints[i][0] == 0):
                 x = self.RectanglePoints[i][0]
                 y = self.RectanglePoints[i][1]
             else:
                 x = prevpoints[i][0]
                 y = prevpoints[i][1]
         points.append([x, y])
     return points
Example #12
def getIris(frame):
    iris = []
    copyImg = cv.CloneImage(frame)
    resImg = cv.CloneImage(frame)
    grayImg = cv.CreateImage(cv.GetSize(frame), 8, 1)
    mask = cv.CreateImage(cv.GetSize(frame), 8, 1)
    storage = cv.CreateMat(frame.width, 1, cv.CV_32FC3)
    cv.CvtColor(frame, grayImg, cv.CV_BGR2GRAY)
    cv.Canny(grayImg, grayImg, 5, 70, 3)
    cv.Smooth(grayImg, grayImg, cv.CV_GAUSSIAN, 7, 7)
    circles = getCircles(grayImg)
    iris.append(resImg)
    for circle in circles:
        rad = int(circle[0][2])
        global radius
        radius = rad
        cv.Circle(mask, centroid, rad, cv.CV_RGB(255, 255, 255), cv.CV_FILLED)
        cv.Not(mask, mask)
        cv.Sub(frame, copyImg, resImg, mask)
        x = int(centroid[0] - rad)
        y = int(centroid[1] - rad)
        w = int(rad * 2)
        h = w
        cv.SetImageROI(resImg, (x, y, w, h))
        cropImg = cv.CreateImage((w, h), 8, 3)
        cv.Copy(resImg, cropImg)
        cv.ResetImageROI(resImg)
        return (cropImg)
    return (resImg)
Example #13
def CompositeThumbnail(img, regions, thumb_size=100):
    '''extract a composite thumbnail for the regions of an image

    The composite will consist of N thumbnails side by side
    '''
    composite = cv.CreateImage((thumb_size * len(regions), thumb_size), 8, 3)
    x0 = y0 = 0
    for i, r in enumerate(regions):
        (x1, y1, x2, y2) = r.tuple()
        midx = (x1 + x2) / 2
        midy = (y1 + y2) / 2

        if (x2 - x1) > thumb_size or (y2 - y1) > thumb_size:
            # we need to shrink the region
            rsize = max(x2 + 1 - x1, y2 + 1 - y1)
            src = cuav_util.SubImage(
                img, (midx - rsize / 2, midy - rsize / 2, rsize, rsize))
            thumb = cv.CreateImage((thumb_size, thumb_size), 8, 3)
            cv.Resize(src, thumb)
        else:
            if x1 > x0 and x1 < x0 + thumb_size and y1 > y0 and y1 < y0 + thumb_size:
                r.dupe = True
                continue

            x1 = midx - thumb_size / 2
            y1 = midy - thumb_size / 2
            x0 = x1
            y0 = y1
            thumb = cuav_util.SubImage(img, (x1, y1, thumb_size, thumb_size))
        cv.SetImageROI(composite, (thumb_size * i, 0, thumb_size, thumb_size))
        cv.Copy(thumb, composite)
        cv.ResetImageROI(composite)
    return composite
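Once the thumbnails themselves are available as arrays, placing them side by side is one slice assignment per thumbnail. A sketch assuming each entry of thumbs is already a thumb_size x thumb_size BGR uint8 array; composite_thumbnails_np is an illustrative name:

import numpy as np

def composite_thumbnails_np(thumbs, thumb_size=100):
    composite = np.zeros((thumb_size, thumb_size * len(thumbs), 3), np.uint8)
    for i, thumb in enumerate(thumbs):
        # column i of the strip, equivalent to SetImageROI + Copy above
        composite[:, i * thumb_size:(i + 1) * thumb_size] = thumb
    return composite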
Example #14
    def sample_frame(self, frame):
        # Get an average of the green channel in on the forehead
        cv.SetImageROI(frame, self.face_tracker.get_forehead())
        sample = cv.Avg(frame)[1]
        cv.ResetImageROI(frame)

        return sample
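The ROI here exists only to average one channel over the forehead rectangle, which is a one-liner on a NumPy frame. A sketch assuming a BGR uint8 frame and a forehead rectangle of the form (x, y, w, h); sample_frame_np is an illustrative name:

import numpy as np

def sample_frame_np(frame, forehead):
    # mean of the green channel (index 1 in BGR) inside the rectangle
    x, y, w, h = forehead
    return float(np.mean(frame[y:y + h, x:x + w, 1]))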
Example #15
def detect_and_crop(image_file,
                    cascade,
                    crop_size,
                    output_dir=None,
                    unique=False,
                    verbose=False):

    (basename, prefix) = os.path.splitext(image_file)

    img = cv.LoadImage(image_file, 1)

    if output_dir:
        basename = output_dir + '/' + os.path.basename(basename)

    # allocate temporary images
    gray = cv.CreateImage((img.width, img.height), 8, 1)

    # convert color input image to grayscale
    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)

    # equalize histogram
    cv.EqualizeHist(gray, gray)

    faces = cv.HaarDetectObjects(
        img,
        cascade,
        cv.CreateMemStorage(0),
        # The factor by which the search window is scaled between
        # successive scans; here 1.2 means the window grows by 20% per pass
        1.2,
        # Minimum number (minus 1) of neighbor rectangles that makes
        # up an object
        3,
        # CV_HAAR_DO_CANNY_PRUNING
        1,
        # Minimum window size
        (40, 40))  # minimum size

    if faces and (len(faces) == 1 or not unique):
        count = 0
        for ((x, y, w, h), n) in faces:
            if verbose: print((x, y, w, h), n)

            cv.SetImageROI(img, (x, y, w, h))

            # create destination image
            # Note that cvGetSize will return the width and the height of ROI
            crop = cv.CreateImage((w, h), img.depth, img.nChannels)

            resized = cv.CreateImage(crop_size, cv.IPL_DEPTH_8U, img.nChannels)

            # copy subimage
            cv.Copy(img, crop)
            cv.Resize(crop, resized)

            cv.SaveImage(basename + '_' + str(count) + '.jpg', resized)
            count += 1

            # always reset the Region of Interest
            cv.ResetImageROI(img)
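For comparison, the same detect-crop-resize-save pipeline in the cv2 API needs no ROI bookkeeping at all; the crop is a slice of the loaded array. A sketch with illustrative parameter names (cascade_path, crop_size) rather than the original function's signature:

import os
import cv2

def detect_and_crop_cv2(image_file, cascade_path, crop_size=(128, 128)):
    img = cv2.imread(image_file)
    gray = cv2.equalizeHist(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY))
    cascade = cv2.CascadeClassifier(cascade_path)
    faces = cascade.detectMultiScale(gray, scaleFactor=1.2, minNeighbors=3,
                                     minSize=(40, 40))
    base, _ = os.path.splitext(image_file)
    for count, (x, y, w, h) in enumerate(faces):
        crop = img[y:y + h, x:x + w]          # slicing replaces SetImageROI
        resized = cv2.resize(crop, crop_size)
        cv2.imwrite('%s_%d.jpg' % (base, count), resized)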
Example #16
    def drawHistogram(self, image, chnum, hist_arr, plateaus):
        positions = (0, (self.Ihist.height + 10), 2 * self.Ihist.height + 20)
        colour_values = _blue, _green, _red
        colour = colour_values[chnum]
        Y = positions[chnum]

        cv.Set(self.Ihist, _trans)
        bin_w = cv.Round(float(self.Ihist.width) / self.hist_size)
        # min_value, max_value, pmin, pmax = cv.GetMinMaxHistValue(hist)

        X = image.width - self.Ihist.width
        rect = (X, Y, self.Ihist.width, self.Ihist.height)

        cv.SetImageROI(image, rect)
        scaling = self.Ihist.height / max(hist_arr)
        hist_arr *= scaling
        for i, v in enumerate(hist_arr):
            cv.Rectangle(self.Ihist, (i * bin_w, self.Ihist.height),
                         ((i + 1) * bin_w, self.Ihist.height - round(v)),
                         colour, -1, 8, 0)

        for i in plateaus[chnum]:
            cv.Rectangle(
                self.Ihist, (i * bin_w, self.Ihist.height),
                ((i + 1) * bin_w, self.Ihist.height - round(hist_arr[i])),
                _white, -1, 8, 0)

        cv.AddWeighted(image, 1 - self.hist_visibility, self.Ihist,
                       self.hist_visibility, 0.0, image)

        cv.ResetImageROI(image)
Example #17
    def _improve_corners(self, new_corners):
        '''
        New corners are built only from small rectangles, so their arms are
        very short; we should expand them as far as we can.
        @param new_corners: list of corners
        '''
        L = len(new_corners)
        ncorners = list(new_corners)
        #print 'improve'
        crit = (cv.CV_TERMCRIT_EPS + cv.CV_TERMCRIT_ITER, 30, 0.1)
        cv.ResetImageROI(self.gray_img)
        for i, cor in enumerate(new_corners):
            if cor is not None:
                #TODO Check efficiency, maybe we should do it once for all corners?
                ncorners[i] = self._improve_corner(new_corners, i)
                if ncorners[i] is None and i > 0: #this corner is not valid
                    #previous corner was already improved by wrong data
                    #we have to correct that
                    ncorners[i - 1] = self._improve_corner(new_corners, i - 1)

        if self.m_d != 0:
            scale_factor = 1 << self.m_d.scale
            for cor in ncorners:
                if cor is not None:
                    cor.scale_up(self.m_d)
                    cor.p = cv.FindCornerSubPix(self.gray_img, [cor.p],
                                                (scale_factor+1, scale_factor+1), (0, 0), crit)[0]
        return ncorners
Example #18
def publish_debug(img, results):
    imgsize = cv.GetSize(img)
    sizelist = [cv.GetSize(tmp[1]) for tmp in templates]
    width = max(imgsize[0], sum([s[0] for s in sizelist]))
    height = imgsize[1] + max([s[1] for s in sizelist])
    output = cv.CreateImage((width, height), cv.IPL_DEPTH_8U, 1)
    cv.Zero(output)
    cur_x = 0

    view_rect = (0, height-imgsize[1], imgsize[0], imgsize[1])
    cv.SetImageROI(output, view_rect)
    cv.Copy(img, output)
    for template in templates:
        size = cv.GetSize(template[1])
        cv.SetImageROI(output, (cur_x, 0, size[0], size[1]))
        cv.Copy(template[1], output)
        cur_x += size[0]

        #cv.PutText(output, tempname, (0,size[1]-16), font, cv.CV_RGB(255,255,255))
        #cv.PutText(output, str(tempthre)+'<'+str(status[1]), (0,size[1]-8), font, cv.CV_RGB(255,255,255))
        for _,status in [s for s in results if s[0] == template[3]]:
            print status
            cv.PutText(output, template[3], (0,size[1]-42), font, cv.CV_RGB(255,255,255))
            cv.PutText(output, "%7.5f"%(status[0]), (0,size[1]-24), font, cv.CV_RGB(255,255,255))
            cv.PutText(output, "%7.5f"%(status[2]), (0,size[1]-8), font, cv.CV_RGB(255,255,255))
            if status[3] : 
                cv.Rectangle(output, (0, 0), size, cv.RGB(255,255,255), 9)
        cv.SetImageROI(output, view_rect)
        for _,status in [s for s in results if s[0] == template[3]]:
            pt2 = (status[1][0]+size[0], status[1][1]+size[1])
            if status[3] : 
                cv.Rectangle(output, status[1], pt2, cv.RGB(255,255,255), 5)

    cv.ResetImageROI(output)
    debug_pub.publish(bridge.cv_to_imgmsg(output, encoding="passthrough"))
Example #19
def OverlayImage(img, img2, x, y):
    '''overlay a second image on a first image, with its top-left
    corner at position (x, y) on the first image'''
    (w, h) = cv.GetSize(img2)
    cv.SetImageROI(img, (x, y, w, h))
    cv.Copy(img2, img)
    cv.ResetImageROI(img)
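Pasting one image onto another at a fixed offset reduces to a slice assignment when both are NumPy arrays. A sketch that, like the cv version above, assumes the caller keeps the pasted region inside the destination image; overlay_image_np is an illustrative name:

def overlay_image_np(img, img2, x, y):
    # paste img2 onto img with its top-left corner at (x, y)
    h, w = img2.shape[:2]
    img[y:y + h, x:x + w] = img2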
Example #20
def decode_v2_wrapper(imgpath,
                      markpath,
                      Icol,
                      H_GAP=7,
                      W_MARK=WIDTH_MARK,
                      H_MARK=HEIGHT_MARK,
                      idx2tol=None):
    I = cv.LoadImage(imgpath, cv.CV_LOAD_IMAGE_GRAYSCALE)
    result = decode_v2(I,
                       markpath,
                       Icol,
                       False,
                       _imgpath=imgpath,
                       H_GAP=H_GAP,
                       W_MARK=W_MARK,
                       H_MARK=H_MARK,
                       idx2tol=idx2tol)
    if result is None and not DEBUG_SKIP_FLIP:
        print_dbg("...Trying FLIP...")
        cv.ResetImageROI(I)
        result = decode_v2(I,
                           markpath,
                           Icol,
                           True,
                           _imgpath=imgpath,
                           H_GAP=H_GAP,
                           W_MARK=W_MARK,
                           H_MARK=H_MARK,
                           idx2tol=idx2tol)

    if result is None:
        return None, None, None
    else:
        decoding, isflip, bbs_out = result
        return (decoding, isflip, bbs_out)
Example #21
	def rotacion_y_posicion_robot(self,robo_x=200,robo_y=100,robo_th=80):
		"""
		\brief graficar el robot en el lienzo de mapa 
		\param self no se necesita incluirlo al utilizar la funcion ya que se lo pone solo por ser la definicion de una clase
		\param robo_x coordenada x de la odometria del robot 
		\param robo_y coordenada y de la odometria del robot 
		\param robo_th Valor Th del robot
		\return Nada
		"""
		image_mapa=cv.LoadImage(self.nombre_archivo1, cv.CV_LOAD_IMAGE_COLOR)
		dimensiones_robot=self.dimensiones_robot
		image1=cv.CreateImage(dimensiones_robot,8,3)
		image_mascara=cv.CreateImage(dimensiones_robot,8,1)
		
		## rotation
		# rotate the robot
		src_center=dimensiones_robot[0]/2,dimensiones_robot[1]/2
		rot_mat=cv.CreateMat( 2, 3, cv.CV_32FC1 )
		cv.GetRotationMatrix2D(src_center, robo_th, 1.0,rot_mat);
		cv.WarpAffine(self.robot,image1,rot_mat)
		# create a mask for the black pixels
		cv.InRangeS(image1,cv.RGB(0,0,0),cv.RGB(14,14,14),image_mascara)
		cv.Not(image_mascara,image_mascara)
		#cv.ReleaseImage(image1)
		
		# shrink and position
		cv.SetImageROI(image_mapa,(robo_x,robo_y, dimensiones_robot[0], dimensiones_robot[1]));
		cv.Copy(image1,image_mapa,mask=image_mascara)
		cv.ResetImageROI(image_mapa);
		cv.SaveImage(self.nombre_archivo, image_mapa) #Saves the image#
Example #22
def CompositeThumbnail(img, regions, thumb_size=100):
    '''extract a composite thumbnail for the regions of an image

    The composite will consist of N thumbnails side by side
    '''
    composite = cv.CreateImage((thumb_size*len(regions), thumb_size),8,3)
    for i in range(len(regions)):
        (x1,y1,x2,y2) = regions[i].tuple()
        midx = (x1+x2)/2
        midy = (y1+y2)/2

        if (x2-x1) > thumb_size or (y2-y1) > thumb_size:
            # we need to shrink the region
            rsize = max(x2+1-x1, y2+1-y1)
            src = cuav_util.SubImage(img, (midx-rsize/2,midy-rsize/2,rsize,rsize))
            thumb = cv.CreateImage((thumb_size, thumb_size),8,3)
            cv.Resize(src, thumb)
        else:
            x1 = midx - thumb_size/2
            y1 = midy - thumb_size/2
            thumb = cuav_util.SubImage(img, (x1, y1, thumb_size, thumb_size))
        cv.SetImageROI(composite, (thumb_size*i, 0, thumb_size, thumb_size))
        cv.Copy(thumb, composite)
        cv.ResetImageROI(composite)
    return composite
Example #23
def recognise(image, addr, extras):
    result = ""
    x = image.width - 1
    channels = getChannels(image)
    bestBounds = []
    #cv.NamedWindow("pic", 1)
    #cv.NamedWindow("cols", 0)
    while len(result) < nSegs and x >= minW:
        x = cap.getBound(image, cap.CAP_BOUND_RIGHT, start=x)
        ratings = []
        for w in xrange(minW, min(maxW + 1, x)):
            bounds = findBounds(image, x, w)
            subImage = cap.getSubImage(image, bounds)
            flags = findColors(subImage)
            for index, flag in enumerate(flags):
                if not flag: continue
                seg = getSegment(channels[index], image, bounds)
                seg = cap.flattenImage(adjustSize(seg, segSize))
                guesses = ann.run(seg)
                charIndex = cap.argmax(guesses)
                ratings.append(
                    (guesses[charIndex], charIndex, index, bounds, seg))
        best = max(ratings, key=itemgetter(0))
        result += charset[best[1]]
        bestChannel = channels[best[2]]
        cv.SetImageROI(bestChannel, best[3])
        cv.Set(bestChannel, 96, bestChannel)
        cv.ResetImageROI(bestChannel)
        bestBounds.append(best[3])
        bestW = best[3][2]
        x -= bestW
        #print ann.run(best[4])
    cap.processExtras([cap.drawComponents(image, bestBounds)], addr, extras,
                      cap.CAP_STAGE_RECOGNISE)
    return result[::-1]
Example #24
def DetectRedEyes(image, faceCascade, eyeCascade):
    min_size = (20, 20)
    image_scale = 2
    haar_scale = 1.2
    min_neighbors = 2
    haar_flags = 0

    # Allocate the temporary images
    gray = cv.CreateImage((image.width, image.height), 8, 1)
    smallImage = cv.CreateImage((cv.Round(
        image.width / image_scale), cv.Round(image.height / image_scale)), 8,
                                1)

    # Convert color input image to grayscale
    cv.CvtColor(image, gray, cv.CV_BGR2GRAY)

    # Scale input image for faster processing
    cv.Resize(gray, smallImage, cv.CV_INTER_LINEAR)

    # Equalize the histogram
    cv.EqualizeHist(smallImage, smallImage)

    # Detect the faces
    faces = cv.HaarDetectObjects(smallImage, faceCascade,
                                 cv.CreateMemStorage(0), haar_scale,
                                 min_neighbors, haar_flags, min_size)

    # If faces are found
    if faces:
        print "face detected"
        for ((x, y, w, h), n) in faces:
            # the input to cv.HaarDetectObjects was resized, so scale the
            # bounding box of each face and convert it to two CvPoints
            pt1 = (int(x * image_scale), int(y * image_scale))
            pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
            cv.Rectangle(image, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)
            face_region = cv.GetSubRect(image,
                                        (x, int(y + (h / 4)), w, int(h / 2)))

        cv.SetImageROI(
            image,
            (pt1[0], pt1[1], pt2[0] - pt1[0], int((pt2[1] - pt1[1]) * 0.7)))
        eyes = cv.HaarDetectObjects(image, eyeCascade, cv.CreateMemStorage(0),
                                    haar_scale, min_neighbors, haar_flags,
                                    (15, 15))

        # if eyes:
        # 	# For each eye found
        # 	for eye in eyes:
        # 		# Draw a rectangle around the eye
        # 		cv.Rectangle(image,
        # 		(eye[0][0],
        # 		eye[0][1]),
        # 		(eye[0][0] + eye[0][2],
        # 		eye[0][1] + eye[0][3]),
        # 		cv.RGB(255, 0, 0), 1, 8, 0)

    cv.ResetImageROI(image)
    return image
Example #25
    def update_histogram(self, face):
        (x, y, w, h) = face
        x2 = int(x + w * FACE_BORDER)
        y2 = int(y + h * FACE_BORDER)
        w2 = int(w - w * FACE_BORDER * 2)
        h2 = int(h - h * FACE_BORDER * 2)

        cv.SetImageROI(self.hue, (x2, y2, w2, h2))
        cv.SetImageROI(self.sat, (x2, y2, w2, h2))
        cv.CalcArrHist([self.hue, self.sat], self.hist, 1)
        cv.NormalizeHist(self.hist, 255)
        cv.ResetImageROI(self.hue)
        cv.ResetImageROI(self.sat)

        cv.Rectangle(self.visualize, (x, y), (x + w, y + h), (255, 0, 0))
        cv.Rectangle(self.visualize, (x2, y2), (x2 + w2, y2 + h2),
                     (128, 150, 0))
Example #26
def get_elements(filename, treshold=50, minheight=15, minarea=200, elements=6):
    src = cv.LoadImage(filename, cv.CV_LOAD_IMAGE_GRAYSCALE)
    test = cv.CreateImage(cv.GetSize(src), 32, 3)
    dst = cv.CreateImage(cv.GetSize(src), 8, 1)
    storage = cv.CreateMemStorage(0)
    cv.Canny(src, dst, treshold, treshold * 3, 3)

    storage = cv.CreateMemStorage(0)
    seqs = cv.FindContours(dst, storage, cv.CV_RETR_TREE,
                           cv.CV_CHAIN_APPROX_NONE, (0, 0))

    res = []

    c = seqs.h_next()
    while True:
        if not c:
            break
        box = cv.BoundingRect(c)
        area = box[2] * box[3]
        #and (area > minarea)
        if (box[3] > minheight):
            res.append(box)
        c = c.h_next()

    if len(res) < elements:
        while len(res) < elements:
            m = 0
            c = 0
            for i, e in enumerate(res):
                if e[3] > m:
                    m = e[3]
                    c = i

            big = res.pop(c)
            res.append((big[0], big[1], int(big[2] * 1.0 / 2), big[3]))
            res.append((big[0] + int(big[2] * 1.0 / 2), big[1],
                        int(big[2] * 1.0 / 2), big[3]))

    #for box in res:
    #    cv.Rectangle(dst, (box[0],box[1]), (box[0]+box[2],box[1]+box[3]), cv.RGB(255,255,255))

    #cv.ShowImage('Preview2',dst)
    #cv.WaitKey()

    imgs = []
    print len(res)
    for box in res:
        cv.SetImageROI(src, box)

        tmp = cv.CreateImage((box[2], box[3]), 8, 1)

        cv.Copy(src, tmp)
        hq.heappush(imgs, (box[0], tmp))

        cv.ResetImageROI(src)

    res = [hq.heappop(imgs)[1] for i in xrange(len(res))]
    return res
Example #27
def detectRedEyes(image, faceCascade, eyeCascade):
    min_size = (20, 20)
    image_scale = 2
    haar_scale = 1.2
    min_neighbors = 2
    haar_flags = 0

    # Allocate the temporary images
    gray = cv.CreateImage((image.width, image.height), 8, 1)
    smallImage = cv.CreateImage((cv.Round(
        image.width / image_scale), cv.Round(image.height / image_scale)), 8,
                                1)
    # Convert color input image to grayscale
    cv.CvtColor(image, gray, cv.CV_BGR2GRAY)
    # Scale input image for faster processing
    cv.Resize(gray, smallImage, cv.CV_INTER_LINEAR)
    # Equalize the histogram
    cv.EqualizeHist(smallImage, smallImage)

    # Detect the faces
    faces = cv.HaarDetectObjects(smallImage, faceCascade,
                                 cv.CreateMemStorage(0), haar_scale,
                                 min_neighbors, haar_flags, min_size)

    # If faces are found
    if faces:
        for ((x, y, w, h), n) in faces:
            # the input to cv.HaarDetectObjects was resized, so scale the
            # bounding box of each face and convert it to two CvPoints
            pt1 = (int(x * image_scale), int(y * image_scale))
            pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
            cv.Rectangle(image, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)

    # Estimate the eye positions.
    # First, set the image region of interest.
    # Scaling the height by 0.6 excludes the lower part of the face and
    # lowers the probability of false detections.
    cv.SetImageROI(
        image, (pt1[0], pt1[1], pt2[0] - pt1[0], int((pt2[1] - pt1[1]) * 0.6)))

    # Detect the eyes
    eyes = cv.HaarDetectObjects(image, eyeCascade, cv.CreateMemStorage(0),
                                haar_scale, min_neighbors, haar_flags,
                                (20, 15))
    # If eyes were found
    if eyes:
        # For each eye found
        for eye in eyes:
            # Draw a rectangle around the eye
            cv.Rectangle(image, (eye[0][0], eye[0][1]),
                         (eye[0][0] + eye[0][2], eye[0][1] + eye[0][3]),
                         cv.RGB(255, 0, 0), 1, 8, 0)

    # Finally, reset the image region of interest (otherwise subsequent
    # drawing will not be rendered correctly)
    cv.ResetImageROI(image)

    return image
Example #28
 def putText(self, text, point, color=(0, 0, 255)):
     '''
     Draws text on self.image
     @param text:
     @param point:
     @param color:
     '''
     cv.ResetImageROI(self.img)
     cv.PutText(self.img, str(text), point, self.font, color)
Example #29
def get_face_regions(ann, ann2, img, classifier):
    img = normalize_rgb(img, aggressive=0.005)
    mask, seqs = get_mask_with_contour(img,
                                       ret_cont=True,
                                       ret_img=True,
                                       with_init_mask=False)
    if not seqs:
        return img


#    show_image(mask)
    skin_regions, min_rects = get_skin_rectangles(seqs)
    skin_regions = merge_boxes(skin_regions)
    draw_boxes(skin_regions, img, with_text=False, thickness=1)
    #
    #    small_img = prepare_bw(scale_image(img))
    #    cv.EqualizeHist(small_img, small_img)
    #    objects = cv.HaarDetectObjects(small_img, classifier, cv.CreateMemStorage(0), 1.1, 3,
    #                         cv.CV_HAAR_DO_CANNY_PRUNING | cv.CV_HAAR_FIND_BIGGEST_OBJECT | cv.CV_HAAR_DO_ROUGH_SEARCH,
    #                         min_size=(50,50))
    #    found = [[k*2 for k in obj] for obj, num in objects]
    #    draw_boxes(found, img, with_text=False, color=cv.RGB(255,255,255))

    for region in skin_regions:
        #        cv.SetImageROI(img, region)

        #        cv.ResetImageROI(img)
        #        if objects:

        cv.SetImageROI(img, region)
        region_img = cv.CreateImage(region[2:], img.depth, img.channels)
        cv.Copy(img, region_img)
        found = []
        try:
            for i, (sample, box) in enumerate(
                    samples_generator(region_img,
                                      32,
                                      32,
                                      slide_step=4,
                                      resize_step=1.5,
                                      bw_from_v_plane=False)):
                #                cv.SaveImage(root_folder+"webcam/%d.png" % (p+i), sample)
                nf, f = ann.activate(get_flatten_image(sample))
                nf2, f2 = ann2.activate(get_flatten_image(laplace(sample)))
                buf_nf, buf_f = tuple(ann['out'].inputbuffer[0])
                _, buf_f2 = tuple(ann2['out'].inputbuffer[0])
                if f > nf and f2 > nf2 and buf_f > 250000 and buf_f2 > 50000:
                    found.append(box)
        except Exception:
            pass
        if found:
            draw_boxes(found,
                       img,
                       with_text=False,
                       color=cv.RGB(255, 255, 255))
        cv.ResetImageROI(img)
    return img
Example #30
 def DetectFace(self, image, x, y, w, h, boolean=True):
     if boolean:
         if x-40 > 0:
             x -= 40
         if y-30 > 0:
             y -= 30
     cv.Rectangle(image, (x,y),(x+w,y+h), cv.RGB(155, 255, 25), 2)
     cv.ResetImageROI(image)
     return image, (x, y, w, h)