Example #1
    def segmentize(self, input_img):
        histogram = self.__calculate_histogram(input_img)
        (peaks, bottoms) = find_peaks(histogram, max(histogram) / 8)

        for peak in peaks:
            if between(peak, self.expectations['table']):
                self.ranges['table'] = find_hill(histogram, peak, 0.01)
                break  #stop searching, we want the first peak since that is the highest large object.

        if 'table' not in self.ranges:
            if self.verbose:
                cv.ShowImage("histogram", self.__render_histogram(histogram))
            return None

        self.ranges['objects'] = (1, self.ranges['table'][0] - 1)

        print self.ranges

        if self.verbose:
            cv.ShowImage("histogram", self.__render_histogram(histogram))

        objects_img = create_empty_image((input_img.width, input_img.height))
        cv.SetImageROI(objects_img, cv.GetImageROI(input_img))
        cv.InRangeS(input_img, cv.Scalar(self.ranges['objects'][0]),
                    cv.Scalar(self.ranges['objects'][1]), objects_img)

        table_img = create_empty_image((input_img.width, input_img.height))
        cv.SetImageROI(table_img, cv.GetImageROI(input_img))
        cv.InRangeS(input_img, cv.Scalar(self.ranges['table'][0]),
                    cv.Scalar(self.ranges['table'][1]), table_img)

        return {'objects': objects_img, 'table': table_img}
Example #2
def size_image(img, imgsize):
    # check if we need to crop out ROI
    roiWidth = img.width
    roiHeight = img.height
    if (img.width > imgsize[1]):
        roiWidth = imgsize[1]

    if (img.height > imgsize[0]):
        roiHeight = imgsize[0]

    roi = (0, 0, roiWidth, roiHeight)
    cv.SetImageROI(img, roi)
    imgTrim = cv.CreateImage((roi[2], roi[3]), img.depth, img.nChannels)
    cv.Copy(img, imgTrim)

    # check if we need to pad
    padSize = 0
    padSize = max(padSize, imgsize[0] - imgTrim.height)
    padSize = max(padSize, imgsize[1] - imgTrim.width)

    if padSize == 0:  # no padding needed
        return imgTrim
    else:
        padSize = int(round((padSize + .5) / 2.))
        # copy make border
        imgPad = cv.CreateImage(
            (imgTrim.width + 2 * padSize, imgTrim.height + 2 * padSize),
            img.depth, img.nChannels)
        cv.CopyMakeBorder(imgTrim, imgPad, (0, 0), 0)
        roi = (0, 0, imgsize[1], imgsize[0])
        cv.SetImageROI(imgPad, roi)
        imgFinal = cv.CreateImage((roi[2], roi[3]), img.depth, img.nChannels)
        cv.Copy(imgPad, imgFinal)
        return imgFinal
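# A minimal usage sketch (not part of the original example). The file name is
# hypothetical, and imgsize is given as (height, width), which is how the
# function above indexes it.
img = cv.LoadImage("input.png")
fixed = size_image(img, (240, 320))   # crop and/or pad towards 320x240 (width x height)
cv.SaveImage("fixed.png", fixed)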
Example #3
def removeBadBackground(seg):
    threshUp = cv.CreateImage(cv.GetSize(seg), cv.IPL_DEPTH_8U, 1)
    comparison = cv.CreateImage(cv.GetSize(seg), cv.IPL_DEPTH_8U, 1)
    visitMask = cv.CreateImage(cv.GetSize(seg), cv.IPL_DEPTH_8U, 1)
    ffMask = cv.CreateImage((seg.width + 2, seg.height + 2), cv.IPL_DEPTH_8U,
                            1)
    cv.Threshold(seg, threshUp, 1, 255, cv.CV_THRESH_BINARY)
    cv.Zero(visitMask)
    cv.Zero(ffMask)
    for x in xrange(seg.width):
        for y in xrange(seg.height):
            if seg[y, x] != 96 or visitMask[y, x] == 255: continue
            comp = cv.FloodFill(threshUp, (x, y), 0, 0, 0,
                                4 + cv.CV_FLOODFILL_MASK_ONLY + (255 << 8),
                                ffMask)
            rect = comp[2]
            cv.SetImageROI(ffMask, cap.shiftRect(rect, 1, 1))
            cv.OrS(ffMask, 1, ffMask)
            cv.SetImageROI(seg, rect)
            cv.SetImageROI(comparison, rect)
            cv.Cmp(
                seg, ffMask, comparison,
                cv.CV_CMP_EQ)  # 'comparison' does not need to be zeroed later
            intersect = cv.CountNonZero(comparison)
            cv.SetImageROI(visitMask, rect)
            cv.Or(visitMask, ffMask, visitMask)
            cv.ResetImageROI(visitMask)
            if intersect == 0:
                cv.Set(seg, 0, ffMask)
            cv.Zero(ffMask)
            cv.ResetImageROI(seg)
            cv.ResetImageROI(ffMask)
    return seg
Example #4
    def draw(self, img, pixmapper):
        '''draw the icon on the image'''

        if self.trail is not None:
            self.trail.draw(img, pixmapper)

        icon = self.img()
        (px, py) = pixmapper(self.latlon)

        # find top left
        px -= icon.width / 2
        py -= icon.height / 2
        w = icon.width
        h = icon.height

        (px, py, sx, sy, w, h) = self.clip(px, py, w, h, img)

        cv.SetImageROI(icon, (sx, sy, w, h))
        cv.SetImageROI(img, (px, py, w, h))
        cv.Add(icon, img, img)
        cv.ResetImageROI(img)
        cv.ResetImageROI(icon)

        # remember where we placed it for clicked()
        self.posx = px + w / 2
        self.posy = py + h / 2
Example #5
def crop(image, offset, size):
    w, h = size

    # If a border will be needed, use CopyMakeBorder.
    # If we only set the ROI, no border is created and the resulting image is smaller.
    if offset[0]>0 or \
       offset[1]>0 or \
       offset[0]+cv.GetSize(image)[0]<w or \
       offset[1]+cv.GetSize(image)[1]<h:

        finalImg = cv.CreateImage((w, h), cv.IPL_DEPTH_8U, 3)

        # offset may have negative values, if there will be a right/bottom border
        useOffset = (max(0, offset[0]), max(0, offset[1]))

        # Need to crop first as CopyMakeBorder will complain if the source is too big for the destination
        # (The ROI is the opposite of the offset)
        cv.SetImageROI(image, (-offset[0], -offset[1], w, h))

        cv.CopyMakeBorder(image, finalImg, useOffset, GAP_BORDER)

        return finalImg

    else:
        cv.SetImageROI(image, (-offset[0], -offset[1], w, h))
        return image
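# A minimal usage sketch (not part of the original example). The file name is
# hypothetical, and GAP_BORDER is assumed to be defined elsewhere in the
# original module, as the function above requires.
img = cv.LoadImage("frame.png")
view = crop(img, (-10, -10), (320, 240))   # drop 10 px at the top-left; a border is only added if the source cannot cover 320x240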
Example #6
def publish_debug(img, results):
    imgsize = cv.GetSize(img)
    sizelist = [cv.GetSize(tmp[1]) for tmp in templates]
    width = max(imgsize[0], sum([s[0] for s in sizelist]))
    height = imgsize[1] + max([s[1] for s in sizelist])
    output = cv.CreateImage((width, height), cv.IPL_DEPTH_8U, 1)
    cv.Zero(output)
    cur_x = 0

    view_rect = (0, height-imgsize[1], imgsize[0], imgsize[1])
    cv.SetImageROI(output, view_rect)
    cv.Copy(img, output)
    for template in templates:
        size = cv.GetSize(template[1])
        cv.SetImageROI(output, (cur_x, 0, size[0], size[1]))
        cv.Copy(template[1], output)
        cur_x += size[0]

        #cv.PutText(output, tempname, (0,size[1]-16), font, cv.CV_RGB(255,255,255))
        #cv.PutText(output, str(tempthre)+'<'+str(status[1]), (0,size[1]-8), font, cv.CV_RGB(255,255,255))
        for _,status in [s for s in results if s[0] == template[3]]:
            print status
            cv.PutText(output, template[3], (0,size[1]-42), font, cv.CV_RGB(255,255,255))
            cv.PutText(output, "%7.5f"%(status[0]), (0,size[1]-24), font, cv.CV_RGB(255,255,255))
            cv.PutText(output, "%7.5f"%(status[2]), (0,size[1]-8), font, cv.CV_RGB(255,255,255))
            if status[3] : 
                cv.Rectangle(output, (0, 0), size, cv.RGB(255,255,255), 9)
        cv.SetImageROI(output, view_rect)
        for _,status in [s for s in results if s[0] == template[3]]:
            pt2 = (status[1][0]+size[0], status[1][1]+size[1])
            if status[3] : 
                cv.Rectangle(output, status[1], pt2, cv.RGB(255,255,255), 5)

    cv.ResetImageROI(output)
    debug_pub.publish(bridge.cv_to_imgmsg(output, encoding="passthrough"))
Example #7
def SubImage(img, region):
    '''return a subimage as a new image. This allows
    for the region going past the edges.
    region is of the form (x1,y1,width,height)'''
    (x1, y1, width, height) = region
    zeros = numpy.zeros((height, width, 3), dtype='uint8')
    ret = cv.GetImage(cv.fromarray(zeros))
    (img_width, img_height) = cv.GetSize(img)
    if x1 < 0:
        sx1 = 0
        xofs = -x1
    else:
        sx1 = x1
        xofs = 0
    if y1 < 0:
        sy1 = 0
        yofs = -y1
    else:
        sy1 = y1
        yofs = 0
    if sx1 + width <= img_width:
        w = width
    else:
        w = img_width - sx1
    if sy1 + height <= img_height:
        h = height
    else:
        h = img_height - sy1
    cv.SetImageROI(img, (sx1, sy1, w - xofs, h - yofs))
    cv.SetImageROI(ret, (xofs, yofs, w - xofs, h - yofs))
    cv.Copy(img, ret)
    cv.ResetImageROI(img)
    cv.ResetImageROI(ret)
    return ret
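# A minimal usage sketch (not part of the original example; hypothetical path).
# The requested region may extend past the image edges; the out-of-range area
# comes back as black pixels. Assumes numpy and cv are imported as in the module.
img = cv.LoadImage("frame.png")
patch = SubImage(img, (-20, -20, 100, 100))
cv.SaveImage("patch.png", patch)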
Example #8
 def update_histogram(self, region):
     cv.SetImageROI(self.hue, region)
     cv.SetImageROI(self.sat, region)
     cv.CalcArrHist([self.hue, self.sat], self.face_hist, 1)
     #cv.NormalizeHist(self.face_hist, 255)
     cv.ResetImageROI(self.hue)
     cv.ResetImageROI(self.sat)
Example #9
def findCCs(image, erasecol=0, doContinue=None, doSkip=None, bRange=0, connectivity=8):
    """
    Finds all connected components in the image.
    doContinue is a function applied to the color of every new pixel in the image.
    If it is true, this pixel is ignored. Default: <= 128
    doSkip is a function applied to every new connected component found by the
    function. If it is true, this component will not be included in the result.
    Default: do not skip anything.
    """
    if doContinue is None:
        doContinue = lambda col: col <= 128
    if doSkip is None:
        doSkip = lambda comp: False
    mask = cv.CreateImage((image.width + 2, image.height + 2), cv.IPL_DEPTH_8U, 1)
    cv.Zero(mask)
    components = []
    for x in range(image.width):
        for y in range(image.height):
            if doContinue(image[y, x]):
                continue
            comp = cv.FloodFill(image, (x, y), 0, bRange, bRange, connectivity + cv.CV_FLOODFILL_MASK_ONLY + (255 << 8), mask) # here 3rd argument is ignored
            region = shiftRect(comp[2], 1, 1)
            if not doSkip(comp):
                seg = cvext.getSubImage(mask, region)
                components.append((comp[0], comp[1], comp[2], seg))
            cv.SetImageROI(image, comp[2])
            cv.SetImageROI(mask, region)
            cv.Set(image, erasecol, mask)
            cv.Zero(mask)
            cv.ResetImageROI(image)
            cv.ResetImageROI(mask)
    return components
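# A minimal usage sketch (not part of the original example; hypothetical path).
# Assumes shiftRect and cvext.getSubImage are available as in the original
# module. comp[0] is the component area reported by cv.FloodFill.
img = cv.LoadImage("blobs.png", cv.CV_LOAD_IMAGE_GRAYSCALE)
ccs = findCCs(img,
              erasecol=0,
              doContinue=lambda col: col <= 128,   # ignore dark pixels
              doSkip=lambda comp: comp[0] < 20)    # drop components smaller than 20 px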
Example #10
def repaintCCs(image, doRepaint=None, returnMask=False, resizeMask=True, doFillBackground=True, bgPoint=(0, 0), newcol=255, connectivity=4):
    if doRepaint is None:
        doRepaint = lambda comp, col: False
    resultMask = cv.CreateImage((image.width + 2, image.height + 2), image.depth, image.nChannels)
    tempMask = cv.CreateImage((image.width + 2, image.height + 2), image.depth, image.nChannels)
    visitMask = cv.CreateImage((image.width + 2, image.height + 2), image.depth, image.nChannels)
    cv.Zero(resultMask)
    cv.Zero(tempMask)
    cv.Zero(visitMask)
    if doFillBackground:
        cv.FloodFill(image, bgPoint, 0, 0, 0, connectivity + cv.CV_FLOODFILL_MASK_ONLY + (255 << 8), visitMask)
    for x in xrange(image.width):
        for y in xrange(image.height):
            if visitMask[y + 1, x + 1] == 255:
                continue
            comp = cv.FloodFill(image, (x, y), 0, 0, 0, connectivity + cv.CV_FLOODFILL_MASK_ONLY + (255 << 8), tempMask)
            region = shiftRect(comp[2], 1, 1)
            cv.SetImageROI(tempMask, region)
            cv.SetImageROI(visitMask, region)
            cv.Or(tempMask, visitMask, visitMask)
            if doRepaint(comp, image[y, x]):
                cv.SetImageROI(resultMask, region)
                cv.Or(tempMask, resultMask, resultMask)
                cv.ResetImageROI(resultMask)
            cv.Zero(tempMask)
            cv.ResetImageROI(tempMask)
            cv.ResetImageROI(visitMask)
    if returnMask:
        if resizeMask: return cap.getSubImage(resultMask, (1, 1, image.width, image.height))
        else: return resultMask
    else:    
        cv.SetImageROI(resultMask, (1, 1, image.width, image.height))
        cv.Set(image, newcol, resultMask)
        return image
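# A minimal usage sketch (not part of the original example; hypothetical path).
# Repaint every connected component smaller than 50 pixels to white (255),
# treating the region flood-filled from (0, 0) as background. Assumes shiftRect
# is available as in the original module.
img = cv.LoadImage("mask.png", cv.CV_LOAD_IMAGE_GRAYSCALE)
cleaned = repaintCCs(img, doRepaint=lambda comp, col: comp[0] < 50)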
Example #11
def minmax_cv(imgpaths,
              do_align=False,
              rszFac=1.0,
              trfm_type='rigid',
              minArea=np.power(2, 16),
              bbs_map=None,
              imgCache=None):
    """ Generates min/max overlays for IMGPATHS. If DO_ALIGN is
    True, then this also aligns every image to the first image in
    IMGPATHS.
    Input:
        list IMGPATHS: [str imgpath_i, ...]
        bool DO_ALIGN:
        float RSZFAC: Resizing factor for alignment.
        dict BBS_MAP: maps {str imgpath: (x1,y1,x2,y2)}
    Output:
        cvMat minimg, cvMat maximg.
    """
    def load_image(imgpath):
        if imgCache is None:
            return cv.LoadImage(imgpath, cv.CV_LOAD_IMAGE_GRAYSCALE)
        else:
            ((img, imgpath), isHit) = imgCache.load(imgpath)
            return img

    if bbs_map is None:
        bbs_map = {}
    imgpath = imgpaths[0]
    bb0 = bbs_map.get(imgpath, None)
    Imin = load_image(imgpath)
    if bb0:
        coords = (bb0[0], bb0[1], bb0[2] - bb0[0], bb0[3] - bb0[1])
        coords = tuple(map(int, coords))
        cv.SetImageROI(Imin, coords)
    Imax = cv.CloneImage(Imin)

    #Iref = np.asarray(cv.CloneImage(Imin)) if do_align else None
    Iref = (iplimage2np(cv.CloneImage(Imin)) / 255.0) if do_align else None
    for imgpath in imgpaths[1:]:
        I = load_image(imgpath)
        bb = bbs_map.get(imgpath, None)
        if bb:
            bb = tuple(map(int, bb))
            cv.SetImageROI(I, (bb[0], bb[1], bb[2] - bb[0], bb[3] - bb[1]))
        Iout = matchsize(I, Imax)
        if do_align:
            tmp_np = iplimage2np(cv.CloneImage(Iout)) / 255.0
            H, Ireg, err = imagesAlign(tmp_np,
                                       Iref,
                                       trfm_type=trfm_type,
                                       fillval=0,
                                       rszFac=rszFac,
                                       minArea=minArea)
            Ireg *= 255.0
            Ireg = Ireg.astype('uint8')
            Iout = np2iplimage(Ireg)
        cv.Max(Iout, Imax, Imax)
        cv.Min(Iout, Imin, Imin)
    return Imin, Imax
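# A minimal usage sketch (not part of the original example; hypothetical paths).
# Assumes the module's helpers (matchsize, iplimage2np, ...) are available.
# Imin keeps the darkest value seen at each pixel, Imax the brightest.
paths = ["scan_01.png", "scan_02.png", "scan_03.png"]
Imin, Imax = minmax_cv(paths, do_align=False)
cv.SaveImage("overlay_min.png", Imin)
cv.SaveImage("overlay_max.png", Imax)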
Example #12
def estimate_ballot_rot(I, Imarkfull, bbs, MAX_THETA=2.0, K=5):
    roi_prev = cv.GetImageROI(I)
    w_markfull, h_markfull = cv.GetSize(Imarkfull)
    theta_tm = None
    for bb in bbs:
        roi_cur = tuple(
            map(lambda x: int(round(x)),
                (roi_prev[0] + bb[0], roi_prev[1] + bb[1], bb[2] - bb[0],
                 bb[3] - bb[1])))
        cv.SetImageROI(I, roi_cur)
        w_cur, h_cur = cv.GetSize(I)

        if DEBUG_SAVEIMGS:
            print_dbg("<><><><> Saving '_Imiddle.png' <><><><>")
            cv.SaveImage("_Imiddle.png", I)
            pdb.set_trace()

        matches = tempmatch.get_tempmatches(
            Imarkfull, [I],
            T=0.9,
            do_smooth=tempmatch.SMOOTH_BOTH_BRD,
            xwinI=5,
            ywinI=5,
            xwinA=5,
            ywinA=5)[0]
        matches = sorted(matches, key=lambda t: t[0])
        if matches:
            xs = np.array([t[0] for t in matches])
            ys = np.array([cv.GetSize(I)[1] - t[1] for t in matches])
            if len(xs) <= 1:
                print_dbg("==== Couldn't find enough marks in '_Imiddle.png'.")
                continue
            # Filter out any obvious outliers
            lonely_idxs = detect_lonely_vals(ys, h_markfull)
            xs = np.delete(xs, lonely_idxs)
            ys = np.delete(ys, lonely_idxs)
            if len(xs) <= 1:
                print_dbg("==== Couldn't find enough marks in '_Imiddle.png'.")
                continue
            # Discovered marks must take up at least K*w_markfull space.
            x_area = max(xs) - min(xs)
            if x_area < (K * w_markfull):
                print_dbg(
                    "==== Marks only took up {0}, too small space.".format(
                        x_area))
            else:
                theta_tm_ = estimate_rotation(xs, ys)
                if abs(theta_tm_) > MAX_THETA:
                    print_dbg(
                        "==== Theta was too large: {0}".format(theta_tm_))
                else:
                    theta_tm = theta_tm_
                    break
        else:
            print_dbg("==== Couldn't find any marks in '_Imiddle.png'.")

    cv.SetImageROI(I, roi_prev)

    return theta_tm
Example #13
 def threshold(self):
     for x in range(0, self.size[0], 30):
         for y in range(0, self.size[1], 30):
             cv.SetImageROI(self.gray_img, (x, y, 30, 30))
             cv.SetImageROI(self.bw_img, (x, y, 30, 30))
             cv.Threshold(
                 self.gray_img, self.bw_img, 127, 255, cv.CV_THRESH_OTSU)
     cv.ResetImageROI(self.gray_img)
     cv.ResetImageROI(self.bw_img)
Example #14
    def red_eye(self):
        self.load_cascade_file()
        faces = [
            face for face in self.context.request.focal_points
            if face.origin == 'Face Detection'
        ]
        if faces:
            engine = self.context.modules.engine
            mode, data = engine.image_data_as_rgb()
            mode = mode.lower()
            sz = engine.size
            image = cv.CreateImageHeader(sz, cv.IPL_DEPTH_8U, 3)
            cv.SetData(image, data)

            for face in faces:
                face_x = int(face.x - face.width / 2)
                face_y = int(face.y - face.height / 2)

                face_roi = (int(face_x), int(face_y), int(face.width),
                            int(face.height))

                cv.SetImageROI(image, face_roi)

                eyes = cv.HaarDetectObjects(image, self.cascade,
                                            cv.CreateMemStorage(0), HAAR_SCALE,
                                            MIN_NEIGHBORS, HAAR_FLAGS,
                                            MIN_SIZE)

                for (x, y, w, h), other in self.filter_eyes(eyes):
                    # Set the image Region of interest to be the eye area [this reduces processing time]
                    cv.SetImageROI(image, (face_x + x, face_y + y, w, h))

                    if self.context.request.debug:
                        cv.Rectangle(image, (0, 0), (w, h),
                                     cv.RGB(255, 255, 255), 2, 8, 0)

                    for pixel in self.get_pixels(image, w, h, mode):
                        green_blue_avg = (pixel['g'] + pixel['b']) / 2

                        if not green_blue_avg:
                            red_intensity = RED_THRESHOLD
                        else:
                            # Calculate the intensity compared to blue and green average
                            red_intensity = pixel['r'] / green_blue_avg

                        # If the red intensity exceeds the threshold, lower the value
                        if red_intensity >= RED_THRESHOLD:
                            new_red_value = (pixel['g'] + pixel['b']) / 2
                            # Insert the new red value for the pixel to the image
                            cv.Set2D(
                                image, pixel['y'], pixel['x'],
                                cv.RGB(new_red_value, pixel['g'], pixel['b']))

                    # Reset the image region of interest back to full image
                    cv.ResetImageROI(image)

            self.context.modules.engine.set_image_data(image.tostring())
Example #15
def minmax_cv_v2(imgpaths,
                 Iref_imP=None,
                 do_align=False,
                 rszFac=1.0,
                 trfm_type='rigid',
                 minArea=np.power(2, 16),
                 bbs_map=None):
    """ Computes the overlays of IMGPATHS, but uses the IREF_IMP as the
    reference image to align against, if DO_ALIGN is True. Mainly a 
    function written for the parallel version (minmax_cv is still fine for
    single-process use).
    """
    bbs_map = {} if bbs_map is None else bbs_map
    if do_align:
        Iref = cv.LoadImage(Iref_imP, cv.CV_LOAD_IMAGE_GRAYSCALE)
        bbRef = bbs_map.get(Iref_imP, None)
        if bbRef:
            coords = tuple(
                map(int, (bbRef[0], bbRef[1], bbRef[2] - bbRef[0],
                          bbRef[3] - bbRef[1])))
            cv.SetImageROI(Iref, coords)
    else:
        Iref = None
    # 0.) Prep first image
    imgpath0 = imgpaths[0]
    Imin = cv.LoadImage(imgpath0, cv.CV_LOAD_IMAGE_GRAYSCALE)
    bb0 = bbs_map.get(imgpath0, None)
    if bb0:
        coords = tuple(
            map(int, (bb0[0], bb0[1], bb0[2] - bb0[0], bb0[3] - bb0[1])))
        cv.SetImageROI(Imin, coords)
    Imax = cv.CloneImage(Imin)
    Iref_np = (iplimage2np(cv.CloneImage(Iref)) / 255.0) if do_align else None
    for imgpath in imgpaths[1:]:
        I = cv.LoadImage(imgpath, cv.CV_LOAD_IMAGE_GRAYSCALE)
        bb = bbs_map.get(imgpath, None)
        if bb:
            bb = tuple(map(int, bb))
            cv.SetImageROI(I, (bb[0], bb[1], bb[2] - bb[0], bb[3] - bb[1]))
        Iout = matchsize(I, Imax)
        if do_align:
            tmp_np = iplimage2np(cv.CloneImage(Iout)) / 255.0
            H, Ireg, err = imagesAlign(tmp_np,
                                       Iref_np,
                                       trfm_type=trfm_type,
                                       fillval=0,
                                       rszFac=rszFac,
                                       minArea=minArea)
            Ireg *= 255.0
            Ireg = Ireg.astype('uint8')
            Iout = np2iplimage(Ireg)
        cv.Max(Iout, Imax, Imax)
        cv.Min(Iout, Imin, Imin)
    return Imin.tostring(), Imax.tostring(), cv.GetSize(Imin)
Example #16
def capture_draw():
    img = cv.QueryFrame(capture)
    # scale your big ole face down to something small
    thumb = cv.CreateMat(img.height / SCALE, img.width / SCALE, cv.CV_8UC3)
    cv.Resize(img, thumb)
    faces = get_face_roi(thumb)
    for (x, y, w, h), n in faces:
        temp_offset = (x * SCALE, y * SCALE)
        cv.SetImageROI(img,
                       ((x) * SCALE, (y) * SCALE, (w) * SCALE, (h) * SCALE))
        roi_image = cv.CreateImage(cv.GetSize(img), img.depth, img.nChannels)
        cv.Copy(img, roi_image)
        cv.ResetImageROI(img)

        cv.Rectangle(img, (x * SCALE, y * SCALE),
                     (x * SCALE + w * SCALE, y * SCALE + h * SCALE),
                     (255, 0, 0))
        cv.PutText(img, 'face', (x * SCALE, y * SCALE), font, (200, 200, 200))

        FEATURE_SCALE = (float(roi_image.width) / ROI_TARGET_SIZE[0],
                         float(roi_image.height) / ROI_TARGET_SIZE[1])
        roi_thumb = cv.CreateImage((int(roi_image.width / FEATURE_SCALE[0]),
                                    int(roi_image.height / FEATURE_SCALE[1])),
                                   cv.IPL_DEPTH_8U, 3)
        cv.Resize(roi_image, roi_thumb)

        features = get_features(roi_thumb)
        cv.ShowImage("ROI", roi_image)
        for name in features:
            if features[name] is not None:
                for (x1, y1, w1, h1), n1 in features[name]:
                    cv.SetImageROI(
                        roi_image,
                        (int(x1 * FEATURE_SCALE[0]), int(y1 * FEATURE_SCALE[1]),
                         int(w1 * FEATURE_SCALE[0]), int(h1 * FEATURE_SCALE[1])))
                    feature_image = cv.CreateImage(cv.GetSize(roi_image),
                                                   roi_image.depth,
                                                   roi_image.nChannels)
                    cv.Copy(roi_image, feature_image)
                    cv.ResetImageROI(roi_image)
                    cv.ShowImage(name, feature_image)
                    cv.PutText(img, name,
                               (int(temp_offset[0] + x1 * FEATURE_SCALE[0]),
                                int(temp_offset[1] + y1 * FEATURE_SCALE[1])),
                               font, (200, 200, 200))
                    cv.Rectangle(
                        img, (int(temp_offset[0] + x1 * FEATURE_SCALE[0]),
                              int(temp_offset[1] + y1 * FEATURE_SCALE[1])),
                        (int(temp_offset[0] + (x1 + w1) * FEATURE_SCALE[0]),
                         int(temp_offset[1] + (y1 + h1) * FEATURE_SCALE[1])),
                        (0, 255, 255))
    cv.ShowImage("Whole Image", img)
Example #17
def points_process_images(images,
                          roi,
                          cam_degree=30,
                          color=True,
                          color_images=[],
                          threshold=220,
                          intrinsics=None,
                          distortion=None):
    """
    extract 3d pixels and colors from either left or right set of images
    """

    angles = [
        math.radians(i * (360.00 / len(images)))
        for i in range(0, len(images))
    ]
    points = []
    xypoints = []
    w, h = roi[2:4]

    for i, path in enumerate(images):
        img = cv.LoadImage(path)

        if intrinsics and distortion:
            source = cv.CloneImage(img)
            cv.Undistort2(source, img, intrinsics, distortion)

        cv.SetImageROI(img, roi)
        xy = points_max_cols(img, threshold=threshold)

        xyz = [
            points_triangulate((x - (w / 2), y), angles[i], cam_degree)
            for x, y in xy
        ]

        if color:
            color = cv.LoadImage(color_images[i])

            if intrinsics and distortion:
                source = cv.CloneImage(color)
                cv.Undistort2(source, color, intrinsics, distortion)

            cv.SetImageROI(color, roi)
            colors = [list(color[y, x]) for x, y in xy]
            [xyz[i].extend([r, g, b]) for i, (b, g, r) in enumerate(colors)]

        else:
            xyz = [[x, y, z, 1.0, 1.0, 1.0] for x, y, z in xyz]

        points.extend(xyz)

    return points
Example #18
def is_backside(decodings, mark_locs, I, Izero):
    """ Applies Sequoia-specific knowledge. A backside ballot side has
    the following 'barcode' values (assume right-side-up):
        UpperLeft: "0"
        UpperRight: ""    (Just a black bar)
        LowerLeft: "0"
        LowerRight: "0"

    Note: This doesn't detect empty backsides. Assumes that the decoder
    is 'good enough' such that it will not spuriously return "" or "0"
    for a real front-side barcode.
    
    Output: 
        bool isBack, bool isFlip
    """
    if decodings[0] == "0" and decodings[1] == "":
        # Possibly up-right backside.
        return True, False
    elif decodings[0] == "0" and decodings[1] == "0":
        return True, True
    # Try to handle if ballot is partially-cutoff.
    _roi = cv.GetImageROI(I)
    cv.ResetImageROI(I)
    w_img, h_img = cv.GetSize(I)
    w_fact, h_fact = 0.19, 0.13
    w_patch, h_patch = int(round(w_img * w_fact)), int(round(h_img * h_fact))
    if decodings[0] == "":
        # LHS is possibly cut-off
        cv.SetImageROI(I, (w_img - w_patch, h_img - h_patch, w_patch, h_patch))
        x1, y1, score = tempmatch.bestmatch(
            Izero, [I], do_smooth=tempmatch.SMOOTH_IMG_BRD)[0]
        cv.SetImageROI(I, _roi)
        if score >= 0.9:
            return True, True if decodings[1] == "" else False
        else:
            # No idea! What? Definitely a strange case...
            print "...Wow, this is unexpected!"
            return False, None
    elif decodings[1] == "" and decodings[0] == "0":
        # RHS is possibly cut-off
        cv.SetImageROI(I, (0, h_img - h_patch, w_patch, h_patch))
        x1, y1, score = tempmatch.bestmatch(
            Izero, [I], do_smooth=tempmatch.SMOOTH_IMG_BRD)[0]
        cv.SetImageROI(I, _roi)
        if score >= 0.9:
            return True, False
        else:
            return True, True
    return False, None
Example #19
def merge_images(img1, img2, vertical=None):
    w, h = sizeOf(img2)
    if h * 1.3 > w and not vertical:
        new_size, second_roi = (w * 2, h), (w, 0, w, h)
    else:
        new_size, second_roi = (w, h * 2), (0, h, w, h)
    merged = cv.CreateImage(new_size, img1.depth, img1.channels)
    cv.SetImageROI(merged, (0, 0, w, h))
    cv.Copy(img1, merged)
    cv.SetImageROI(merged, second_roi)
    cv.Copy(img2, merged)
    cv.ResetImageROI(merged)
    return merged
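# A minimal usage sketch (not part of the original example; hypothetical paths).
# Assumes the sizeOf helper is available as in the original module and that both
# images have the same size; they end up side by side or stacked, depending on
# the aspect ratio and the vertical flag.
a = cv.LoadImage("left.png")
b = cv.LoadImage("right.png")
both = merge_images(a, b)
cv.SaveImage("merged.png", both)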
Example #20
def mean(img, x, y, size=10):
    oldRoi = cv.GetImageROI(img)

    # clamp the centre point into the image bounds
    x = min(x, img.width - 1)
    x = max(x, 0)
    y = min(y, img.height - 1)
    y = max(y, 0)

    cv.SetImageROI(img, (x - size / 2, y - size / 2, size, size))
    avg = cv.Avg(img)

    cv.SetImageROI(img, oldRoi)

    return (avg[0] + avg[1] + avg[2]) / 3.0
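# A minimal usage sketch (not part of the original example; hypothetical path):
# average brightness of a 10x10 patch centred on (120, 80) of a 3-channel image;
# the point must lie inside the image.
img = cv.LoadImage("frame.png")
level = mean(img, 120, 80, size=10)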
Example #21
def pitch_detect(intrinsics, dist_coeffs, dst0):
    capture = cv.CaptureFromCAM(0)
    src = cv.QueryFrame(capture)
    cv.SetImageROI(dst0, (0, 0, 640, 480))
    cv.Undistort2(src, dst0, intrinsics, dist_coeffs)
    cv.SetImageROI(dst0, image_ROI)
    dst = cv.GetImage(dst0)
    hsv = cv.CreateImage(size, 8, 3)
    cv.CvtColor(dst, hsv, cv.CV_RGB2HSV)
    cv.Split(hsv, hue, sat, val, None)
    hist = cv.CreateHist([32, 64], cv.CV_HIST_ARRAY, [[0, 180], [0, 256]], 1)
    cv.CalcHist([hue, sat], hist, 0, None)
    values = cv.GetMinMaxHistValue(hist)
    tweak = values[3][0]
    return tweak
Example #22
 def acquire(self, img):
     self.found_data = False
     cv.SetImageROI(img, self.rect.ToCvRect())
     self.internal_img = cv.CreateImage(cv.GetSize(img), img.depth,
                                        img.nChannels)
     cv.Copy(img, self.internal_img)
     cv.ResetImageROI(img)
Example #23
def getIris(frame):
    iris = []
    copyImg = cv.CloneImage(frame)
    resImg = cv.CloneImage(frame)
    grayImg = cv.CreateImage(cv.GetSize(frame), 8, 1)
    mask = cv.CreateImage(cv.GetSize(frame), 8, 1)
    storage = cv.CreateMat(frame.width, 1, cv.CV_32FC3)
    cv.CvtColor(frame, grayImg, cv.CV_BGR2GRAY)
    cv.Canny(grayImg, grayImg, 5, 70, 3)
    cv.Smooth(grayImg, grayImg, cv.CV_GAUSSIAN, 7, 7)
    circles = getCircles(grayImg)
    iris.append(resImg)
    for circle in circles:
        rad = int(circle[0][2])
        global radius
        radius = rad
        cv.Circle(mask, centroid, rad, cv.CV_RGB(255, 255, 255), cv.CV_FILLED)
        cv.Not(mask, mask)
        cv.Sub(frame, copyImg, resImg, mask)
        x = int(centroid[0] - rad)
        y = int(centroid[1] - rad)
        w = int(rad * 2)
        h = w
        cv.SetImageROI(resImg, (x, y, w, h))
        cropImg = cv.CreateImage((w, h), 8, 3)
        cv.Copy(resImg, cropImg)
        cv.ResetImageROI(resImg)
        return (cropImg)
    return (resImg)
Example #24
    def sample_frame(self, frame):
        # Get an average of the green channel on the forehead
        cv.SetImageROI(frame, self.face_tracker.get_forehead())
        sample = cv.Avg(frame)[1]
        cv.ResetImageROI(frame)

        return sample
Example #25
    def drawHistogram(self, image, chnum, hist_arr, plateaus):
        positions = (0, (self.Ihist.height + 10), 2 * self.Ihist.height + 20)
        colour_values = _blue, _green, _red
        colour = colour_values[chnum]
        Y = positions[chnum]

        cv.Set(self.Ihist, _trans)
        bin_w = cv.Round(float(self.Ihist.width) / self.hist_size)
        # min_value, max_value, pmin, pmax = cv.GetMinMaxHistValue(hist)

        X = image.width - self.Ihist.width
        rect = (X, Y, self.Ihist.width, self.Ihist.height)

        cv.SetImageROI(image, rect)
        scaling = self.Ihist.height / max(hist_arr)
        hist_arr *= scaling
        for i, v in enumerate(hist_arr):
            cv.Rectangle(self.Ihist, (i * bin_w, self.Ihist.height),
                         ((i + 1) * bin_w, self.Ihist.height - round(v)),
                         colour, -1, 8, 0)

        for i in plateaus[chnum]:
            cv.Rectangle(
                self.Ihist, (i * bin_w, self.Ihist.height),
                ((i + 1) * bin_w, self.Ihist.height - round(hist_arr[i])),
                _white, -1, 8, 0)

        cv.AddWeighted(image, 1 - self.hist_visibility, self.Ihist,
                       self.hist_visibility, 0.0, image)

        cv.ResetImageROI(image)
Example #26
def CompositeThumbnail(img, regions, thumb_size=100):
    '''extract a composite thumbnail for the regions of an image

    The composite will consist of N thumbnails side by side
    '''
    composite = cv.CreateImage((thumb_size*len(regions), thumb_size),8,3)
    for i in range(len(regions)):
        (x1,y1,x2,y2) = regions[i].tuple()
        midx = (x1+x2)/2
        midy = (y1+y2)/2

        if (x2-x1) > thumb_size or (y2-y1) > thumb_size:
            # we need to shrink the region
            rsize = max(x2+1-x1, y2+1-y1)
            src = cuav_util.SubImage(img, (midx-rsize/2,midy-rsize/2,rsize,rsize))
            thumb = cv.CreateImage((thumb_size, thumb_size),8,3)
            cv.Resize(src, thumb)
        else:
            x1 = midx - thumb_size/2
            y1 = midy - thumb_size/2
            thumb = cuav_util.SubImage(img, (x1, y1, thumb_size, thumb_size))
        cv.SetImageROI(composite, (thumb_size*i, 0, thumb_size, thumb_size))
        cv.Copy(thumb, composite)
        cv.ResetImageROI(composite)
    return composite
Example #27
 def do1Image(self, image, prevpoints):
     #http://www.aishack.in/2010/07/tracking-colored-objects-in-opencv/
     #http://nashruddin.com/OpenCV_Region_of_Interest_(ROI)
     #http://opencv-users.1802565.n2.nabble.com/Python-cv-Moments-Need-Help-td6044177.html
     #http://stackoverflow.com/questions/5132874/change-elements-in-a-cvseq-in-python
     img = self.getThreshold(image)
     points = []
     for i in range(4):
         cv.SetImageROI(img, (int(
             self.RectanglePoints[i][0]), int(self.RectanglePoints[i][1]),
                              int(self.RectanglePoints[i][2]),
                              int(self.RectanglePoints[i][3])))
         storage = cv.CreateMemStorage(0)
         contours = cv.FindContours(img, storage)
         moments = cv.Moments(contours)
         moment10 = cv.GetSpatialMoment(moments, 1, 0)
         moment01 = cv.GetSpatialMoment(moments, 0, 1)
         area = cv.GetCentralMoment(moments, 0, 0)
         cv.ResetImageROI(img)
         if (area != 0):
             x = self.RectanglePoints[i][0] + (moment10 / area)
             y = self.RectanglePoints[i][1] + (moment01 / area)
         else:
             if (prevpoints[i][0] == 0):
                 x = self.RectanglePoints[i][0]
                 y = self.RectanglePoints[i][1]
             else:
                 x = prevpoints[i][0]
                 y = prevpoints[i][1]
         points.append([x, y])
     return points
Example #28
	def rotacion_y_posicion_robot(self,robo_x=200,robo_y=100,robo_th=80):
		"""
		\brief graficar el robot en el lienzo de mapa 
		\param self no se necesita incluirlo al utilizar la funcion ya que se lo pone solo por ser la definicion de una clase
		\param robo_x coordenada x de la odometria del robot 
		\param robo_y coordenada y de la odometria del robot 
		\param robo_th Valor Th del robot
		\return Nada
		"""
		image_mapa=cv.LoadImage(self.nombre_archivo1, cv.CV_LOAD_IMAGE_COLOR)
		dimensiones_robot=self.dimensiones_robot
		image1=cv.CreateImage(dimensiones_robot,8,3)
		image_mascara=cv.CreateImage(dimensiones_robot,8,1)
		
		## rotation
		# rotate the robot
		src_center=dimensiones_robot[0]/2,dimensiones_robot[1]/2
		rot_mat=cv.CreateMat( 2, 3, cv.CV_32FC1 )
		cv.GetRotationMatrix2D(src_center, robo_th, 1.0,rot_mat);
		cv.WarpAffine(self.robot,image1,rot_mat)
		# create a mask for the black pixels
		cv.InRangeS(image1,cv.RGB(0,0,0),cv.RGB(14,14,14),image_mascara)
		cv.Not(image_mascara,image_mascara)
		#cv.ReleaseImage(image1)
		
		# shrink and position
		cv.SetImageROI(image_mapa,(robo_x,robo_y, dimensiones_robot[0], dimensiones_robot[1]));
		cv.Copy(image1,image_mapa,mask=image_mascara)
		cv.ResetImageROI(image_mapa);
		cv.SaveImage(self.nombre_archivo, image_mapa) #Saves the image#
Example #29
def OverlayImage(img, img2, x, y):
    '''overlay a 2nd image on a first image, at position x,y
    on the first image'''
    (w, h) = cv.GetSize(img2)
    cv.SetImageROI(img, (x, y, w, h))
    cv.Copy(img2, img)
    cv.ResetImageROI(img)
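# A minimal usage sketch (not part of the original example; hypothetical paths).
# The caller must make sure the overlay fits inside the base image at (x, y).
frame = cv.LoadImage("frame.png")
logo = cv.LoadImage("logo.png")
OverlayImage(frame, logo, 10, 10)
cv.SaveImage("frame_with_logo.png", frame)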
Example #30
    def find_corner_in_full_scale(self, point):
        point = self.m_d.scale_up(point)
        scale = self.m_d.scale
        self.m_d.set_scale(0)
        gray_img = self.m_d.gray_img
        canny_img = self.m_d.canny_img
        x,y = point
        cr = correct_rectangle((x-5,y-5,10,10), cv.GetSize(gray_img))
        for img in [gray_img,canny_img]:
            cv.SetImageROI(img, cr)
        cv.Canny(gray_img, canny_img, 300, 500)
        conts = cv.FindContours(canny_img, cv.CreateMemStorage(),
                                cv.CV_RETR_LIST,(cr[0],cr[1]))
        db.DrawContours(self.m_d.tmp_img, conts, (255,255,255), (128,128,128), 10)
        min_dist = 10
        min_point = None
        while conts:
            for c in conts:
                vec = vector(point, c)
                dist = length(vec)
                if dist < min_dist:
                    min_dist = dist
                    min_point = c

            conts = conts.h_next()
        self.m_d.set_scale(scale)
        return min_point