Example #1
def show_image():
    global Target
    global Grid
    global Display_Grid
    global Display_Original
    global Img
    global Display
    global Display_Data

    if Display_Original:
        Display = cv.CloneImage(Img)
    else:
        Display = cv.CloneImage(Target)

    if Blank_Image:
        Display = cv.CloneImage(Blank)

    if Display_Grid:
        cv.Or(Display, Grid, Display)

    if Display_Peephole:
        cv.And(Display, Peephole, Display)

    if Display_Data:
        show_data()
        cv.Or(Display, Hex, Display)

    cv.ShowImage("rompar %s" % sys.argv[1], Display)
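For reference, the same layering trick (OR in a grid overlay, AND with a peephole mask) looks like this with the modern cv2/NumPy API. This is only a minimal sketch: the file name, grid spacing and circle radius are placeholder values, not taken from the project above.

import cv2
import numpy as np

display = cv2.imread("display.png")      # placeholder base image (BGR)
if display is None:
    display = np.zeros((480, 640, 3), np.uint8)

grid = np.zeros_like(display)
grid[::32, :] = 255                      # horizontal grid lines
grid[:, ::32] = 255                      # vertical grid lines

peephole = np.zeros(display.shape[:2], np.uint8)
cv2.circle(peephole, (display.shape[1] // 2, display.shape[0] // 2), 150, 255, -1)

composited = cv2.bitwise_or(display, grid)                            # overlay the grid
composited = cv2.bitwise_and(composited, composited, mask=peephole)   # keep only the peephole

cv2.imshow("composited", composited)
cv2.waitKey(0)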
Example #2
File: lrf.py Project: Mnemonic7/lrf
def difference_image(img1, img2):
    print " simg1 = simplify(img1)"
    simg1 = simplify(img1)
    print " simg2 = simplify(img2)"
    simg2 = simplify(img2)

    #dbg_image('simg1',simg1)
    #dbg_image('simg2',simg2)

    #create image buffers
    img3 = cv.CreateImage(cv.GetSize(img2), cv.IPL_DEPTH_8U, 1)
    simg3 = cv.CloneImage(img3)
    bitimage = cv.CreateImage(cv.GetSize(img2), cv.IPL_DEPTH_8U, 1)
    eimg3 = cv.CloneImage(bitimage)

    #process
    print " cv.AbsDiff(simg2,simg1,img3)"
    cv.AbsDiff(simg2, simg1, img3)
    print " cv.Smooth(img3,simg3)"
    cv.Smooth(img3, simg3)
    #dbg_image('simg3',simg3)
    # these threshold values must be calibrated
    #cv.Threshold(simg3,bitimage,16,255,cv.CV_THRESH_TOZERO_INV)
    print " cv.Threshold(simg3,bitimage,50,255,cv.CV_THRESH_BINARY)"
    cv.Threshold(simg3, bitimage, 50, 255, cv.CV_THRESH_BINARY)
    #dbg_image('bitimage',bitimage)
    print " cv.Erode(bitimage,eimg3)"
    cv.Erode(bitimage, eimg3)
    #dbg_image('eimg3',eimg3)
    return eimg3
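A hedged cv2 sketch of the same difference pipeline (absolute difference, smoothing, binary threshold, erosion). The file names are placeholders; the threshold of 50 mirrors the example above.

import cv2

img1 = cv2.imread("frame1.png", cv2.IMREAD_GRAYSCALE)   # placeholder inputs
img2 = cv2.imread("frame2.png", cv2.IMREAD_GRAYSCALE)

diff = cv2.absdiff(img2, img1)                   # per-pixel |img2 - img1|
smoothed = cv2.GaussianBlur(diff, (5, 5), 0)     # suppress sensor noise
_, mask = cv2.threshold(smoothed, 50, 255, cv2.THRESH_BINARY)
clean = cv2.erode(mask, None)                    # drop isolated pixels

cv2.imwrite("difference_mask.png", clean)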
Example #3
    def locateMarker(self, frame):
        self.frameReal = cv.CloneImage(frame)
        self.frameImag = cv.CloneImage(frame)
        self.frameRealThirdHarmonics = cv.CloneImage(frame)
        self.frameImagThirdHarmonics = cv.CloneImage(frame)

        # Calculate convolution and determine response strength.
        cv.Filter2D(self.frameReal, self.frameReal,
                    self.matReal)  # src, dst, kernel
        cv.Filter2D(self.frameImag, self.frameImag,
                    self.matImag)  # src, dst, kernel

        cv.Mul(self.frameReal, self.frameReal,
               self.frameRealSq)  # src, src, dst
        cv.Mul(self.frameImag, self.frameImag,
               self.frameImagSq)  # src, src, dst
        cv.Add(self.frameRealSq, self.frameImagSq, self.frameSumSq)

        # Calculate convolution of third harmonics for quality estimation.
        cv.Filter2D(self.frameRealThirdHarmonics, self.frameRealThirdHarmonics,
                    self.matRealThirdHarmonics)
        cv.Filter2D(self.frameImagThirdHarmonics, self.frameImagThirdHarmonics,
                    self.matImagThirdHarmonics)

        min_val, max_val, min_loc, max_loc = cv.MinMaxLoc(self.frameSumSq)
        self.lastMarkerLocation = max_loc
        (xm, ym) = max_loc
        self.determineMarkerOrientation(frame)
        #	self.determineMarkerQuality_naive(frame)
        self.determineMarkerQuality_Mathias(frame)
        #        self.determineMarkerQuality()
        return max_loc
Example #4
 def get_separated_channels(self):
     ''' Split the channels of an image '''
     b = cv.CreateImage(cv.GetSize(self.image), self.image.depth, 1)
     g = cv.CloneImage(b)
     r = cv.CloneImage(b)
     cv.Split(self.image, b, g, r, None)
     return [b, g, r]
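The cv2 counterpart of this channel split is a single call; 'input.png' below is a placeholder path.

import cv2

image = cv2.imread("input.png")    # BGR image
b, g, r = cv2.split(image)         # one single-channel array per plane
# NumPy slicing gives the same planes without a copy, e.g. image[:, :, 0] is blue.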
Example #5
    def scanFaces(src):
        total = 0
        c = cv.CloneImage(src)
        frams = []
        frams.append(src)  # original image

        cv.Flip(c, None, 0)
        frams.append(c)  # flipped around the x-axis

        dst = cv.CreateImage((src.height, src.width),
                src.depth, src.channels)
        cv.Transpose(src, dst)
        cv.Flip(dst, None, 0)
        frams.append(dst)  # rotated 90° counterclockwise

        c2 = cv.CloneImage(src)
        cv.Flip(c2, None, 0)
        dst = cv.CreateImage((src.height, src.width),
                src.depth, src.channels)
        cv.Transpose(c2, dst)
        frams.append(dst)  # rotated 90° clockwise

        for i, img in enumerate(frams):
            count[0] += ifFace(img, (img.width, img.height))

        if count[0]>=15:
            return True
        else:
            return False
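A minimal cv2 sketch of the four orientations built above (original, flipped copy, and the two 90-degree rotations); 'input.png' is a placeholder.

import cv2

src = cv2.imread("input.png")                         # placeholder input
frames = [
    src,                                              # original
    cv2.flip(src, 0),                                 # flipped around the x-axis
    cv2.rotate(src, cv2.ROTATE_90_COUNTERCLOCKWISE),  # rotated 90° counterclockwise
    cv2.rotate(src, cv2.ROTATE_90_CLOCKWISE),         # rotated 90° clockwise
]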
Example #6
    def __init__(self):
        """
        The constructor gets a reference to the webcam and creates a window to display the images.
        """
        # Variable that holds the monitoring state.
        self.estado = True

        # Get a reference to the webcam capture.
        self.webCam = cv.CaptureFromCAM(0)

        # Grab the current image from the webcam.
        self.imagem_atual = cv.QueryFrame(self.webCam)

        if self.imagem_atual is None:
            stderr.write('The webcam is turned off. Please turn it on\n')
            exit()
        else:
            # Create a new image that will be used to find the contours in imagem_atual.
            self.imagem_cinza = cv.CreateImage(cv.GetSize(self.imagem_atual), cv.IPL_DEPTH_8U, 1)

            # Create a new image that will be used to convert the current image to 32F.
            self.imagem_auxiliar = cv.CreateImage(cv.GetSize(self.imagem_atual), cv.IPL_DEPTH_32F, 3)

            # Image used to store the difference between the current and previous images.
            self.imagem_diferenca = None

            # Total area of the webcam image.
            self.area = self.imagem_atual.width * self.imagem_atual.height
            self.area_corrente = 0

            self.imagem_diferenca = cv.CloneImage(self.imagem_atual)
            self.imagem_anterior = cv.CloneImage(self.imagem_atual)

            # imagem_atual must be converted to 32F so the average can be computed with RunningAvg.
            cv.Convert(self.imagem_atual, self.imagem_auxiliar)
Example #7
    def processImage(self, curframe):
        cv.Smooth(curframe, curframe)  #Remove false positives

        if not self.absdiff_frame:  #For the first time put values in difference, temp and moving_average
            self.absdiff_frame = cv.CloneImage(curframe)
            self.previous_frame = cv.CloneImage(curframe)
            cv.Convert(
                curframe, self.average_frame
            )  #Should convert because after runningavg take 32F pictures
        else:
            cv.RunningAvg(curframe, self.average_frame,
                          0.05)  #Compute the average

        cv.Convert(self.average_frame,
                   self.previous_frame)  #Convert back to 8U frame

        cv.AbsDiff(curframe, self.previous_frame,
                   self.absdiff_frame)  # moving_average - curframe

        cv.CvtColor(
            self.absdiff_frame, self.gray_frame,
            cv.CV_RGB2GRAY)  #Convert to gray otherwise can't do threshold
        cv.Threshold(self.gray_frame, self.gray_frame, 50, 255,
                     cv.CV_THRESH_BINARY)

        cv.Dilate(self.gray_frame, self.gray_frame, None,
                  15)  #to get object blobs
        cv.Erode(self.gray_frame, self.gray_frame, None, 10)
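The same running-average motion detector, sketched with the cv2 API under the assumption of a camera at index 0. The 0.05 averaging weight, the threshold of 50 and the dilate/erode iteration counts mirror the example above.

import cv2
import numpy as np

cap = cv2.VideoCapture(0)
ok, frame = cap.read()
average = np.float32(frame)                       # accumulateWeighted needs a float image

while ok:
    frame = cv2.GaussianBlur(frame, (5, 5), 0)    # remove false positives
    cv2.accumulateWeighted(frame, average, 0.05)  # update the running average
    background = cv2.convertScaleAbs(average)     # back to 8-bit for absdiff
    diff = cv2.absdiff(frame, background)

    gray = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
    _, gray = cv2.threshold(gray, 50, 255, cv2.THRESH_BINARY)
    gray = cv2.dilate(gray, None, iterations=15)  # merge pixels into object blobs
    gray = cv2.erode(gray, None, iterations=10)

    cv2.imshow("motion mask", gray)
    if cv2.waitKey(1) == 27:                      # Esc quits
        break
    ok, frame = cap.read()

cap.release()
cv2.destroyAllWindows()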
Example #8
    def initialize(self, frame):
        # Initialize
        # log_file_name = "tracker_output.log"
        # log_file = file( log_file_name, 'a' )
        
        print str(type(frame))
        print "resize to ::: " + str(cv.GetSize(frame)) + " " +  str(type(frame))
        (w, h) = cv.GetSize(frame)
#         gray = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)
        size = (w, h) #cv.GetSize(frame)#(300 , 300)
        self.thumbnail = cv.CreateImage(size, cv.IPL_DEPTH_8U, 3)
       
        self.grey_average_image = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1)
        self.grey_original_image = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1)
#         cv.CvtColor(display_image, gray, cv.CV_RGB2GRAY)
#         prev_image = gray
        
        # Greyscale image, thresholded to create the motion mask:
        self.grey_image = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1)
        
        # The RunningAvg() function requires a 32-bit or 64-bit image...
        self.running_average_image = cv.CreateImage(size, cv.IPL_DEPTH_32F, 3)
        
        # ...but the AbsDiff() function requires matching image depths:
        self.running_average_in_display_color_depth = cv.CloneImage(self.thumbnail)
        
        # RAM used by FindContours():
        self.mem_storage = cv.CreateMemStorage(0)
        
        # The difference between the running average and the current frame:
        self.difference = cv.CloneImage(self.thumbnail)
        
        self.target_count = 1
        self.last_target_count = 1
        self.last_target_change_t = 0.0
        self.k_or_guess = 1
        self.codebook = []
       
        self.last_frame_entity_list = []
        
        self.frame_count = 0
        
        # For toggling display:
        image_list = [ "camera", "difference", "threshold", "display", "faces" ]
        image_index = 3  # Index into image_list
    
        # Prep for text drawing:
        text_font = cv.InitFont(cv.CV_FONT_HERSHEY_COMPLEX, .5, .5, 0.0, 1, cv.CV_AA)
        text_coord = (5, 15)
        text_color = cv.CV_RGB(255, 255, 255)

        # Set this to the max number of targets to look for (passed to k-means):
        self.max_targets = 5
Example #9
 def im_to_lsb(self):
     b = cv.CreateImage(cv.GetSize(self.image), self.image.depth, 1)
     g = cv.CloneImage(b)
     r = cv.CloneImage(b)
     cv.Split(self.image, b, g, r, None)
     for j in range(self.image.height):
         for i in range(self.image.width):
             pixb, pixg, pixr = self.image[j, i]
             b[j, i] = 255 if int(pixb) & 1 else 0
             g[j, i] = 255 if int(pixg) & 1 else 0
             r[j, i] = 255 if int(pixr) & 1 else 0
     return [b, g, r]
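The per-pixel loop above can be vectorized with NumPy: AND each channel with 1 to keep only the least significant bit, then scale to 0/255 for display. 'input.png' is a placeholder.

import cv2

image = cv2.imread("input.png")                        # BGR, uint8
b, g, r = cv2.split(image)
lsb_planes = [(chan & 1) * 255 for chan in (b, g, r)]  # 255 where the LSB is set
cv2.imwrite("lsb_blue.png", lsb_planes[0])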
Example #10
 def __init__(self, img0):
     self.thresh1 = 255
     self.thresh2 = 30
     self.level = 4
     self.storage = cv.CreateMemStorage()
     cv.NamedWindow("Source", 0)
     cv.ShowImage("Source", img0)
     cv.NamedWindow("Segmentation", 0)
     cv.CreateTrackbar("Thresh1", "Segmentation", self.thresh1, 255, self.set_thresh1)
     cv.CreateTrackbar("Thresh2", "Segmentation",  self.thresh2, 255, self.set_thresh2)
     self.image0 = cv.CloneImage(img0)
     self.image1 = cv.CloneImage(img0)
     cv.ShowImage("Segmentation", self.image1)
Example #11
def getContours(im, approx_value=1):  #Return contours approximated
    storage = cv.CreateMemStorage(0)
    contours = cv.FindContours(cv.CloneImage(im), storage, cv.CV_RETR_CCOMP,
                               cv.CV_CHAIN_APPROX_SIMPLE)
    contourLow = cv.ApproxPoly(contours, storage, cv.CV_POLY_APPROX_DP,
                               approx_value, approx_value)
    return contourLow
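A hedged cv2 counterpart of getContours(), assuming OpenCV 4.x where findContours returns two values. The 1-pixel epsilon mirrors approx_value above; 'binary.png' is a placeholder.

import cv2

binary = cv2.imread("binary.png", cv2.IMREAD_GRAYSCALE)
contours, _ = cv2.findContours(binary, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
approximated = [cv2.approxPolyDP(c, 1.0, True) for c in contours]   # closed polygons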
Example #12
    def callFaceTracker(self, imageFrame):

        imageMat = cv.fromarray(imageFrame)
        image = cv.GetImage(imageMat)
        (isFaceDetected, detectedFaceImage, wholeImage, pt1,
         pt2) = self.tracker.detect_and_draw(image)

        if isFaceDetected:

            array = np.asarray(detectedFaceImage, np.uint8, 3)
            arrayCopy = array.copy()
            cv2.imshow("face image ", arrayCopy)
            #             print "got type " + str(type(arrayCopy))
            a = Image.fromarray(arrayCopy)
            b = ImageTk.PhotoImage(image=a)

            self.canvas2.create_image(0, 0, image=b, anchor=tk.NW)
            self.canvas2.update()

            print "here....."

            self.goToNextState(True)

            originalImage2 = cv.CloneImage(wholeImage)
            self.camshift = Camshift()
            self.camshift.defineRegionOfInterest(originalImage2, pt1, pt2)
Example #13
def on_mouse(event, x, y, flag, params):
    global start_draw
    global roi_x0
    global roi_y0
    global roi_x1
    global roi_y1
    global image2

    if (event == cv.CV_EVENT_LBUTTONDOWN):
        print("LButton")
        if (not start_draw):
            roi_x0 = x
            roi_y0 = y
            start_draw = True
        else:
            roi_x1 = x
            roi_y1 = y
            start_draw = False

    elif (event == cv.CV_EVENT_MOUSEMOVE and start_draw):
        #Redraw ROI selection
        image2 = cv.CloneImage(image)
        if (len(rect_list) > 0):
            for coord in rect_list:
                cv.Rectangle(image2, coord[0], coord[1], cv.CV_RGB(255, 0, 0),
                             5)
        cv.Rectangle(image2, (roi_x0, roi_y0), (x, y), cv.CV_RGB(255, 0, 255),
                     5)
        cv.ShowImage(window_name, image2)
Example #14
File: lrf.py Project: Mnemonic7/lrf
def capture_image(capture, mapx=None, mapy=None):
    img = cv.CloneImage(cv.QueryFrame(capture))

    #Flip
    if FLIP_IMAGE:
        img2 = cv.CloneImage(img)
        cv.Flip(img, img2, 0)
        cv.Flip(img2, img, 1)

    #undistort if calibration matrices were given
    if mapx is not None and mapy is not None:
        udimg = cv.CloneImage(img)
        cv.Remap(img, udimg, mapx, mapy)
        img = udimg

    return img
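A hedged cv2 version of the same capture helper. FLIP_IMAGE becomes a parameter, and mapx/mapy are assumed to be undistortion maps produced elsewhere (e.g. by cv2.initUndistortRectifyMap).

import cv2

def capture_image(capture, mapx=None, mapy=None, flip_image=False):
    ok, img = capture.read()
    if not ok:
        return None
    if flip_image:
        img = cv2.flip(img, -1)     # flip around both axes in one call
    if mapx is not None and mapy is not None:
        img = cv2.remap(img, mapx, mapy, cv2.INTER_LINEAR)
    return img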
Example #15
 def rectif(self, image, cadreIn, cadreOut):
     prev_image = cv.CloneImage(image)
     cv.Zero(image)
     mmat = cv.CreateMat(3, 3, cv.CV_32FC1)
     print("mmat= %s" % repr(mmat))
     cv.GetPerspectiveTransform(cadreIn, cadreOut, mmat)
     cv.WarpPerspective(prev_image, image,
                        mmat)  #, flags=cv.CV_WARP_INVERSE_MAP )
Example #16
def update_drawing():
    blink = cv.CloneImage(drawing)
    if hands:
      for id in hands:
        cv.Circle(blink, hands[id]['current_position'], 10, hands[id]['color']['cv'], -1, cv.CV_AA, 0)
        if hands[id]['drawing'] == True:
          cv.Line(drawing, hands[id]['previous_position'], hands[id]['current_position'], hands[id]['color']['cv'], 10, cv.CV_AA, 0) 
    cv.ShowImage('Drawing', blink)
Example #17
 def set_image(self, img, bgr=False):
     '''set the currently displayed image'''
     if not self.is_alive():
         return
     if bgr:
         img = cv.CloneImage(img)
         cv.CvtColor(img, img, cv.CV_BGR2RGB)
     self.in_queue.put(MPImageData(img))
Example #18
def Process(image, pos_var, pos_w, pos_phase, pos_psi):
    global kernel_size
    if kernel_size % 2 == 0:
        kernel_size += 1

    kernel = cv.CreateMat(kernel_size, kernel_size, cv.CV_32FC1)
    # kernelimg = cv.CreateImage((kernel_size,kernel_size),cv.IPL_DEPTH_32F,1)
    # big_kernelimg = cv.CreateImage((kernel_size*20,kernel_size*20),cv.IPL_DEPTH_32F,1)
    src = cv.CreateImage((image.width, image.height), cv.IPL_DEPTH_8U, 1)
    src_f = cv.CreateImage((image.width, image.height), cv.IPL_DEPTH_32F, 1)

    # src = image #cv.CvtColor(image,src,cv.CV_BGR2GRAY) #no conversion is needed
    if cv.GetElemType(image) == cv.CV_8UC3:
        cv.CvtColor(image, src, cv.CV_BGR2GRAY)
    else:
        src = image

    cv.ConvertScale(src, src_f, 1.0 / 255, 0)
    dest = cv.CloneImage(src_f)
    dest_mag = cv.CloneImage(src_f)

    var = pos_var / 10.0
    w = pos_w / 10.0
    phase = pos_phase * cv.CV_PI / 180.0
    psi = cv.CV_PI * pos_psi / 180.0

    cv.Zero(kernel)
    for x in range(-kernel_size / 2 + 1, kernel_size / 2 + 1):
        for y in range(-kernel_size / 2 + 1, kernel_size / 2 + 1):
            kernel_val = math.exp(-(
                (x * x) +
                (y * y)) / (2 * var)) * math.cos(w * x * math.cos(phase) +
                                                 w * y * math.sin(phase) + psi)
            cv.Set2D(kernel, y + kernel_size / 2, x + kernel_size / 2,
                     cv.Scalar(kernel_val))
            # cv.Set2D(kernelimg,y+kernel_size/2,x+kernel_size/2,cv.Scalar(kernel_val/2+0.5))
    cv.Filter2D(src_f, dest, kernel, (-1, -1))
    # cv.Resize(kernelimg,big_kernelimg)
    cv.Pow(dest, dest_mag, 2)

    # return (dest_mag, big_kernelimg, dest)
    return (dest_mag, dest)
    # cv.ShowImage("Mag",dest_mag)
    # cv.ShowImage("Kernel",big_kernelimg)
    # cv.ShowImage("Process window",dest)
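cv2 ships a ready-made Gabor kernel, so the hand-rolled kernel loop above can be replaced by getGaborKernel plus filter2D. The parameter mapping is only approximate (sigma plays the role of sqrt(var), theta of phase, psi of psi), and the values below are placeholders.

import cv2
import numpy as np

ksize = 21
# Arguments: (ksize, sigma, theta, lambd, gamma, psi), 32-bit float kernel.
kernel = cv2.getGaborKernel((ksize, ksize), 4.0, np.pi / 4, 10.0, 1.0, 0.0, ktype=cv2.CV_32F)

gray = cv2.imread("input.png", cv2.IMREAD_GRAYSCALE)   # placeholder input
src_f = gray.astype(np.float32) / 255.0
response = cv2.filter2D(src_f, -1, kernel)             # filter response
magnitude = response ** 2                              # same squaring as cv.Pow(dest, dest_mag, 2)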
Example #19
 def __init__(self, input_filename, output_filename):
     self.prev_pt = None
     self.outname = output_filename
     self.orig = cv.LoadImage(input_filename)
     self.image = cv.CloneImage(self.orig)
     self.chans = self.im_to_lsb()
     cv.ShowImage("image", self.image)
     cv.ShowImage("LSB", self.chans[0])
     cv.SetMouseCallback("image", self.on_mouse)
Example #20
def displayDepth(name, dep, imshow=cv.ShowImage):
    width = cv.GetImageROI(dep)[2]
    height = cv.GetImageROI(dep)[3]
    disp = cv.CloneImage(dep)

    cv.ConvertScale(disp, disp, 10)
    imshow(name, disp)

    del disp
Example #21
	def workingThumb(self, frame, frameSize):
		if frameSize[0] <= 640 and frameSize[1] <= 480:
			small = cv.CloneImage(frame)
		else:
			small = cv.CreateImage((640, int((640 / float(frameSize[0])) * frameSize[1])), frame.depth, frame.channels)
			cv.Resize(frame, small)

		# Compute the size in both branches so the returned value is always defined.
		smallSize = cv.GetSize(small)
		return small, smallSize
Example #22
def altera_quadro():
    blink = cv.CloneImage(quadro)
    if maos:
      for id in maos:
        cv.Circle(blink, maos[id]['atual'], 10, cv.CV_RGB(0, 0, 150), -1, cv.CV_AA, 0)
        if 'anterior' in maos[id]:
          if efeito == 'Caneta':
            cv.Line(quadro, maos[id]['anterior'], maos[id]['atual'], cv.CV_RGB(0,0,0), 1, cv.CV_AA, 0) 
          elif efeito == 'Apagador':
            cv.Line(quadro, maos[id]['anterior'], maos[id]['atual'], cv.CV_RGB(255,255,255), 30, cv.CV_AA, 0) 
    cv.ShowImage('Quadro', blink)
Example #23
def getThresholdImage(im):
    newim = cv.CloneImage(im)
    cv.Smooth(newim, newim, cv.CV_BLUR, 12)  #Remove noise

    hsv = cv.CreateImage(cv.GetSize(im), 8, 3)
    cv.CvtColor(newim, hsv, cv.CV_BGR2HSV)  # Convert image to HSV
    imThreshed = cv.CreateImage(cv.GetSize(im), 8, 1)
    #Do the threshold on the hsv image, with the right range for the yellow color
    cv.InRangeS(hsv, cv.Scalar(20, 100, 100), cv.Scalar(30, 255, 255),
                imThreshed)
    del hsv
    return imThreshed
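The cv2 counterpart of getThresholdImage(): blur, convert to HSV, and keep the same yellow range; 'input.png' is a placeholder.

import cv2
import numpy as np

im = cv2.imread("input.png")                     # placeholder input
blurred = cv2.blur(im, (12, 12))                 # remove noise
hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)   # convert to HSV
mask = cv2.inRange(hsv, np.array([20, 100, 100]), np.array([30, 255, 255]))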
Example #24
def find_squares4(color_img):
    """
    Finds multiple squares in the image.

    Steps:
    - Use Canny edge detection to highlight contours, and dilation to connect
      the edge segments.
    - Threshold the result to a binary edge map.
    - Use cv.FindContours: returns a cv.CvSequence of cv.CvContours.
    - Filter each candidate: approximate the polygon and keep only contours
      with 4 vertices, enough area, and roughly 90-degree angles.

    Returns all square contours in one flat list of arrays, four (x, y) points each.
    """
    #select even sizes only
    width, height = (color_img.width & -2, color_img.height & -2)
    timg = cv.CloneImage(color_img)  # make a copy of input image
    gray = cv.CreateImage((width, height), 8, 1)

    # select the maximum ROI in the image
    cv.SetImageROI(timg, (0, 0, width, height))

    # down-scale and upscale the image to filter out the noise
    pyr = cv.CreateImage((width / 2, height / 2), 8, 3)
    cv.PyrDown(timg, pyr, 7)
    cv.PyrUp(pyr, timg, 7)

    tgray = cv.CreateImage((width, height), 8, 1)
    squares = []

    # Find squares in every color plane of the image
    # Two methods, we use both:
    # 1. Canny to catch squares with gradient shading. Use upper threshold
    # from slider, set the lower to 0 (which forces edges merging). Then
    # dilate canny output to remove potential holes between edge segments.
    # 2. Binary thresholding at multiple levels
    N = 11
    for c in [0, 1, 2]:
        #extract the c-th color plane
        cv.SetImageCOI(timg, c + 1)
        cv.Copy(timg, tgray, None)
        cv.Canny(tgray, gray, 0, 50, 5)
        cv.Dilate(gray, gray)
        squares = squares + find_squares_from_binary(gray)

        # Look for more squares at several threshold levels
        for l in range(1, N):
            cv.Threshold(tgray, gray, (l + 1) * 255 / N, 255,
                         cv.CV_THRESH_BINARY)
            squares = squares + find_squares_from_binary(gray)

    return squares
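find_squares_from_binary() is not shown above; a hedged cv2 sketch of the filtering step the docstring describes (approximate each contour, keep convex quadrilaterals with enough area) could look like this. The epsilon factor and minimum area are placeholder values.

import cv2

def squares_from_binary(binary, min_area=1000.0):
    contours, _ = cv2.findContours(binary, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    squares = []
    for c in contours:
        approx = cv2.approxPolyDP(c, 0.02 * cv2.arcLength(c, True), True)
        if (len(approx) == 4 and cv2.contourArea(approx) > min_area
                and cv2.isContourConvex(approx)):
            squares.append(approx.reshape(4, 2))
    return squares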
Example #25
    def redraw_map(self):
        '''redraw the map with current settings'''
        state = self.state

        view_same = (self.last_view and self.map_img
                     and self.last_view == self.current_view())

        if view_same and not state.need_redraw:
            return

        # get the new map
        self.map_img = state.mt.area_to_image(state.lat, state.lon,
                                              state.width, state.height,
                                              state.ground_width)
        if state.brightness != 1.0:
            cv.ConvertScale(self.map_img, self.map_img, scale=state.brightness)

        # find display bounding box
        (lat2, lon2) = self.coordinates(state.width - 1, state.height - 1)
        bounds = (lat2, state.lon, state.lat - lat2, lon2 - state.lon)

        # get the image
        img = cv.CloneImage(self.map_img)

        # possibly draw a grid
        if state.grid:
            SlipGrid('grid', layer=3, linewidth=1,
                     colour=(255, 255, 0)).draw(img, self.pixmapper, bounds)

        # draw layer objects
        keys = state.layers.keys()
        keys.sort()
        for k in keys:
            self.draw_objects(state.layers[k], bounds, img)

        # draw information objects
        for key in state.info:
            state.info[key].draw(state.panel, state.panel.information)

        # display the image
        self.img = wx.EmptyImage(state.width, state.height)
        self.img.SetData(img.tostring())
        self.imagePanel.set_image(self.img)

        self.update_position()

        self.mainSizer.Fit(self)
        self.Refresh()
        self.last_view = self.current_view()
        self.SetFocus()
        state.need_redraw = False
Example #26
def show_image(self):
    if self.config.img_display_original:
        self.img_display = cv.CloneImage(self.img_original)
    else:
        self.img_display = cv.CloneImage(self.img_target)

    if self.config.img_display_blank_image:
        self.img_display = cv.CloneImage(self.img_blank)

    if self.config.img_display_grid:
        cv.Or(self.img_display, self.img_grid, self.img_display)

    if self.config.img_display_peephole:
        cv.And(self.img_display, self.img_peephole, self.img_display)

    if self.config.img_display_data:
        show_data(self)
        cv.Or(self.img_display, self.img_hex, self.img_display)

    self.img_display_viewport = self.img_display[
        self.config.view.y:self.config.view.y + self.config.view.h,
        self.config.view.x:self.config.view.x + self.config.view.w]
    cv.ShowImage(self.title, self.img_display_viewport)
Example #27
    def run(self):

        copy = cv.CloneImage(self.image)

        while True:
            if self.drag_start and is_rect_nonzero(self.selection):
                copy = cv.CloneImage(self.image)
                sub = cv.GetSubRect(copy, self.selection)  #Get specified area

                #Make the effect of background shadow when selecting a window
                save = cv.CloneMat(sub)

                cv.ConvertScale(copy, copy, 0.5)
                cv.Copy(save, sub)

                #Draw temporary rectangle
                x, y, w, h = self.selection
                cv.Rectangle(copy, (x, y), (x + w, y + h), (255, 255, 255))

            cv.ShowImage("Image", copy)
            c = cv.WaitKey(1)
            if c == 27 or c == 1048603 or c == 10:  # Break when the user presses Esc (or Enter).
                break
Example #28
    def save_image(self):
        new = cv.CloneImage(self.orig)
        for j in range(new.height):
            for i in range(new.width):
                pix = []
                b, g, r = [int(im[j, i]) for im in self.chans]
                curb, curg, curr = [int(x) for x in new[j, i]]
                pix.append(curb | 1 if b == 255 else curb & 254)
                pix.append(curg | 1 if g == 255 else curg & 254)
                pix.append(curr | 1 if r == 255 else curr & 254)
                new[j, i] = tuple(pix)
        cv.SaveImage(self.outname, new)

        print "Saved in: " + self.outname
Example #29
    def img(self):
        '''return a cv image for the icon'''
        SlipThumbnail.img(self)

        if self.rotation:
            # rotate the image
            mat = cv.CreateMat(2, 3, cv.CV_32FC1)
            cv.GetRotationMatrix2D((self.width / 2, self.height / 2),
                                   -self.rotation, 1.0, mat)
            self._rotated = cv.CloneImage(self._img)
            cv.WarpAffine(self._img, self._rotated, mat)
        else:
            self._rotated = self._img
        return self._rotated
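The rotation step, sketched with cv2: build a 2x3 rotation matrix about the image centre and warp. 'icon.png' and the 30-degree angle are placeholders.

import cv2

icon = cv2.imread("icon.png")                             # placeholder input
h, w = icon.shape[:2]
mat = cv2.getRotationMatrix2D((w / 2, h / 2), -30, 1.0)   # centre, angle, scale
rotated = cv2.warpAffine(icon, mat, (w, h))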
Example #30
    def process_image(self, slider_pos):
        """
        This function finds contours, draws them and their approximation by ellipses.
        """
        stor = cv.CreateMemStorage()

        # Create the destination images
        image02 = cv.CloneImage(self.source_image)
        cv.Zero(image02)
        image04 = cv.CreateImage(cv.GetSize(self.source_image),
                                 cv.IPL_DEPTH_8U, 3)
        cv.Zero(image04)

        # Threshold the source image. This is needed for cv.FindContours().
        cv.Threshold(self.source_image, image02, slider_pos, 255,
                     cv.CV_THRESH_BINARY)

        # Find all contours.
        cont = cv.FindContours(image02, stor, cv.CV_RETR_LIST,
                               cv.CV_CHAIN_APPROX_NONE, (0, 0))

        for c in contour_iterator(cont):
            # The contour must have at least 6 points for cv.FitEllipse2.
            if len(c) >= 6:
                # Copy the contour into an array of (x,y)s
                PointArray2D32f = cv.CreateMat(1, len(c), cv.CV_32FC2)
                for (i, (x, y)) in enumerate(c):
                    PointArray2D32f[0, i] = (x, y)

                # Draw the current contour in gray
                gray = cv.CV_RGB(100, 100, 100)
                cv.DrawContours(image04, c, gray, gray, 0, 1, 8, (0, 0))

                # Fits ellipse to current contour.
                (center, size, angle) = cv.FitEllipse2(PointArray2D32f)

                # Convert ellipse data from float to integer representation.
                center = (cv.Round(center[0]), cv.Round(center[1]))
                size = (cv.Round(size[0] * 0.5), cv.Round(size[1] * 0.5))

                # Draw ellipse in random color
                color = cv.CV_RGB(random.randrange(256), random.randrange(256),
                                  random.randrange(256))
                cv.Ellipse(image04, center, size, angle, 0, 360, color, 2,
                           cv.CV_AA, 0)

        # Show image. HighGUI use.
        cv.ShowImage("Result", image04)
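A hedged cv2 counterpart of process_image(): threshold, find contours, and fit an ellipse to every contour with at least five points (cv2.fitEllipse needs five; the legacy FitEllipse2 example above asks for six). 'input.png' and the threshold of 128 are placeholders.

import cv2
import random

source = cv2.imread("input.png", cv2.IMREAD_GRAYSCALE)
_, binary = cv2.threshold(source, 128, 255, cv2.THRESH_BINARY)
contours, _ = cv2.findContours(binary, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)

canvas = cv2.cvtColor(source, cv2.COLOR_GRAY2BGR)
for c in contours:
    if len(c) >= 5:
        box = cv2.fitEllipse(c)                   # ((cx, cy), (w, h), angle)
        color = tuple(random.randrange(256) for _ in range(3))
        cv2.ellipse(canvas, box, color, 2)        # draw in a random color
cv2.imwrite("ellipses.png", canvas)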