def on_mouse(event, x, y, flag, params):
    global start_draw
    global roi_x0
    global roi_y0
    global roi_x1
    global roi_y1
    global image2

    if (event == cv.CV_EVENT_LBUTTONDOWN):
        print("LButton")
        if (not start_draw):
            roi_x0 = x
            roi_y0 = y
            start_draw = True
        else:
            roi_x1 = x
            roi_y1 = y
            start_draw = False

    elif (event == cv.CV_EVENT_MOUSEMOVE and start_draw):
        #Redraw ROI selection
        image2 = cv.CloneImage(image)
        if (len(rect_list) > 0):
            for coord in rect_list:
                cv.Rectangle(image2, coord[0], coord[1], cv.CV_RGB(255, 0, 0),
                             5)
        cv.Rectangle(image2, (roi_x0, roi_y0), (x, y), cv.CV_RGB(255, 0, 255),
                     5)
        cv.ShowImage(window_name, image2)
Example #2
 def _mixImageAlphaMask(self, wipeSettings, level, image1, image2, image2mask, mixMat):
     if(level < 0.99):
         wipeMode, wipePostMix, wipeConfig = wipeSettings
         if((wipeMode == WipeMode.Fade) or (wipeMode == WipeMode.Default)):
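             # Crossfade: inside image2mask, weight image1 by (1 - level) and image2 by level,
             # then add them; the 0.004 factor passed to cv.Mul is ~1/256, renormalizing the 0-255 weights.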
             valueCalc = int(256 * (1.0 - level))
             rgbColor = cv.CV_RGB(valueCalc, valueCalc, valueCalc)
             whiteColor = cv.CV_RGB(255, 255, 255)
             cv.Set(mixMat, whiteColor)
             cv.Set(mixMat, rgbColor, image2mask)
             cv.Mul(image1, mixMat, image1, 0.004)
             valueCalc = int(256 * level)
             rgbColor = cv.CV_RGB(valueCalc, valueCalc, valueCalc)
             cv.Zero(mixMat)
             cv.Set(mixMat, rgbColor, image2mask)
             cv.Mul(image2, mixMat, image2, 0.004)
             cv.Add(image1, image2, image1)
             return image1
         else:
             if(wipePostMix == False):
                 image2, image2mask = self._wipeImage(wipeMode, wipeConfig, level, image2, image2mask, mixMat, False)
                 cv.Copy(image2, image1, image2mask)
                 return image1
             else:
                 cv.Copy(image1, mixMat)
                 cv.Copy(image2, mixMat, image2mask)
                 return self._wipeMix(wipeMode, wipeConfig, level, image1, mixMat, image2)
     cv.Copy(image2, image1, image2mask)
     return image1
Example #3
def findImageContour(img, frame):
    storage = cv.CreateMemStorage()
    cont = cv.FindContours(img, storage, cv.CV_RETR_EXTERNAL,
                           cv.CV_CHAIN_APPROX_NONE, (0, 0))
    max_center = [None, 0]
    max_angle = 0
    for c in contour_iterator(cont):
        # cv.FitEllipse2 needs at least 6 points; the larger threshold also
        # sets the minimum size of an object to be tracked.
        if len(c) >= 60:
            # Copy the contour into an array of (x,y)s
            PointArray2D32f = cv.CreateMat(1, len(c), cv.CV_32FC2)
            for (i, (x, y)) in enumerate(c):
                PointArray2D32f[0, i] = (x, y)

            # Fit an ellipse to the current contour.
            (center, size, angle) = cv.FitEllipse2(PointArray2D32f)

            # Only keep the biggest contour -- adapt for multiple object tracking
            if size > max_center[1]:
                max_center[0] = center
                max_center[1] = size
                max_angle = angle

            # Draw the current contour in white
            gray = cv.CV_RGB(255, 255, 255)
            cv.DrawContours(img, c, gray, gray, 0, 1, 8, (0, 0))

    if max_center[1] > 0:
        # Convert ellipse data from float to integer representation.
        center = (cv.Round(max_center[0][0]), cv.Round(max_center[0][1]))
        size = (cv.Round(max_center[1][0] * 0.5),
                cv.Round(max_center[1][1] * 0.5))
        color = cv.CV_RGB(255, 0, 0)

        cv.Ellipse(frame, center, size, max_angle, 0, 360, color, 3, cv.CV_AA, 0)
def processa_frame(imagem):
    cv.SetData(imagem_cv, imagem)
    if maos:
      for id in maos:
        cv.PutText(imagem_cv, efeito, maos[id]['atual'], fonte_do_texto, cv.CV_RGB(0, 0, 150))
    cv.PutText(imagem_cv, 'Efeito: ' + efeito, (10, 20), fonte_do_texto, cv.CV_RGB(200, 0, 0))
    cv.ShowImage('Video', imagem_cv)
Example #5
def lines2():
    im = cv.LoadImage('roi_edges.jpg', cv.CV_LOAD_IMAGE_GRAYSCALE)
    pi = math.pi
    x = 0
    dst = cv.CreateImage(cv.GetSize(im), 8, 1)
    cv.Canny(im, dst, 200, 200)
    cv.Threshold(dst, dst, 100, 255, cv.CV_THRESH_BINARY)
    color_dst_standard = cv.CreateImage(cv.GetSize(im), 8, 3)
    cv.CvtColor(im, color_dst_standard,
                cv.CV_GRAY2BGR)  #Create output image in RGB to put red lines
    lines = cv.HoughLines2(dst, cv.CreateMemStorage(0), cv.CV_HOUGH_STANDARD,
                           1, pi / 100, 71, 0, 0)
    kl = []
    kr = []

    for (rho, theta) in lines[:100]:
        a = math.cos(theta)
        b = math.sin(theta)
        x0 = a * rho
        y0 = b * rho
        pt1 = (cv.Round(x0 + 1000 * (-b)), cv.Round(y0 + 1000 * (a)))
        pt2 = (cv.Round(x0 - 1000 * (-b)), cv.Round(y0 - 1000 * (a)))
        # Slope of the line through pt1 and pt2
        k = ((y0 - 1000 * (a)) - (y0 + 1000 * (a))) / ((x0 - 1000 * (-b)) -
                                                       (x0 + 1000 * (-b)))

        if abs(k) < 0.4:
            # Nearly horizontal lines are not lane markings; skip them
            continue
        elif k > 0:
            kr.append(k)
        else:
            kl.append(k)
        cv.Line(color_dst_standard, pt1, pt2, cv.CV_RGB(255, 0, 0), 2, 4)

    klaver = sum(kl) / len(kl) if kl else 0
    kraver = sum(kr) / len(kr) if kr else 0
    cv.SaveImage('lane.jpg', color_dst_standard)
    print 'Average left-lane slope:', klaver, '  Average right-lane slope:', kraver
    cv.ShowImage("Hough Standard", color_dst_standard)
    cv.WaitKey(0)
Example #6
def draw_common(points):
    success, center, radius = cv.MinEnclosingCircle(points)
    if success:
        cv.Circle(img, roundxy(center), cv.Round(radius),
                  cv.CV_RGB(255, 255, 0), 1, cv.CV_AA, 0)

    box = cv.MinAreaRect2(points)
    box_vtx = [roundxy(p) for p in cv.BoxPoints(box)]
    cv.PolyLine(img, [box_vtx], 1, cv.CV_RGB(0, 255, 255), 1, cv.CV_AA)
def altera_quadro():
    blink = cv.CloneImage(quadro)
    if maos:
      for id in maos:
        cv.Circle(blink, maos[id]['atual'], 10, cv.CV_RGB(0, 0, 150), -1, cv.CV_AA, 0)
        if 'anterior' in maos[id]:
          if efeito == 'Caneta':
            cv.Line(quadro, maos[id]['anterior'], maos[id]['atual'], cv.CV_RGB(0,0,0), 1, cv.CV_AA, 0) 
          elif efeito == 'Apagador':
            cv.Line(quadro, maos[id]['anterior'], maos[id]['atual'], cv.CV_RGB(255,255,255), 30, cv.CV_AA, 0) 
    cv.ShowImage('Quadro', blink)
def processa_frame(imagem):
    cv.SetData(imagem_cv, imagem)
    if gesto == False:
        cv.PutText(imagem_cv, 'Acene para ser Rastreado!', (80, 50),
                   fonte_do_texto, cv.CV_RGB(0, 0, 0))
    cv.Circle(imagem_cv, centro, 16, cv.CV_RGB(0, 0, 255), 2, cv.CV_AA, 0)
    cv.PutText(imagem_cv, 'Real(mm): ' + coordenada_real, (80, 435),
               fonte_do_texto, cv.CV_RGB(255, 255, 255))
    cv.PutText(imagem_cv, 'Convertido(px): ' + coordenada_projecao, (80, 465),
               fonte_do_texto, cv.CV_RGB(255, 255, 255))
    cv.ShowImage('Video', imagem_cv)
Example #9
def processa_frame(imagem):
    cv.SetData(imagem_cv, imagem)
    if maos:
        for id in maos:
            cv.PutText(imagem_cv,
                       ', '.join(str(int(e)) for e in maos[id]['real']),
                       maos[id]['projecao'], fonte_do_texto,
                       cv.CV_RGB(0, 0, 150))
    else:
        cv.PutText(imagem_cv, 'Acene para ser Rastreado', (10, 20),
                   fonte_do_texto, cv.CV_RGB(200, 0, 0))
    cv.ShowImage('Video', imagem_cv)
def Color_callibration(capture):
    vals = []
    bgr = []
    mini = [255, 255, 255]
    maxi = [0, 0, 0]
    cv.NamedWindow("BGR", 0)
    print 'Please put your color in the circular area. Press ESC to start calibration:'
    while 1:
        image = cv.QueryFrame(capture)
        cv.Flip(image, image, 1)
        cv.Circle(image, (int(200), int(300)), 10, cv.CV_RGB(255, 255, 255), 4)
        cv.ShowImage("BGR", image)
        c = cv.WaitKey(33)
        if c == 27:
            break
    print 'Starting calibration... Analyzing the object...'
    for i in range(0, 100):
        image = cv.QueryFrame(capture)
        cv.Flip(image, image, 1)
        cv.Smooth(image, image, cv.CV_MEDIAN, 3, 0)
        imagehsv = cv.CreateImage(cv.GetSize(image), 8, 3)
        cv.CvtColor(image, imagehsv, cv.CV_BGR2YCrCb)
        vals = cv.Get2D(imagehsv, 300, 200)
        font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 0.5, 1, 0, 2, 8)
        cv.PutText(
            image,
            "  " + str(vals[0]) + "," + str(vals[1]) + "," + str(vals[2]),
            (200, 300), font, (55, 25, 255))
        for j in range(0, 3):
            if (vals[j] < mini[j]): mini[j] = vals[j]
            if (vals[j] > maxi[j]): maxi[j] = vals[j]
        cv.Circle(image, (int(200), int(300)), 10, cv.CV_RGB(255, 255, 255), 4)
        cv.ShowImage("BGR", image)
        c = cv.WaitKey(33)
        if c == 27:
            break
    print 'Analysis complete'
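    # Widen the measured range by a margin to tolerate lighting variation, then clamp to [0, 255]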
    mini[0] -= 35
    mini[1] -= 15
    mini[2] -= 15
    maxi[0] += 35
    maxi[1] += 15
    maxi[2] += 15
    for i in range(0, 3):
        if (mini[i] < 0):
            mini[i] = 0
        if (maxi[i] > 255):
            maxi[i] = 255
    cv.DestroyWindow("BGR")
    bgr = (mini, maxi)
    return bgr
Example #11
    def process_image(self, slider_pos):
        """
        This function finds contours, draws them and their approximation by ellipses.
        """
        stor = cv.CreateMemStorage()

        # Create the destination images
        image02 = cv.CloneImage(self.source_image)
        cv.Zero(image02)
        image04 = cv.CreateImage(cv.GetSize(self.source_image),
                                 cv.IPL_DEPTH_8U, 3)
        cv.Zero(image04)

        # Threshold the source image. This is needed for cv.FindContours().
        cv.Threshold(self.source_image, image02, slider_pos, 255,
                     cv.CV_THRESH_BINARY)

        # Find all contours.
        cont = cv.FindContours(image02, stor, cv.CV_RETR_LIST,
                               cv.CV_CHAIN_APPROX_NONE, (0, 0))

        for c in contour_iterator(cont):
            # Number of points must be more than or equal to 6 for cv.FitEllipse2
            if len(c) >= 6:
                # Copy the contour into an array of (x,y)s
                PointArray2D32f = cv.CreateMat(1, len(c), cv.CV_32FC2)
                for (i, (x, y)) in enumerate(c):
                    PointArray2D32f[0, i] = (x, y)

                # Draw the current contour in gray
                gray = cv.CV_RGB(100, 100, 100)
                cv.DrawContours(image04, c, gray, gray, 0, 1, 8, (0, 0))

                # Fits ellipse to current contour.
                (center, size, angle) = cv.FitEllipse2(PointArray2D32f)

                # Convert ellipse data from float to integer representation.
                center = (cv.Round(center[0]), cv.Round(center[1]))
                size = (cv.Round(size[0] * 0.5), cv.Round(size[1] * 0.5))

                # Draw ellipse in random color
                color = cv.CV_RGB(random.randrange(256), random.randrange(256),
                                  random.randrange(256))
                cv.Ellipse(image04, center, size, angle, 0, 360, color, 2,
                           cv.CV_AA, 0)

        # Show image. HighGUI use.
        cv.ShowImage("Result", image04)
Example #12
	def drawBoundingRect(self, frame, b_rect, smallSize = None):
		# Rescale the rect from the smaller analysis frame to the full frame if needed.
		bounding_rect = list(b_rect)
		if smallSize is not None:
			size = cv.GetSize(frame)
			scale = size[0] / float(smallSize[0])
			bounding_rect[0] = int(b_rect[0] * scale)
			bounding_rect[1] = int(b_rect[1] * scale)
			bounding_rect[2] = int(b_rect[2] * scale)
			bounding_rect[3] = int(b_rect[3] * scale)
		
		l = 10
		c = cv.CV_RGB(200,120,120)
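		# Draw a short L-shaped pair of lines at each corner of the (offset) rect.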
		point1 = ( bounding_rect[0] - self._bRectOffset, bounding_rect[1] - self._bRectOffset )
		point2 = ( bounding_rect[0] - self._bRectOffset, bounding_rect[1] + bounding_rect[3] + self._bRectOffset )
		point3 = ( bounding_rect[0] + bounding_rect[2] + self._bRectOffset, bounding_rect[1] - self._bRectOffset )
		point4 = ( bounding_rect[0] + bounding_rect[2] + self._bRectOffset, bounding_rect[1] + bounding_rect[3] + self._bRectOffset)
		
		cv.Line(frame, point1, (point1[0] + l, point1[1]) , c)
		cv.Line(frame, point1, (point1[0], point1[1] + l) , c)

		cv.Line(frame, point2, (point2[0] + l, point2[1]) , c)
		cv.Line(frame, point2, (point2[0], point2[1] - l) , c)

		cv.Line(frame, point3, (point3[0] - l, point3[1]) , c)
		cv.Line(frame, point3, (point3[0], point3[1] + l) , c)

		cv.Line(frame, point4, (point4[0] - l, point4[1]) , c)
		cv.Line(frame, point4, (point4[0], point4[1] - l) , c)
def draw_rects(img, rects, color):
    if rects:
        for rect in rects:
            # Draw a green rectangle around each detection
            cv.Rectangle(img, (int(rect[0]), int(rect[1])),
                         (int(rect[2]), int(rect[3])),
                         cv.CV_RGB(0, 255, 0), 1, 8, 0)
Example #14
    def add_keypoints(self, roi, ignore_distance=False):
        # Begin with a mask of all black pixels
        mask = np.zeros_like(self.grey)

        # Get the coordinates and dimensions of the current ROI
        try:
            ((x, y), (w, h), a) = roi
        except:
            try:
                x, y, w, h = roi
                a = 0
            except:
                rospy.loginfo("ROI has shrunk to zero...")
                return

        x = int(x)
        y = int(y)

        # Expand the ROI to look for new keypoints
        w_new = int(self.expand_roi * w)
        h_new = int(self.expand_roi * h)

        pt1 = (x - int(w_new / 2), y - int(h_new / 2))
        pt2 = (x + int(w_new / 2), y + int(h_new / 2))

        mask_box = ((x, y), (w_new, h_new), a)

        # Display the expanded ROI with a yellow rectangle
        if self.show_add_drop:
            cv2.rectangle(self.marker_image, pt1, pt2, cv.RGB(255, 255, 0))

        # Create a filled white ellipse within the mask box to define the ROI
        cv2.ellipse(mask, mask_box, cv.CV_RGB(255, 255, 255), cv.CV_FILLED)

        if self.keypoints is not None:
            # Mask the current keypoints
            for x, y in [np.int32(p) for p in self.keypoints]:
                cv2.circle(mask, (x, y), 5, 0, -1)

        new_keypoints = cv2.goodFeaturesToTrack(self.grey,
                                                mask=mask,
                                                **self.gf_params)

        # Append new keypoints to the current list if they are not
        # too far from the current cluster
        if new_keypoints is not None:
            for x, y in np.float32(new_keypoints).reshape(-1, 2):
                if ignore_distance:
                    self.keypoints.append((x, y))
                else:
                    distance = self.distance_to_cluster((x, y), self.keypoints)
                    if distance > self.add_keypoint_distance:
                        self.keypoints.append((x, y))
                        # Briefly display a blue disc where the new point is added
                        if self.show_add_drop:
                            cv2.circle(self.marker_image, (x, y), 3,
                                       (255, 255, 0, 0), cv.CV_FILLED, 2, 0)

            # Remove duplicate keypoints
            self.keypoints = list(set(self.keypoints))
Example #15
def update_mhi(img, dst, diff_threshold):
    global last
    global mhi
    global storage
    global mask
    global orient
    global segmask
    timestamp = time.clock() / CLOCKS_PER_SEC # get current time in seconds
    size = cv.GetSize(img) # get current frame size
    idx1 = last
    if not mhi or cv.GetSize(mhi) != size:
        for i in range(N):
            buf[i] = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1)
            cv.Zero(buf[i])
        mhi = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
        cv.Zero(mhi)  # clear MHI at the beginning
        orient = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
        segmask = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
        mask = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1)

    cv.CvtColor(img, buf[last], cv.CV_BGR2GRAY) # convert frame to grayscale
    idx2 = (last + 1) % N # index of (last - (N-1))th frame
    last = idx2
    silh = buf[idx2]
    cv.AbsDiff(buf[idx1], buf[idx2], silh) # get difference between frames
    cv.Threshold(silh, silh, diff_threshold, 1, cv.CV_THRESH_BINARY) # and threshold it
    cv.UpdateMotionHistory(silh, mhi, timestamp, MHI_DURATION) # update MHI
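    # Scale the MHI into an 8-bit mask: the newest motion maps to 255, older motion fades toward 0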
    cv.CvtScale(mhi, mask, 255./MHI_DURATION,
                (MHI_DURATION - timestamp)*255./MHI_DURATION)
    cv.Zero(dst)
    cv.Merge(mask, None, None, None, dst)
    cv.CalcMotionGradient(mhi, mask, orient, MAX_TIME_DELTA, MIN_TIME_DELTA, 3)
    if not storage:
        storage = cv.CreateMemStorage(0)
    seq = cv.SegmentMotion(mhi, segmask, storage, timestamp, MAX_TIME_DELTA)
    for (area, value, comp_rect) in seq:
        if comp_rect[2] + comp_rect[3] > 100: # reject very small components
            color = cv.CV_RGB(255, 0,0)
            silh_roi = cv.GetSubRect(silh, comp_rect)
            mhi_roi = cv.GetSubRect(mhi, comp_rect)
            orient_roi = cv.GetSubRect(orient, comp_rect)
            mask_roi = cv.GetSubRect(mask, comp_rect)
            angle = 360 - cv.CalcGlobalOrientation(orient_roi, mask_roi, mhi_roi, timestamp, MHI_DURATION)

            count = cv.Norm(silh_roi, None, cv.CV_L1, None) # calculate number of points within silhouette ROI
            if count < (comp_rect[2] * comp_rect[3] * 0.05):
                continue

            magnitude = 30.
            center = ((comp_rect[0] + comp_rect[2] / 2), (comp_rect[1] + comp_rect[3] / 2))
            cv.Circle(dst, center, cv.Round(magnitude*1.2), color, 3, cv.CV_AA, 0)
            cv.Line(dst,
                    center,
                    (cv.Round(center[0] + magnitude * cos(angle * cv.CV_PI / 180)),
                     cv.Round(center[1] - magnitude * sin(angle * cv.CV_PI / 180))),
                    color,
                    3,
                    cv.CV_AA,
                    0)
Example #16
    def run(self):
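        # 1-D hue histogram; OpenCV stores 8-bit hue in the range [0, 180)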
        hist = cv.CreateHist([180], cv.CV_HIST_ARRAY, [(0, 180)], 1)
        backproject_mode = True

        while True:
            frame = cv.QueryFrame(self.capture)

            # Convert to HSV and keep the hue
            hsv = cv.CreateImage(cv.GetSize(frame), 8, 3)
            cv.CvtColor(frame, hsv, cv.CV_BGR2HSV)
            self.hue = cv.CreateImage(cv.GetSize(frame), 8, 1)
            cv.Split(hsv, self.hue, None, None, None)

            # Compute back projection
            backproject = cv.CreateImage(cv.GetSize(frame), 8, 1)
            cv.CalcArrBackProject([self.hue], backproject, hist)

            # Run the cam-shift (if a tracking window is set and non-zero)
            if self.track_window and is_rect_nonzero(self.track_window):
                crit = (cv.CV_TERMCRIT_EPS | cv.CV_TERMCRIT_ITER, 10, 1)
                (iters, (area, value, rect),
                 track_box) = cv.CamShift(backproject, self.track_window,
                                          crit)  #Call the camshift !!
                self.track_window = rect  #Put the current rectangle as the tracked area

            # If mouse is pressed, highlight the current selected rectangle and recompute histogram
            if self.drag_start and is_rect_nonzero(self.selection):
                sub = cv.GetSubRect(frame, self.selection)  #Get specified area

                #Make the effect of background shadow when selecting a window
                save = cv.CloneMat(sub)
                cv.ConvertScale(frame, frame, 0.5)
                cv.Copy(save, sub)

                #Draw temporary rectangle
                x, y, w, h = self.selection
                cv.Rectangle(frame, (x, y), (x + w, y + h), (255, 255, 255))

                #Take the same area but in hue image to calculate histogram
                sel = cv.GetSubRect(self.hue, self.selection)
                cv.CalcArrHist([sel], hist, 0)

                #Used to rescale the histogram with the max value (to draw it later on)
                (_, max_val, _, _) = cv.GetMinMaxHistValue(hist)
                if max_val != 0:
                    cv.ConvertScale(hist.bins, hist.bins, 255. / max_val)

            elif self.track_window and is_rect_nonzero(
                    self.track_window):  #If a window is set, draw an ellipse box
                cv.EllipseBox(frame, track_box, cv.CV_RGB(255, 0, 0), 3,
                              cv.CV_AA, 0)

            cv.ShowImage("CamShiftDemo", frame)
            cv.ShowImage("Backprojection", backproject)
            cv.ShowImage("Histogram", self.hue_histogram_as_image(hist))

            c = cv.WaitKey(7) % 0x100
            if c == 27:
                break
Example #17
    def show_liner(self):
        size = cv.GetSize(self.image)
        # B&W background but color highlighting
        gray_img = cv.CreateImage(size, 8, 1)
        draw_img = cv.CreateImage(size, 8, 3)
        #cv.Copy(self.image, draw_img)
        cv.CvtColor(self.image, gray_img, cv.CV_BGR2GRAY)
        cv.CvtColor(gray_img, draw_img, cv.CV_GRAY2BGR)
        # cv.PolyLine takes a list of polylines, each an iterable of (x, y) tuples
        cv.PolyLine(draw_img, [self.best_contour], True, cv.CV_RGB(255, 0, 0))
        cv.PolyLine(draw_img, [self.ref_polygon], True, cv.CV_RGB(0, 0, 255))
        print(self.line)
        cv.PolyLine(draw_img, [self.line], True, cv.CV_RGB(0, 255, 0))

        cv.ShowImage("Contours", draw_img)
        cv.WaitKey()
        sys.exit(1)
Example #18
    def initialize(self, frame):
        # Initialize
        # log_file_name = "tracker_output.log"
        # log_file = file( log_file_name, 'a' )
        
        print str(type(frame))
        print "resize to ::: " + str(cv.GetSize(frame)) + " " + str(type(frame))
        (w, h) = cv.GetSize(frame)
#         gray = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)
        size = (w, h) #cv.GetSize(frame)#(300 , 300)
        self.thumbnail = cv.CreateImage(size, cv.IPL_DEPTH_8U, 3)
       
        self.grey_average_image = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1)
        self.grey_original_image = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1)
#         cv.CvtColor(display_image, gray, cv.CV_RGB2GRAY)
#         prev_image = gray
        
        # Greyscale image, thresholded to create the motion mask:
        self.grey_image = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1)
        
        
        
        # The RunningAvg() function requires a 32-bit or 64-bit image...
        self.running_average_image = cv.CreateImage(size, cv.IPL_DEPTH_32F, 3)
        
        # ...but the AbsDiff() function requires matching image depths:
        self.running_average_in_display_color_depth = cv.CloneImage(self.thumbnail)
        
        # RAM used by FindContours():
        self.mem_storage = cv.CreateMemStorage(0)
        
        # The difference between the running average and the current frame:
        self.difference = cv.CloneImage(self.thumbnail)
        
        self.target_count = 1
        self.last_target_count = 1
        self.last_target_change_t = 0.0
        self.k_or_guess = 1
        self.codebook = []
       
        self.last_frame_entity_list = []
        
        self.frame_count = 0
        
        # For toggling display:
        image_list = [ "camera", "difference", "threshold", "display", "faces" ]
        image_index = 3  # Index into image_list
    
    
        # Prep for text drawing:
        text_font = cv.InitFont(cv.CV_FONT_HERSHEY_COMPLEX, .5, .5, 0.0, 1, cv.CV_AA)
        text_coord = (5, 15)
        text_color = cv.CV_RGB(255, 255, 255)

        
        # Set this to the max number of targets to look for (passed to k-means):
        self.max_targets = 5
Example #19
    def run(self):
        hist = cv.CreateHist([180], cv.CV_HIST_ARRAY, [(0, 180)], 1)
        backproject_mode = False
        while True:
            frame = cv.QueryFrame(self.capture)
            cv.Flip(frame, frame, 0)
            # Convert to HSV and keep the hue
            hsv = cv.CreateImage(cv.GetSize(frame), 8, 3)
            cv.CvtColor(frame, frame, cv.CV_RGB2BGR)
            cv.CvtColor(frame, hsv, cv.CV_BGR2HSV)
            #cv.CvtColor(frame, hsv, cv.CV_RGB2HSV)
            self.hue = cv.CreateImage(cv.GetSize(frame), 8, 1)
            cv.Split(hsv, self.hue, None, None, None)

            # Compute back projection
            backproject = cv.CreateImage(cv.GetSize(frame), 8, 1)

            # Run the cam-shift
            cv.CalcArrBackProject([self.hue], backproject, hist)
            if self.track_window and is_rect_nonzero(self.track_window):
                crit = (cv.CV_TERMCRIT_EPS | cv.CV_TERMCRIT_ITER, 10, 1)
                (iters, (area, value, rect),
                 track_box) = cv.CamShift(backproject, self.track_window, crit)
                self.track_window = rect

            # If mouse is pressed, highlight the current selected rectangle
            # and recompute the histogram

            if self.drag_start and is_rect_nonzero(self.selection):
                sub = cv.GetSubRect(frame, self.selection)
                save = cv.CloneMat(sub)
                cv.ConvertScale(frame, frame, 0.5)
                cv.Copy(save, sub)
                x, y, w, h = self.selection
                cv.Rectangle(frame, (x, y), (x + w, y + h), (255, 255, 255))

                sel = cv.GetSubRect(self.hue, self.selection)
                cv.CalcArrHist([sel], hist, 0)
                (_, max_val, _, _) = cv.GetMinMaxHistValue(hist)
                if max_val != 0:
                    cv.ConvertScale(hist.bins, hist.bins, 255. / max_val)
            elif self.track_window and is_rect_nonzero(self.track_window):
                cv.EllipseBox(frame, track_box, cv.CV_RGB(255, 0, 0), 3,
                              cv.CV_AA, 0)

            if not backproject_mode:
                cv.ShowImage("CamShiftDemo", frame)
            else:
                cv.ShowImage("CamShiftDemo", backproject)
            cv.ShowImage("Histogram", self.hue_histogram_as_image(hist))

            c = cv.WaitKey(7) % 0x100
            if c == 27:
                break
            elif c == ord("b"):
                backproject_mode = not backproject_mode
def on_mouse(event, x, y, flags, param):

    if (not color_img):
        return

    if event == cv.CV_EVENT_LBUTTONDOWN:
        my_mask = None
        seed = (x, y)
        if ffill_case == 0:
            lo = up = 0
            flags = connectivity + (new_mask_val << 8)
        else:
            lo = lo_diff
            up = up_diff
            flags = connectivity + (
                new_mask_val << 8) + cv.CV_FLOODFILL_FIXED_RANGE
        b = random.randint(0, 255)
        g = random.randint(0, 255)
        r = random.randint(0, 255)

        if (is_mask):
            my_mask = mask
            cv.Threshold(mask, mask, 1, 128, cv.CV_THRESH_BINARY)

        if (is_color):

            color = cv.CV_RGB(r, g, b)
            comp = cv.FloodFill(color_img, seed, color, cv.CV_RGB(lo, lo, lo),
                                cv.CV_RGB(up, up, up), flags, my_mask)
            cv.ShowImage("image", color_img)

        else:

            brightness = cv.RealScalar((r * 2 + g * 7 + b + 5) / 10)
            comp = cv.FloodFill(gray_img, seed, brightness, cv.RealScalar(lo),
                                cv.RealScalar(up), flags, my_mask)
            cv.ShowImage("image", gray_img)

        print "%g pixels were repainted" % comp[0]

        if (is_mask):
            cv.ShowImage("mask", mask)
Example #21
def minarea_seq(img, count, storage):
    points = [(randint(img.width / 4, img.width * 3 / 4),
               randint(img.height / 4, img.height * 3 / 4))
              for i in range(count)]
    cv.Zero(img)

    for p in points:
        cv.Circle(img, roundxy(p), 2, cv.CV_RGB(255, 0, 0), cv.CV_FILLED,
                  cv.CV_AA, 0)

    draw_common(points)
Example #22
def transformation(imCalRGB, calData, tx1, ty1, tx2, ty2, tx3, ty3, tx4, ty4):

    points = calData.points

    ## sectors are sometimes different -> make accessible
    # used when line rectangle intersection at specific segment is used for transformation:
    newtop = destinationPoint(calData.dstpoints[0], calData)
    newbottom = destinationPoint(calData.dstpoints[1], calData)
    newleft = destinationPoint(calData.dstpoints[2], calData)
    newright = destinationPoint(calData.dstpoints[3], calData)

    # get a fresh new image
    new_image = imCalRGB.copy()

    # create transformation matrix
    src = np.array([(points[0][0] + tx1, points[0][1] + ty1),
                    (points[1][0] + tx2, points[1][1] + ty2),
                    (points[2][0] + tx3, points[2][1] + ty3),
                    (points[3][0] + tx4, points[3][1] + ty4)], np.float32)
    dst = np.array([newtop, newbottom, newleft, newright], np.float32)
    transformation_matrix = cv2.getPerspectiveTransform(src, dst)

    new_image = cv2.warpPerspective(new_image, transformation_matrix,
                                    (800, 800))

    # draw image
    drawBoard = Draw()
    new_image = drawBoard.drawBoard(new_image, calData)

    cv2.circle(new_image, (int(newtop[0]), int(newtop[1])), 2,
               cv.CV_RGB(255, 255, 0), 2, 4)
    cv2.circle(new_image, (int(newbottom[0]), int(newbottom[1])), 2,
               cv.CV_RGB(255, 255, 0), 2, 4)
    cv2.circle(new_image, (int(newleft[0]), int(newleft[1])), 2,
               cv.CV_RGB(255, 255, 0), 2, 4)
    cv2.circle(new_image, (int(newright[0]), int(newright[1])), 2,
               cv.CV_RGB(255, 255, 0), 2, 4)

    cv2.imshow('manipulation', new_image)

    return transformation_matrix
Example #23
def create(src, id, pos, time):
    global hands
    ponto = depth.to_projective([pos])
    centro = (int(ponto[0][0]), int(ponto[0][1]))
    hands[id] = {
        'current_position': centro,
        'drawing': False,
        'color': {
            'name': 'Choose a Color',
            'cv': cv.CV_RGB(255, 255, 255)
        }
    }
Example #24
def minarea_array(img, count):
    pointMat = cv.CreateMat(count, 1, cv.CV_32SC2)
    for i in range(count):
        pointMat[i, 0] = (randint(img.width / 4, img.width * 3 / 4),
                          randint(img.height / 4, img.height * 3 / 4))

    cv.Zero(img)

    for i in range(count):
        cv.Circle(img, roundxy(pointMat[i, 0]), 2, cv.CV_RGB(255, 0, 0),
                  cv.CV_FILLED, cv.CV_AA, 0)

    draw_common(pointMat)
Example #25
def get_features(Img):

    img = cv2.imread(Img)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # convert to grayscale
    ret, binary = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)
    (Center_x, Center_y) = img_center(binary, 0, 0)  # centroid of the whole image
    leng_diagonal = math.sqrt(gray.shape[0] * gray.shape[0] +
                              gray.shape[1] * gray.shape[1])
    # print "Center_x",Center_x
    List = Canny(Img)  # returns contour info: (bounding-rect info, list of boundary points)
    #     List = check_merge(Img)
    #     for i in List:
    #         print "i",i
    #     raw_input("i")
    feature = []
    # print "Img",Img
    # print "LIst from canny is",List
    for iterm in List:
        '''
        Example item from List:
        [(1, 33, 386, 34), (1, 33, 385, 1), array([[[1, 33]], [[385, 33]]])]
        '''
        cv2.rectangle(gray, iterm[0][:2], iterm[0][2:], cv.CV_RGB(255, 0, 0),
                      2)
        # print "iterm[]",iterm[1],iterm[0][1],iterm[0][3]
        # crop the sub-image matrix
        SubImg = gray[iterm[0][1]:iterm[0][3], iterm[0][0]:iterm[0][2]]
        SubBin = binary[iterm[0][1]:iterm[0][3],
                        iterm[0][0]:iterm[0][2]]  # crop the binary sub-image matrix
        Sub_x, Sub_y = img_center(SubBin, iterm[0][0], iterm[0][1])  # centroid of the sub-image block
        # print "Sub_x,Sub_y is",Sub_x,Sub_y
        Distance = math.sqrt(
            (Sub_x - Center_x) * (Sub_x - Center_x) + (Sub_y - Center_y) *
            (Sub_y - Center_y))  # distance between the block centroid and the image centroid
        # print "Distance is",Distance,Distance/leng_diagonal
        # the ratio of that distance to the image diagonal is also stored in the feature below
        angle = img_angle(Sub_y, Sub_x)  # angle of the block centroid relative to the image corner
        # print "tan is",angle
        Sub_entory = Entory(SubImg)  # entropy of the sub-image block
        Sub_eccentricity = img_eccentricity(SubBin)  # eccentricity of the block
        # print "iterm[2] is",iterm
        Sub_circularity = Subimg_circularity(Sub_x, Sub_y, iterm[2])  # circularity of the block
        Sub_hu = img_hu(SubBin)  # Hu moments of the block
        feature.append([])
        feature[-1] = (iterm[1], (Sub_x, Sub_y), Distance, angle,
                       Distance / leng_diagonal, Sub_entory, Sub_eccentricity,
                       Sub_circularity, Sub_hu)
    return feature
    def detect_and_draw(self, imgmsg):
        if self.pause:
            return
        # frame = cv.QueryFrame( self.capture )
        frame = self.br.imgmsg_to_cv(imgmsg, "bgr8")

        # Convert to HSV and keep the hue
        hsv = cv.CreateImage(cv.GetSize(frame), 8, 3)
        cv.CvtColor(frame, hsv, cv.CV_BGR2HSV)
        self.hue = cv.CreateImage(cv.GetSize(frame), 8, 1)
        cv.Split(hsv, self.hue, None, None, None)

        # Compute back projection
        backproject = cv.CreateImage(cv.GetSize(frame), 8, 1)

        # Run the cam-shift
        cv.CalcArrBackProject([self.hue], backproject, self.hist)
        if self.track_window and is_rect_nonzero(self.track_window):
            crit = (cv.CV_TERMCRIT_EPS | cv.CV_TERMCRIT_ITER, 10, 1)
            (iters, (area, value, rect),
             track_box) = cv.CamShift(backproject, self.track_window, crit)
            self.track_window = rect
            x, y, w, h = rect
            self.bbpub.publish(RegionOfInterest(x, y, w, h, False))
            proba_msg = self.br.cv_to_imgmsg(backproject)
            proba_msg.header = imgmsg.header
            self.bppub.publish(proba_msg)

        # If mouse is pressed, highlight the current selected rectangle
        # and recompute the histogram

        if self.drag_start and is_rect_nonzero(self.selection):
            sub = cv.GetSubRect(frame, self.selection)
            save = cv.CloneMat(sub)
            cv.ConvertScale(frame, frame, 0.5)
            cv.Copy(save, sub)
            x, y, w, h = self.selection
            cv.Rectangle(frame, (x, y), (x + w, y + h), (255, 255, 255))

            sel = cv.GetSubRect(self.hue, self.selection)
            cv.CalcArrHist([sel], self.hist, 0)
            (_, max_val, _, _) = cv.GetMinMaxHistValue(self.hist)
            if max_val != 0:
                cv.ConvertScale(self.hist.bins, self.hist.bins, 255. / max_val)
        elif self.track_window and is_rect_nonzero(self.track_window):
            cv.EllipseBox(frame, track_box, cv.CV_RGB(255, 0, 0), 3, cv.CV_AA,
                          0)

        self.frame = frame
        self.backproject = backproject
Example #27
def camshift(x, y, w, h, selection):
    print "Performing camshift with x:{} y:{} w:{} h:{}".format(x, y, w, h)
    print selection
    hist = cv.CreateHist([180], cv.CV_HIST_ARRAY, [(0, 180)], 1)

    while True:
        print "entered loop"
        #camshift termination criteria (10 iterations without movement of 1 pixel ends camshift)

        frame = cv.QueryFrame(cap)
        cv.Flip(frame, frame, 1)

        #print "switching to HSV"
        hsv = cv.CreateImage(cv.GetSize(frame), 8, 3)
        cv.CvtColor(frame, hsv, cv.CV_BGR2HSV)
        hue = cv.CreateImage(cv.GetSize(frame), 8, 1)
        cv.Split(hsv, hue, None, None, None)

        #compute back projection
        # print "back projection"
        backproject = cv.CreateImage(cv.GetSize(frame), 8, 1)
        cv.CalcArrBackProject([hue], backproject, hist)

        #run the camshift
        #print "camshift"
        print "Selection"
        #pdb.set_trace()
        print selection
        crit = (cv.CV_TERMCRIT_EPS | cv.CV_TERMCRIT_ITER, 10, 1)
        (iters, (area, value, rect),
         track_box) = cv.CamShift(backproject, selection, crit)
        print "rect"
        print rect
        if rect[0] > 0 and rect[1] > 0:
            selection = rect
        print "SelectionNew"
        print selection
        print "track_box"
        print track_box

        #draw the surrounding ellipse
        # print "ellipse"
        cv.EllipseBox(frame, track_box, cv.CV_RGB(255, 0, 0), 3, cv.CV_AA, 0)

        #draw image
        #print "drawing image"
        cv.ShowImage("CamShift", frame)
        if cv.WaitKey(1) & 0xFF == ord('q'):
            break
Example #28
def hog_visualize(hog_image_v, integral_img, cell):
    hog_feat = [0] * 9
    width_img, height_img = cv.GetSize(hog_image_v)
    halfcell = cell / 2
    num_cells_w, num_cells_h = width_img / cell, height_img / cell
    norient = integral_img.shape[2]
    mid = norient / 2
    for y in xrange(num_cells_h-1):
        for x in xrange(num_cells_w-1):
            px,py=x*cell,y*cell
            #features = integral_hog_window(integral_img, (px,py,max(px+8, width_img-1),max(py+8, height_img-1)))
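            # integral_hog_window (defined elsewhere) is assumed to return the per-orientation
            # gradient sums for this cell window, computed from the integral histogram image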
            features = integral_hog_window(integral_img, (px, py, px+cell, py+cell))
            hog_feat = hog_feat + list(features)
            px += halfcell
            py += halfcell

            
            #L1-norm, nice for visualization
            total = np.sum(features)
            maximum_value_feature = np.max(features)
            if total > 1e-3:
                normalized = features/maximum_value_feature
                N = norient
                final = []
                for i in xrange(N):
                    maximum_orient = normalized.argmax()
                    valmax = normalized[maximum_orient]
                    x1 = int(round(valmax*halfcell*np.sin(np.deg2rad(45*(maximum_orient-4)))))
                    y1 = int(round(valmax*halfcell*np.cos(np.deg2rad(45*(maximum_orient-4)))))
                    gradient_val = int(round(255*features[maximum_orient]/total))
                    #print "values of x1 =",x1,"and y1=",y1, "and gv=",gradient_val

                    #don't draw if less than a threshold
                    if gradient_val < 30:
                        break
                    final.insert(0, (x1,y1,gradient_val))
                    normalized[maximum_orient] = 0.
                    
                #draw from smallest to highest gradient magnitude
                for i in xrange(len(final)):
                    x1,y1,gradient_val = final[i]
                    cv.Line(hog_image_v, (px-x1,py+y1), (px+x1,py-y1), cv.CV_RGB(gradient_val, gradient_val, gradient_val), 1, 8)
            else:
                #don't draw if there's no response
                pass
    return hog_feat
Example #29
def drawSquares(img, squares):
    cpy = cv.CloneImage(img)
    # read 4 sequence elements at a time (all vertices of a square)
    i = 0
    while i < squares.total:
        pt = []
        # read 4 vertices
        pt.append(squares[i])
        pt.append(squares[i + 1])
        pt.append(squares[i + 2])
        pt.append(squares[i + 3])

        # draw the square as a closed polyline
        cv.PolyLine(cpy, [pt], 1, cv.CV_RGB(0, 255, 0), 3, cv.CV_AA, 0)
        i += 4

    # show the resultant image
    cv.ShowImage(wndname, cpy)
Example #30
def update_video_with(image):
    cv.SetData(cv_image, image)
    if hands:
        if hands[1]['drawing']:
            update_notification_with('Click to Stop Drawing')
        else:
            update_notification_with('Click to Start Drawing')

        for id in hands:
            cv.PutText(cv_image, hands[id]['color']['name'],
                       hands[id]['current_position'], text_font,
                       cv.CV_RGB(255, 255, 255))
    else:
        update_notification_with('Wave to Interact')
    for button in buttons:
        cv.Rectangle(cv_image, buttons[button]['start'],
                     buttons[button]['end'], buttons[button]['color'], -1,
                     cv.CV_AA, 0)
    cv.ShowImage('Video', cv_image)
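All of the snippets above use the legacy cv (cv2.cv) API, which was removed in OpenCV 3, so cv.CV_RGB no longer exists there. As a rough sketch of the modern equivalent (assuming only cv2 and numpy; the CV_RGB helper below is just an illustration, not an OpenCV function): the legacy macro packed its arguments into a BGR-ordered scalar, so a plain BGR tuple plays the same role with the cv2 drawing functions.

import cv2
import numpy as np

def CV_RGB(r, g, b):
    # the legacy cv.CV_RGB(r, g, b) returned the scalar (b, g, r, 0); a BGR tuple is enough for cv2
    return (b, g, r)

canvas = np.zeros((240, 320, 3), np.uint8)
cv2.rectangle(canvas, (20, 20), (300, 220), CV_RGB(255, 0, 0), 2)   # red border
cv2.circle(canvas, (160, 60), 30, CV_RGB(0, 255, 0), -1)            # filled green disc
cv2.putText(canvas, 'CV_RGB -> BGR tuple', (60, 130),
            cv2.FONT_HERSHEY_SIMPLEX, 0.5, CV_RGB(255, 255, 255), 1)
cv2.imshow('cv_rgb_demo', canvas)
cv2.waitKey(0)
cv2.destroyAllWindows()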