Code example #1
    def determineMarkerOrientation(self, frame):
        (xm, ym) = self.lastMarkerLocation
        realval = cv.Get2D(self.frameReal, ym, xm)[0]
        imagval = cv.Get2D(self.frameImag, ym, xm)[0]
        self.orientation = (math.atan2(-realval, imagval) -
                            math.pi / 2) / self.order

        maxValue = 0
        maxOrient = 0
        searchDist = self.kernelSize / 3
        for k in range(self.order):
            orient = self.orientation + 2 * k * math.pi / self.order
            xm2 = int(xm + searchDist * math.cos(orient))
            ym2 = int(ym + searchDist * math.sin(orient))
            if (xm2 > 0 and ym2 > 0 and xm2 < frame.width
                    and ym2 < frame.height):
                try:
                    intensity = cv.Get2D(frame, ym2, xm2)
                    if intensity[0] > maxValue:
                        maxValue = intensity[0]
                        maxOrient = orient
                except:
                    print("determineMarkerOrientation: error: %d %d %d %d" %
                          (ym2, xm2, frame.width, frame.height))

        self.orientation = self.limitAngleToRange(maxOrient)
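
All of these snippets use the legacy cv API (cv2.cv in OpenCV 2.x), in which cv.Get2D(image, row, col) returns a pixel as a scalar tuple, row (y) first. That module was removed in OpenCV 3, so on current builds the same read is plain NumPy indexing. A minimal sketch of the equivalent lookup, with a hypothetical input file:

import cv2

frame = cv2.imread("marker.jpg")   # hypothetical input image
xm, ym = 120, 80                   # (x, y) marker location, as in the snippet above

# cv.Get2D(frame, ym, xm) corresponds to frame[ym, xm]: row (y) first, then column (x)
pixel = frame[ym, xm]              # array([B, G, R]) for a color image
print pixel[0]                     # blue channel, like cv.Get2D(...)[0]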
Code example #2
    def determineMarkerQuality_naive(self, frame_org):

        phase = np.exp((self.limitAngleToRange(-self.orientation)) * 1j)

        # The same phase-rotated kernel feeds both masks, so compute it once.
        t_temp = self.kernelComplex * np.power(phase, self.order)
        t1 = t_temp.real > self.threshold
        t2 = t_temp.real < -self.threshold

        img_t1_t2_diff = t1.astype(np.float32) - t2.astype(np.float32)

        angleThreshold = math.pi / (2 * self.order)

        armAngle = np.angle(self.KernelRemoveArmComplex * phase)
        t3 = armAngle < angleThreshold
        t4 = armAngle > -angleThreshold
        mask = 1 - 2 * (t3 & t4)

        template = (img_t1_t2_diff) * mask
        template = cv.fromarray(1 - template)

        (xm, ym) = self.lastMarkerLocation

        # floor/ceil must act on the true half-size; with integer division the
        # two calls collapse to the same value and odd kernel sizes lose a pixel
        half = float(self.kernelSize) / 2
        y1 = ym - int(math.floor(half))
        y2 = ym + int(math.ceil(half))

        x1 = xm - int(math.floor(half))
        x2 = xm + int(math.ceil(half))

        try:
            frame = frame_org[y1:y2, x1:x2]
        except (TypeError):
            self.quality = 0
            return
        w, h = cv.GetSize(frame)
        im_dst = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)
        cv.Threshold(frame, im_dst, 128, 1, cv.CV_THRESH_BINARY)

        matches = 0
        blacks = 0
        w, h = cv.GetSize(im_dst)
        for x in xrange(w):
            for y in xrange(h):
                if cv.Get2D(im_dst, y, x)[0] == 0:  # pixel is black
                    blacks += 1
                    if cv.Get2D(im_dst, y, x)[0] == cv.Get2D(template, y, x)[0]:
                        matches += 1

        # self.quality = float(matches) / (w * h)
        self.quality = float(matches) / blacks

        im_dst = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)
        cv.Threshold(frame, im_dst, 115, 255, cv.CV_THRESH_BINARY)

        cv.ShowImage("small_image", im_dst)
        cv.ShowImage("temp_kernel", template)
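
The per-pixel xrange loop above is the slow part of this quality check; once the thresholded patch and the template are NumPy arrays, the same counts fall out of two boolean reductions. A sketch, assuming both arguments are 0/1 arrays of equal shape:

import numpy as np

def match_quality(binary_patch, template):
    # Count black pixels, and the subset whose template pixel agrees,
    # mirroring the matches/blacks ratio computed above.
    black = binary_patch == 0
    blacks = int(black.sum())
    if blacks == 0:
        return 0.0
    matches = int((black & (template == binary_patch)).sum())
    return float(matches) / blacks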
Code example #3
File: tracking.py  Project: clementlandrin/Fund_VAR_1
def getObjectHSV(event, x, y, flags, image):
    # click routine on webcam input
    global hsvmouse
    if event == cv.CV_EVENT_LBUTTONDOWN:
        pixel = cv.Get2D(hsv_image, y, x)
        pixelrgb = cv.Get2D(rgb_image, y, x)
        hsvmouse = pixel
        print "Pixel color (HSV): "
        print hsvmouse

def Color_callibration(capture):
    vals = []
    bgr = []
    mini = [255, 255, 255]
    maxi = [0, 0, 0]
    cv.NamedWindow("BGR", 0)
    print 'Please put your color in the circular area. Press ESC to start calibration:'
    while 1:
        image = cv.QueryFrame(capture)
        cv.Flip(image, image, 1)
        cv.Circle(image, (int(200), int(300)), 10, cv.CV_RGB(255, 255, 255), 4)
        cv.ShowImage("BGR", image)
        c = cv.WaitKey(33)
        if c == 27:
            break
    print 'Starting calibration... Analyzing the object...'
    for i in range(0, 100):
        image = cv.QueryFrame(capture)
        cv.Flip(image, image, 1)
        cv.Smooth(image, image, cv.CV_MEDIAN, 3, 0)
        imagehsv = cv.CreateImage(cv.GetSize(image), 8, 3)
        # NOTE: despite the variable name, this converts to YCrCb, not HSV
        cv.CvtColor(image, imagehsv, cv.CV_BGR2YCrCb)
        vals = cv.Get2D(imagehsv, 300, 200)
        font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 0.5, 1, 0, 2, 8)
        cv.PutText(
            image,
            "  " + str(vals[0]) + "," + str(vals[1]) + "," + str(vals[2]),
            (200, 300), font, (55, 25, 255))
        for j in range(0, 3):
            if (vals[j] < mini[j]): mini[j] = vals[j]
            if (vals[j] > maxi[j]): maxi[j] = vals[j]
        cv.Circle(image, (int(200), int(300)), 10, cv.CV_RGB(255, 255, 255), 4)
        cv.ShowImage("BGR", image)
        c = cv.WaitKey(33)
        if c == 27:
            break
    print 'Analysis completed'
    mini[0] -= 35
    mini[1] -= 15
    mini[2] -= 15
    maxi[0] += 35
    maxi[1] += 15
    maxi[2] += 15
    for i in range(0, 3):
        if (mini[i] < 0):
            mini[i] = 0
        if (maxi[i] > 255):
            maxi[i] = 255
    cv.DestroyWindow("BGR")
    bgr = (mini, maxi)
    return bgr
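
The returned (mini, maxi) pair is meant to feed a range threshold. A hedged usage sketch with the same legacy API (the capture setup and window name are illustrative, not from the original project):

import cv2.cv as cv

capture = cv.CaptureFromCAM(0)
mini, maxi = Color_callibration(capture)

frame = cv.QueryFrame(capture)
converted = cv.CreateImage(cv.GetSize(frame), 8, 3)
cv.CvtColor(frame, converted, cv.CV_BGR2YCrCb)  # same space the calibration sampled

mask = cv.CreateImage(cv.GetSize(frame), 8, 1)
# keep only pixels inside the calibrated per-channel bounds
cv.InRangeS(converted, tuple(mini), tuple(maxi), mask)
cv.ShowImage("mask", mask)
cv.WaitKey(0)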
Code example #5
def get_color(x, y):
    img = cv2.VideoCapture(-1)
    img.set(3, SCREEN_WIDTH)   # 3 = CAP_PROP_FRAME_WIDTH
    img.set(4, SCREEN_HIGHT)   # 4 = CAP_PROP_FRAME_HEIGHT

    _, bgr_image = img.read()

    orig_image = bgr_image

    hsv_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2HSV)

    # Legacy cv.Get2D needs a CvMat and indexes (row, col), so wrap the
    # NumPy array and pass y before x.
    hsv_value = cv.Get2D(cv.fromarray(hsv_image), y, x)

    print hsv_value
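
On OpenCV builds without the legacy module the wrapper is unnecessary, because the cv2 API already hands back NumPy arrays. A sketch of the same function in pure cv2, keeping the snippet's SCREEN_WIDTH / SCREEN_HIGHT constants (spelling as in the source):

import cv2

def get_color_cv2(x, y):
    cap = cv2.VideoCapture(-1)
    cap.set(3, SCREEN_WIDTH)   # CAP_PROP_FRAME_WIDTH
    cap.set(4, SCREEN_HIGHT)   # CAP_PROP_FRAME_HEIGHT
    _, bgr_image = cap.read()
    hsv_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2HSV)
    # NumPy indexing is row-major: y (row) first, then x (column)
    return hsv_image[y, x]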
Code example #6
    def determineMarkerQuality(self):
        (xm, ym) = self.lastMarkerLocation
        realval = cv.Get2D(self.frameReal, ym, xm)[0]
        imagval = cv.Get2D(self.frameImag, ym, xm)[0]
        realvalThirdHarmonics = cv.Get2D(self.frameRealThirdHarmonics, ym,
                                         xm)[0]
        imagvalThirdHarmonics = cv.Get2D(self.frameImagThirdHarmonics, ym,
                                         xm)[0]
        argumentPredicted = 3 * math.atan2(-realval, imagval)
        argumentThirdHarmonics = math.atan2(-realvalThirdHarmonics,
                                            imagvalThirdHarmonics)
        argumentPredicted = self.limitAngleToRange(argumentPredicted)
        argumentThirdHarmonics = self.limitAngleToRange(argumentThirdHarmonics)
        difference = self.limitAngleToRange(argumentPredicted -
                                            argumentThirdHarmonics)
        strength = math.sqrt(realval * realval + imagval * imagval)
        strengthThirdHarmonics = math.sqrt(
            realvalThirdHarmonics * realvalThirdHarmonics +
            imagvalThirdHarmonics * imagvalThirdHarmonics)
        # print("Arg predicted: %5.2f  Arg found: %5.2f  Difference: %5.2f" % (argumentPredicted, argumentThirdHarmonics, difference))
        # print("angdifference: %5.2f  strengthRatio: %8.5f" % (difference, strengthThirdHarmonics / strength))
        # angdifference \in [-0.2; 0.2]
        # strengthRatio \in [0.03; 0.055]
        self.quality = math.exp(-math.pow(difference / 0.3, 2))
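
Examples #1, #2, and #6 all lean on a limitAngleToRange helper that this page never shows. Its contract is clear from use (wrap an angle into roughly (-pi, pi]); a minimal sketch of a plausible implementation, not necessarily the original:

import math

def limitAngleToRange(angle):
    # wrap any angle into the interval (-pi, pi]
    while angle > math.pi:
        angle -= 2 * math.pi
    while angle <= -math.pi:
        angle += 2 * math.pi
    return angle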
Code example #7
def autocalibrate(orig, storage):

    circles = np.asarray(storage)
    #print 'drawing: ' + str(len(circles)) + ' circles'

    min_value = 255
    max_value = 0
    s = []
    # Convert to HSV once, outside the loop; only the per-circle lookup varies.
    processed = cv.CreateImage(cv.GetSize(orig), 8, 3)
    cv.CvtColor(orig, processed, cv.CV_BGR2HSV)
    for circle in circles:
        Radius, x, y = int(circle[0][2]), int(circle[0][0]), int(circle[0][1])
        s.append(cv.Get2D(processed, y, x))

    return s

    #cropped  = cv.CreateImage((Radius/2,Radius/2),8,3)
    #sub = cv.GetSubRect(orig,(x,y,Radius/2,Radius/2))
    #cv.Copy(sub,cropped)
    #cv.ShowImage('cropped',cropped)
    #hist = cv.CreateHist([180], cv.CV_HIST_ARRAY, [(0,180)], 1 )
    #cv.CalcHist(cropped, hist)
Code example #8
def cut(disparity, image, threshold):
    for i in range(0, image.height):
        for j in range(0, image.width):
            # keep closer object
            if cv.GetReal2D(disparity, i, j) > threshold:
                cv.Set2D(disparity, i, j, cv.Get2D(image, i, j))
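
The cut() loop above makes one binding round-trip per pixel. With NumPy arrays the same masking is a single vectorized assignment; a sketch, assuming disparity and image are same-shaped arrays:

import numpy as np

def cut_np(disparity, image, threshold):
    # where the disparity marks a closer object, overwrite it with the
    # image pixel -- the same effect as the Get2D/Set2D loop above
    mask = disparity > threshold
    disparity[mask] = image[mask]
    return disparity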
Code example #9
    def drop_keypoints(self, min_keypoints, outlier_threshold, mse_threshold):
        sum_x = 0
        sum_y = 0
        sum_z = 0
        sse = 0
        # Work on copies: these lists are pruned below, and aliasing them to
        # self.keypoints would also mutate the list being iterated.
        keypoints_xy = list(self.keypoints)
        keypoints_z = list(self.keypoints)
        n_xy = len(self.keypoints)
        n_z = n_xy

        if self.use_depth_for_tracking:
            if self.depth_image is None:
                return ((0, 0, 0), 0, 0, -1)

        # If there are no keypoints left to track, start over
        if n_xy == 0:
            return ((0, 0, 0), 0, 0, -1)

        # Compute the COG (center of gravity) of the cluster
        for point in self.keypoints:
            sum_x = sum_x + point[0]
            sum_y = sum_y + point[1]

        mean_x = sum_x / n_xy
        mean_y = sum_y / n_xy

        if self.use_depth_for_tracking:
            for point in self.keypoints:
                try:
                    z = cv.Get2D(self.depth_image,
                                 min(self.frame_height - 1, int(point[1])),
                                 min(self.frame_width - 1, int(point[0])))
                except:
                    continue
                z = z[0]
                # Depth values can be NaN which should be ignored
                if isnan(z):
                    continue
                else:
                    sum_z = sum_z + z

            mean_z = sum_z / n_z

        else:
            mean_z = -1

        # Compute the x-y MSE (mean squared error) of the cluster in the camera plane
        for point in self.keypoints:
            sse = sse + (point[0] - mean_x) * (point[0] - mean_x) + (
                point[1] - mean_y) * (point[1] - mean_y)
            #sse = sse + abs((point[0] - mean_x)) + abs((point[1] - mean_y))

        # Get the average over the number of feature points
        mse_xy = sse / n_xy

        # The MSE must be > 0 for any sensible feature cluster
        if mse_xy == 0 or mse_xy > mse_threshold:
            return ((0, 0, 0), 0, 0, -1)

        # Throw away the outliers based on the x-y variance
        max_err = 0
        for point in self.keypoints:
            std_err = ((point[0] - mean_x) * (point[0] - mean_x) +
                       (point[1] - mean_y) * (point[1] - mean_y)) / mse_xy
            if std_err > max_err:
                max_err = std_err
            if std_err > outlier_threshold:
                keypoints_xy.remove(point)
                if self.show_add_drop:
                    # Briefly mark the removed points in red
                    cv2.circle(self.marker_image, (point[0], point[1]), 3,
                               (0, 0, 255), cv.CV_FILLED, 2, 0)
                try:
                    keypoints_z.remove(point)
                    n_z = n_z - 1
                except:
                    pass

                n_xy = n_xy - 1

        # Now do the same for depth
        if self.use_depth_for_tracking:
            sse = 0
            for point in keypoints_z:
                try:
                    z = cv.Get2D(self.depth_image,
                                 min(self.frame_height - 1, int(point[1])),
                                 min(self.frame_width - 1, int(point[0])))
                    z = z[0]
                    sse = sse + (z - mean_z) * (z - mean_z)
                except:
                    n_z = n_z - 1

            if n_z != 0:
                mse_z = sse / n_z
            else:
                mse_z = 0

            # Throw away the outliers based on depth using percent error
            # rather than standard error since depth values can jump
            # dramatically at object boundaries
            for point in keypoints_z:
                try:
                    z = cv.Get2D(self.depth_image,
                                 min(self.frame_height - 1, int(point[1])),
                                 min(self.frame_width - 1, int(point[0])))
                    z = z[0]
                except:
                    continue
                try:
                    pct_err = abs(z - mean_z) / mean_z
                    if pct_err > self.pct_err_z:
                        keypoints_xy.remove(point)
                        if self.show_add_drop:
                            # Briefly mark the removed points in red
                            cv2.circle(self.marker_image, (point[0], point[1]),
                                       2, (0, 0, 255), cv.CV_FILLED)
                except:
                    pass
        else:
            mse_z = -1

        self.keypoints = keypoints_xy

        # Consider a cluster bad if we have fewer than min_keypoints left
        if len(self.keypoints) < min_keypoints:
            score = -1
        else:
            score = 1

        return ((mean_x, mean_y, mean_z), mse_xy, mse_z, score)
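
The pruning above only works once keypoints_xy and keypoints_z are real copies; the original assignments bound three names to one list, so every remove() mutated the list still being iterated. The pitfall in miniature:

a = [1, 2, 3]
b = a                  # alias, not a copy: b is the same list object
b.remove(2)
print a                # [1, 3] -- a changed too
print list(a) is a     # False: list(a) builds an independent copy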
Code example #10
    def run(self):
        # Initialize
        # log_file_name = "tracker_output.log"
        # log_file = file( log_file_name, 'a' )
        
        print "hello"
        
        frame = cv.QueryFrame(self.capture)
        frame_size = cv.GetSize(frame)
        
        # Capture the first frame from webcam for image properties
        display_image = cv.QueryFrame(self.capture)
        
        # Greyscale image, thresholded to create the motion mask:
        grey_image = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)
        
        
        # The RunningAvg() function requires a 32-bit or 64-bit image...
        running_average_image = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_32F, 3)
        
        # ...but the AbsDiff() function requires matching image depths:
        running_average_in_display_color_depth = cv.CloneImage(display_image)
        
        # RAM used by FindContours():
        mem_storage = cv.CreateMemStorage(0)
        
        # The difference between the running average and the current frame:
        difference = cv.CloneImage(display_image)
        
        target_count = 1
        last_target_count = 1
        last_target_change_t = 0.0
        k_or_guess = 1
        codebook = []
        frame_count = 0
        last_frame_entity_list = []
        
        t0 = time.time()
        
        # For toggling display:
        image_list = [ "camera", "difference", "threshold", "display", "faces" ]
        image_index = 3  # Index into image_list
    
    
        # Prep for text drawing:
        text_font = cv.InitFont(cv.CV_FONT_HERSHEY_COMPLEX, .5, .5, 0.0, 1, cv.CV_AA)
        text_coord = (5, 15)
        text_color = cv.CV_RGB(255, 255, 255)

        ###############################
        # ## Face detection stuff
        # haar_cascade = cv.Load( 'haarcascades/haarcascade_frontalface_default.xml' )
        haar_cascade = cv.Load('E:\\Softwares\\opencv\\data\\haarcascades\\haarcascade_frontalface_alt.xml')
        # haar_cascade = cv.Load( 'haarcascades/haarcascade_frontalface_alt2.xml' )
        # haar_cascade = cv.Load( 'haarcascades/haarcascade_mcs_mouth.xml' )
        # haar_cascade = cv.Load( 'haarcascades/haarcascade_eye.xml' )
        # haar_cascade = cv.Load( 'haarcascades/haarcascade_frontalface_alt_tree.xml' )
        # haar_cascade = cv.Load( 'haarcascades/haarcascade_upperbody.xml' )
        # haar_cascade = cv.Load( 'haarcascades/haarcascade_profileface.xml' )
        
        # Set this to the max number of targets to look for (passed to k-means):
        max_targets = 5
        
        while True:
            
            # Capture frame from webcam
            camera_image = cv.QueryFrame(self.capture)
            
            frame_count += 1
            frame_t0 = time.time()
            
            # Create an image with interactive feedback:
            display_image = cv.CloneImage(camera_image)
            
            # Create a working "color image" to modify / blur
            color_image = cv.CloneImage(display_image)

            # Smooth to get rid of false positives
            cv.Smooth(color_image, color_image, cv.CV_GAUSSIAN, 19, 0)
            
            # Use the Running Average as the static background            
            # a = 0.020 leaves artifacts lingering way too long.
            # a = 0.320 works well at 320x240, 15fps.  (1/a is roughly num frames.)
            cv.RunningAvg(color_image, running_average_image, 0.320, None)
            
            # Convert the scale of the moving average.
            cv.ConvertScale(running_average_image, running_average_in_display_color_depth, 1.0, 0.0)
            
            # Subtract the current frame from the moving average.
            cv.AbsDiff(color_image, running_average_in_display_color_depth, difference)
            
            # Convert the image to greyscale.
            cv.CvtColor(difference, grey_image, cv.CV_RGB2GRAY)

            # Threshold the image to a black and white motion mask:
            cv.Threshold(grey_image, grey_image, 2, 255, cv.CV_THRESH_BINARY)
            # Smooth and threshold again to eliminate "sparkles"
            cv.Smooth(grey_image, grey_image, cv.CV_GAUSSIAN, 19, 0)
            cv.Threshold(grey_image, grey_image, 240, 255, cv.CV_THRESH_BINARY)
            
            grey_image_as_array = numpy.asarray(cv.GetMat(grey_image))
            non_black_coords_array = numpy.where(grey_image_as_array > 3)
            # Convert from numpy.where()'s two separate lists to one list of (x, y) tuples:
            non_black_coords_array = zip(non_black_coords_array[1], non_black_coords_array[0])
            
            points = []  # Was using this to hold either pixel coords or polygon coords.
            bounding_box_list = []

            # Now calculate movements using the white pixels as "motion" data
            contour = cv.FindContours(grey_image, mem_storage, cv.CV_RETR_CCOMP, cv.CV_CHAIN_APPROX_SIMPLE)
            
            while contour:
                
                bounding_rect = cv.BoundingRect(list(contour))
                point1 = (bounding_rect[0], bounding_rect[1])
                point2 = (bounding_rect[0] + bounding_rect[2], bounding_rect[1] + bounding_rect[3])
                
                bounding_box_list.append((point1, point2))
                polygon_points = cv.ApproxPoly(list(contour), mem_storage, cv.CV_POLY_APPROX_DP)
                
                # To track polygon points only (instead of every pixel):
                # points += list(polygon_points)
                
                # Draw the contours:
                # ##cv.DrawContours(color_image, contour, cv.CV_RGB(255,0,0), cv.CV_RGB(0,255,0), levels, 3, 0, (0,0) )
                cv.FillPoly(grey_image, [ list(polygon_points), ], cv.CV_RGB(255, 255, 255), 0, 0)
                cv.PolyLine(display_image, [ polygon_points, ], 0, cv.CV_RGB(255, 255, 255), 1, 0, 0)
                # cv.Rectangle( display_image, point1, point2, cv.CV_RGB(120,120,120), 1)

                contour = contour.h_next()
            
            
            # Find the average size of the bbox (targets), then
            # remove any tiny bboxes (which are prolly just noise).
            # "Tiny" is defined as any box with 1/10th the area of the average box.
            # This reduces false positives on tiny "sparkles" noise.
            box_areas = []
            # boxes are ((x1, y1), (x2, y2)) tuples; name the corner indices once
            left = top = 0
            right = bottom = 1
            for box in bounding_box_list:
                box_width = box[right][0] - box[left][0]
                box_height = box[bottom][1] - box[top][1]
                box_areas.append(box_width * box_height)
                
                # cv.Rectangle( display_image, box[0], box[1], cv.CV_RGB(255,0,0), 1)
            
            average_box_area = 0.0
            if len(box_areas): average_box_area = float(sum(box_areas)) / len(box_areas)
            
            trimmed_box_list = []
            for box in bounding_box_list:
                box_width = box[right][0] - box[left][0]
                box_height = box[bottom][1] - box[top][1]
                
                # Only keep the box if it's not a tiny noise box:
                if (box_width * box_height) > average_box_area * 0.1: trimmed_box_list.append(box)
            
            # Draw the trimmed box list:
            # for box in trimmed_box_list:
            #    cv.Rectangle( display_image, box[0], box[1], cv.CV_RGB(0,255,0), 2 )
                
            bounding_box_list = merge_collided_bboxes(trimmed_box_list)

            # Draw the merged box list:
            for box in bounding_box_list:
                cv.Rectangle(display_image, box[0], box[1], cv.CV_RGB(0, 255, 0), 1)
            
            # Here are our estimate points to track, based on merged & trimmed boxes:
            estimated_target_count = len(bounding_box_list)
            
            # Don't allow target "jumps" from few to many or many to few.
            # Only change the number of targets up to one target per n seconds.
            # This fixes the "exploding number of targets" when something stops moving
            # and the motion erodes to disparate little puddles all over the place.
            
            if frame_t0 - last_target_change_t < .350:  # 1 change per 0.35 secs
                estimated_target_count = last_target_count
            else:
                if last_target_count - estimated_target_count > 1: estimated_target_count = last_target_count - 1
                if estimated_target_count - last_target_count > 1: estimated_target_count = last_target_count + 1
                last_target_change_t = frame_t0
            
            # Clip to the user-supplied maximum:
            estimated_target_count = min(estimated_target_count, max_targets)
            
            # The estimated_target_count at this point is the maximum number of targets
            # we want to look for.  If kmeans decides that one of our candidate
            # bboxes is not actually a target, we remove it from the target list below.
            
            # Using the numpy values directly (treating all pixels as points):    
            points = non_black_coords_array
            center_points = []
            
            if len(points):
                
                # If we have all the "target_count" targets from last frame,
                # use the previously known targets (for greater accuracy).
                k_or_guess = max(estimated_target_count, 1)  # Need at least one target to look for.
                if len(codebook) == estimated_target_count: 
                    k_or_guess = codebook
                
                # points = vq.whiten(array( points ))  # Don't do this!  Ruins everything.
                codebook, distortion = vq.kmeans(array(points), k_or_guess)
                
                # Convert to tuples (and draw it to screen)
                for center_point in codebook:
                    center_point = (int(center_point[0]), int(center_point[1]))
                    center_points.append(center_point)
                    # cv.Circle(display_image, center_point, 10, cv.CV_RGB(255, 0, 0), 2)
                    # cv.Circle(display_image, center_point, 5, cv.CV_RGB(255, 0, 0), 3)
            
            # Now we have targets that are NOT computed from bboxes -- just
            # movement weights (according to kmeans).  If any two targets are
            # within the same "bbox count", average them into a single target.  
            #
            # (Any kmeans targets not within a bbox are also kept.)
            trimmed_center_points = []
            removed_center_points = []
                        
            for box in bounding_box_list:
                # Find the centers within this box:
                center_points_in_box = []
                
                for center_point in center_points:
                    if center_point[0] < box[right][0] and center_point[0] > box[left][0] and \
                       center_point[1] < box[bottom][1] and center_point[1] > box[top][1]:
                        
                        # This point is within the box.
                        center_points_in_box.append(center_point)
                
                # Now see if there are more than one.  If so, merge them.
                if len(center_points_in_box) > 1:
                    # Merge them:
                    # two separate lists; chained assignment would alias them
                    x_list = []
                    y_list = []
                    for point in center_points_in_box:
                        x_list.append(point[0])
                        y_list.append(point[1])
                    
                    average_x = int(float(sum(x_list)) / len(x_list))
                    average_y = int(float(sum(y_list)) / len(y_list))
                    
                    trimmed_center_points.append((average_x, average_y))
                    
                    # Record that they were removed:
                    removed_center_points += center_points_in_box
                    
                if len(center_points_in_box) == 1:
                    trimmed_center_points.append(center_points_in_box[0])  # Just use it.
            
            # If there are any center_points not within a bbox, just use them.
            # (It's probably a cluster comprised of a bunch of small bboxes.)
            for center_point in center_points:
                if (not center_point in trimmed_center_points) and (not center_point in removed_center_points):
                    trimmed_center_points.append(center_point)
            
            # Draw what we found:
            # for center_point in trimmed_center_points:
            #    center_point = ( int(center_point[0]), int(center_point[1]) )
            #    cv.Circle(display_image, center_point, 20, cv.CV_RGB(255, 255,255), 1)
            #    cv.Circle(display_image, center_point, 15, cv.CV_RGB(100, 255, 255), 1)
            #    cv.Circle(display_image, center_point, 10, cv.CV_RGB(255, 255, 255), 2)
            #    cv.Circle(display_image, center_point, 5, cv.CV_RGB(100, 255, 255), 3)
            
            # Determine if there are any new (or lost) targets:
            actual_target_count = len(trimmed_center_points)
            last_target_count = actual_target_count
            
            # Now build the list of physical entities (objects)
            this_frame_entity_list = []
            
            # An entity is list: [ name, color, last_time_seen, last_known_coords ]
            
            for target in trimmed_center_points:
            
                # Is this a target near a prior entity (same physical entity)?
                entity_found = False
                entity_distance_dict = {}
                
                for entity in last_frame_entity_list:
                    
                    entity_coords = entity[3]
                    delta_x = entity_coords[0] - target[0]
                    delta_y = entity_coords[1] - target[1]
            
                    distance = sqrt(pow(delta_x, 2) + pow(delta_y, 2))
                    entity_distance_dict[ distance ] = entity
                
                # Did we find any non-claimed entities (nearest to furthest):
                distance_list = entity_distance_dict.keys()
                distance_list.sort()
                
                for distance in distance_list:
                    
                    # Yes; see if we can claim the nearest one:
                    nearest_possible_entity = entity_distance_dict[ distance ]
                    
                    # Don't consider entities that are already claimed:
                    if nearest_possible_entity in this_frame_entity_list:
                        # print "Target %s: Skipping the one with distance: %d at %s, C:%s" % (target, distance, nearest_possible_entity[3], nearest_possible_entity[1] )
                        continue
                    
                    # print "Target %s: USING the one with distance: %d at %s, C:%s" % (target, distance, nearest_possible_entity[3] , nearest_possible_entity[1])
                    # Found the nearest entity to claim:
                    entity_found = True
                    nearest_possible_entity[2] = frame_t0  # Update last_time_seen
                    nearest_possible_entity[3] = target  # Update the new location
                    this_frame_entity_list.append(nearest_possible_entity)
                    # log_file.write( "%.3f MOVED %s %d %d\n" % ( frame_t0, nearest_possible_entity[0], nearest_possible_entity[3][0], nearest_possible_entity[3][1]  ) )
                    break
                
                if entity_found == False:
                    # It's a new entity.
                    color = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
                    name = hashlib.md5(str(frame_t0) + str(color)).hexdigest()[:6]
                    last_time_seen = frame_t0
                    
                    new_entity = [ name, color, last_time_seen, target ]
                    this_frame_entity_list.append(new_entity)
                    # log_file.write( "%.3f FOUND %s %d %d\n" % ( frame_t0, new_entity[0], new_entity[3][0], new_entity[3][1]  ) )
            
            # Now "delete" any not-found entities which have expired:
            entity_ttl = 1.0  # 1 sec.
            
            for entity in last_frame_entity_list:
                last_time_seen = entity[2]
                if frame_t0 - last_time_seen > entity_ttl:
                    # It's gone.
                    # log_file.write( "%.3f STOPD %s %d %d\n" % ( frame_t0, entity[0], entity[3][0], entity[3][1]  ) )
                    pass
                else:
                    # Save it for next time... not expired yet:
                    this_frame_entity_list.append(entity)
            
            # For next frame:
            last_frame_entity_list = this_frame_entity_list
            
            # Draw the found entities to screen:
            for entity in this_frame_entity_list:
                center_point = entity[3]
                c = entity[1]  # RGB color tuple
                cv.Circle(display_image, center_point, 20, cv.CV_RGB(c[0], c[1], c[2]), 1)
                cv.Circle(display_image, center_point, 15, cv.CV_RGB(c[0], c[1], c[2]), 1)
                cv.Circle(display_image, center_point, 10, cv.CV_RGB(c[0], c[1], c[2]), 2)
                cv.Circle(display_image, center_point, 5, cv.CV_RGB(c[0], c[1], c[2]), 3)
            
            
            # print "min_size is: " + str(min_size)
            # Listen for ESC or ENTER key
            c = cv.WaitKey(7) % 0x100
            if c == 27 or c == 10:
                break
            
            # Toggle which image to show
#             if chr(c) == 'd':
#                 image_index = ( image_index + 1 ) % len( image_list )
#             
#             image_name = image_list[ image_index ]
#             
#             # Display frame to user
#             if image_name == "camera":
#                 image = camera_image
#                 cv.PutText( image, "Camera (Normal)", text_coord, text_font, text_color )
#             elif image_name == "difference":
#                 image = difference
#                 cv.PutText( image, "Difference Image", text_coord, text_font, text_color )
#             elif image_name == "display":
#                 image = display_image
#                 cv.PutText( image, "Targets (w/AABBs and contours)", text_coord, text_font, text_color )
#             elif image_name == "threshold":
#                 # Convert the image to color.
#                 cv.CvtColor( grey_image, display_image, cv.CV_GRAY2RGB )
#                 image = display_image  # Re-use display image here
#                 cv.PutText( image, "Motion Mask", text_coord, text_font, text_color )
#             elif image_name == "faces":
#                 # Do face detection
#                 detect_faces( camera_image, haar_cascade, mem_storage )                
#                 image = camera_image  # Re-use camera image here
#                 cv.PutText( image, "Face Detection", text_coord, text_font, text_color )
#             cv.ShowImage( "Target", image )
                
                
            image1 = display_image
            detect_faces(camera_image, haar_cascade, mem_storage)
            image2 = camera_image
            
            cv.ShowImage("Target 1", image1)
            cv.ShowImage("Target 2", image2)
            
#             if self.writer: 
#                 cv.WriteFrame( self.writer, image );
            
            # log_file.flush()
            
            # If only using a camera, then there is no time.sleep() needed, 
            # because the camera clips us to 15 fps.  But if reading from a file,
            # we need this to keep the time-based target clipping correct:
            frame_t1 = time.time()
            

            # If reading from a file, put in a forced delay:
            if not self.writer:
                delta_t = frame_t1 - frame_t0
                if delta_t < (1.0 / 15.0): time.sleep((1.0 / 15.0) - delta_t)

            # GetSize() returns (width, height); width is needed below for the
            # left/right/center decision.
            width, height = cv.GetSize(frame)
            image = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, frame.nChannels)        
            cv.Copy(frame, image)
            cv.ShowImage("camera", frame)
            leftImage = cv.CreateImage((image.width, image.height), 8, 1)
            cv.CvtColor(image, leftImage, cv.CV_BGR2GRAY)
              
            frame2 = cv.QueryFrame(self.capture2)
            print type(frame2)
            image2 = cv.CreateImage(cv.GetSize(frame2), cv.IPL_DEPTH_8U, frame2.nChannels)        
            cv.Copy(frame2, image2)
            cv.ShowImage("camera2", frame2)
            rightImage = cv.CreateImage((image2.width, image2.height), 8, 1)
            cv.CvtColor(image2, rightImage, cv.CV_BGR2GRAY)
               
               
            disparity_left = cv.CreateMat(leftImage.height, leftImage.width, cv.CV_16S)
            disparity_right = cv.CreateMat(rightImage.height, rightImage.width, cv.CV_16S)
               
            # data structure initialization
            state = cv.CreateStereoGCState(16, 2)
#             print leftImage.width 
#             print leftImage.height
#             print rightImage.width
#             print rightImage.height
            
            # running the graph-cut algorithm
            cv.FindStereoCorrespondenceGC(leftImage, rightImage, disparity_left, disparity_right, state)
               
            disp_left_visual = cv.CreateMat(leftImage.height, leftImage.width, cv.CV_8U)
            cv.ConvertScale(disparity_left, disp_left_visual, -16)
        #     cv.Save("disparity.pgm", disp_left_visual);  # save the map
        #     
            # cutting the object farthest of a threshold (120)
            cut(disp_left_visual, leftImage, 120)
               
            # cv.NamedWindow('Disparity map', cv.CV_WINDOW_AUTOSIZE)
            cv.ShowImage('Disparity map', disp_left_visual)     
#                 
                
            # track the largest disparity (nearest object) among the moving targets
            maxValue = 0
            maxPoint = None
                
            # now for all the moving object centers get the average intensity value
            for entity in this_frame_entity_list:
                center_point = entity[3]
                c = entity[1]  # RGB color tuple
#                 cv.Circle(display_image, center_point, 20, cv.CV_RGB(c[0], c[1], c[2]), 1)
#                 cv.Circle(display_image, center_point, 15, cv.CV_RGB(c[0], c[1], c[2]), 1)
#                 cv.Circle(display_image, center_point, 10, cv.CV_RGB(c[0], c[1], c[2]), 2)
#                 cv.Circle(display_image, center_point,  5, cv.CV_RGB(c[0], c[1], c[2]), 3)
                # cv.Avg(arr)
                print center_point
                print "value " + str(cv.Get2D(disp_left_visual, center_point[1], center_point[0]))
                # disp_left_visual is single-channel, so one lookup suffices;
                # Get2D pads the missing channels with zeros
                value = cv.Get2D(disp_left_visual, center_point[1], center_point[0])[0]
                if value > maxValue:
                    maxValue = value
                    maxPoint = center_point
            
            print "max value is " + str(maxValue)
            print "max point is " + str(maxPoint)
            
            if maxPoint is not None:
                cv.Circle(display_image, maxPoint, 17, cv.CV_RGB(100, 100, 100), 1)
                
                #distance = 1 / maxValue
                # find if it is right or left or in the middle
                
                if maxValue > 100:
                
                    middle = width / 2
                    if maxPoint[0] > middle + 20:
                        message = "look left"
                    elif maxPoint[0] < middle - 20:
                        message = "look right"
                    else:
                        message = "look middle"
                        
                    print "message " + message

        t1 = time.time()
        time_delta = t1 - t0
        processed_fps = float(frame_count) / time_delta
        print "Got %d frames. %.1f s. %f fps." % (frame_count, time_delta, processed_fps)
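
The heart of this loop (running average, AbsDiff, threshold, contours) maps almost one-to-one onto the cv2 API, which matters because none of the legacy calls above exist in OpenCV 3+. A compressed sketch of just the motion-mask stage, assuming a standard webcam:

import cv2
import numpy as np

cap = cv2.VideoCapture(0)
ok, frame = cap.read()
running_avg = np.float32(frame)

while ok:
    blurred = cv2.GaussianBlur(frame, (19, 19), 0)
    cv2.accumulateWeighted(blurred, running_avg, 0.320)  # running-average background
    background = cv2.convertScaleAbs(running_avg)
    diff = cv2.absdiff(blurred, background)              # motion = frame - background
    grey = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
    _, mask = cv2.threshold(grey, 2, 255, cv2.THRESH_BINARY)
    cv2.imshow("motion mask", mask)
    if (cv2.waitKey(7) & 0xFF) == 27:  # ESC
        break
    ok, frame = cap.read()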
Code example #11
import cv2
import cv2.cv as cv

image = cv2.imread('images/2-result_single1.jpg')

height, width, channels = image.shape

# rectangle() takes (x, y) points, so the x offset comes from width, y from height
cv2.rectangle(image, (width/2 - 5, height/2 - 5), (width/2 + 5, height/2 + 5), (0, 128, 255), -1)

cv2.namedWindow('image', cv2.WINDOW_NORMAL)
cv2.imshow('image', image)
cv2.waitKey(0)

# Convert BGR to HSV
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)

# wrap the NumPy array for the legacy call; Get2D indexes (row, col)
s = cv.Get2D(cv.fromarray(hsv), height/2, width/2)

print "H:",s[0],"      S:",s[1],"       V:",s[2]

H = s[0]
S = s[1]
V = s[2]

print(image[height/2, width/2])
print(hsv[height/2, width/2])
# For reference, the C++ equivalents of the same pixel reads:
#   Vec3b color = image.at<Vec3b>(Point(x, y));
#   bgrPixel.val[0] = rowPtr[j*cn + 0]; // B
#   bgrPixel.val[1] = rowPtr[j*cn + 1]; // G
#   bgrPixel.val[2] = rowPtr[j*cn + 2]; // R
Code example #12
        # for more details about cv.BoundingRect,see documentation
        pt1 = (bound_rect[0], bound_rect[1])
        pt2 = (bound_rect[0] + bound_rect[2], bound_rect[1] + bound_rect[3])
        points.append(pt1)
        points.append(pt2)
        cv.Rectangle(color_image, pt1, pt2, cv.CV_RGB(255, 0, 0), 1)

        # calculating centroids
        centroidx = cv.Round((pt1[0] + pt2[0]) / 2)
        centroidy = cv.Round((pt1[1] + pt2[1]) / 2)

        # identifying if blue blobs exist and adding centroids to corresponding lists.
        # note that the lower and upper bounds correspond to the lower and upper bounds
        # in the getthresholdedimg(im): function earlier in the script.
        # e.g., yellow has a lower bound of 95 and an upper bound of 115 in both sections of code
        if (55 < cv.Get2D(imghsv, centroidy, centroidx)[0] < 155):
            blue.append((centroidx, centroidy))

    # draw colors in windows; exception handling is used to avoid IndexError.
    # after drawing is over, centroid from previous part is removed from list by pop.
    # so in next frame, centroids in this frame become initial points of line to draw

    # draw blue box around blue blimp blob
    try:
        cv.Circle(imdraw, blue[1], 5, (255, 0, 0))
        cv.Line(imdraw, blue[0], blue[1], (255, 0, 0), 3, 8, 0)
        blue.pop(0)
        print("centroid x:" + str(centroidx))
        print("centroid y:" + str(centroidy))
        print("")
    except IndexError:
        pass  # fewer than two centroids so far; nothing to draw yet
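
The hue test above (55 < hue < 155) is a per-centroid version of a whole-image range threshold. The same bounds expressed as a cv2 mask, as a sketch (the hue limits are copied from the snippet; the saturation/value floors are assumptions -- OpenCV 8-bit hue runs 0-179):

import cv2
import numpy as np

def blue_mask(bgr_frame):
    hsv = cv2.cvtColor(bgr_frame, cv2.COLOR_BGR2HSV)
    lower = np.array([55, 50, 50], np.uint8)
    upper = np.array([155, 255, 255], np.uint8)
    return cv2.inRange(hsv, lower, upper)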
Code example #13
cap = cv2.VideoCapture("http://192.168.1.2:8080/videofeed?dummy=file.mjpeg")

cv.NamedWindow('HSV', cv.CV_WINDOW_AUTOSIZE)
cv.NamedWindow('HS_', cv.CV_WINDOW_AUTOSIZE)

# running the classifiers
storage = cv.CreateMemStorage()

while True:

    _, frame = cap.read()
    #frame = cv2.medianBlur(frame,5)
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    hsv[:, :, 2] = 0
    cv2.imshow('HSV', hsv)
    # hsv[:,:,1] = cv2.threshold(hsv[:,:,1], 65, 255, cv2.THRESH_BINARY)
    # TR_MIN = np.array([5, 50, 50],np.uint8)
    # TR_MAX = np.array([15, 255, 255],np.uint8)
    # frame_threshed = cv2.inRange(hsv, TR_MIN, TR_MAX)
    # the callback must target an existing window; 'HSV' is created above
    # ('camera' never is in this snippet)
    cv.SetMouseCallback("HSV", on_mouse, 0)
    s = cv.Get2D(cv.fromarray(hsv), y_co, x_co)
    if s[1] < 20:
        hsv[:, :, 1] = 0
    cv2.imshow('HS_', hsv)

    print "H:", s[0], "      S:", s[1], "       V:", s[2]

    c = cv.WaitKey(1)
    if c == 27:
        break
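
The snippet never defines on_mouse, x_co, or y_co; they presumably live earlier in the file and record the last mouse position for the Get2D lookup. A plausible sketch of such a callback (names match the snippet, implementation assumed):

x_co, y_co = 0, 0

def on_mouse(event, x, y, flags, param):
    # remember the cursor position so the main loop can sample
    # the HSV value under the mouse
    global x_co, y_co
    x_co, y_co = x, y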
Code example #14
File: rasp_color_320.py  Project: lentin/Sample_Codes
                    pub.publish(pub_string)
                    y_count = 0
                    print "Right Offset=", value

            elif (int(x) < 190 and int(x) > 130):
                value = 30

                cv.PutText(frame, "Center[" + str(value) + "]",
                           (int(x), int(y)), font, (0, 255, 0))
                pub_string = "c" + chr(int(value))
                pub.publish(pub_string)
                print "Center", value

        cv.SetMouseCallback("result", on_mouse, 0)
        if (count == 0):
            s = cv.Get2D(hsv, y_co, x_co)

            h1 = s[0] - 40
            h2 = s[0] + 40

            s1 = s[1] - 40
            s2 = s[1] + 40

            v1 = s[2] - 40
            v2 = s[2] + 40

            count = 1

#           print "H1:", h1, "      S1:", s1, "       V1:", v1
#           print "H2:", h2, "      S2:", s2, "       V2:", v2
Code example #15
		contour = contour.h_next()
		print contour
		# for more details about cv.BoundingRect,see documentation
		pt1 = (bound_rect[0], bound_rect[1])
		pt2 = (bound_rect[0] + bound_rect[2], bound_rect[1] + bound_rect[3])
		points.append(pt1)
		points.append(pt2)
		cv.Rectangle(color_image, pt1, pt2, cv.CV_RGB(255,0,0), 1)

	#	Calculating centroids

		centroidx = cv.Round((pt1[0]+pt2[0])/2)
		centroidy = cv.Round((pt1[1]+pt2[1])/2)

	#	Identifying if blue or yellow blobs and adding centroids to corresponding lists	
		if (20 < cv.Get2D(imghsv,centroidy,centroidx)[0] < 30):
			yellow.append((centroidx,centroidy))
		elif (100 < cv.Get2D(imghsv,centroidy,centroidx)[0] < 120): # 100 120
			blue.append((centroidx,centroidy))
		elif (150 < cv.Get2D(imghsv,centroidy,centroidx)[0] < 170):
			purple.append((centroidx,centroidy))

	# Now the drawing part. Exception handling is used to avoid IndexError.
	# After drawing is over, the centroid from the previous part is removed from
	# the list by pop, so in the next frame the centroids in this frame become
	# the initial points of the line to draw.
	try:
		cv.Circle(imdraw, yellow[1], 5, (0,255,255))
		cv.Line(imdraw, yellow[0], yellow[1], (0,255,255), 3, 8, 0)
		yellow.pop(0)
	except IndexError:
		print "Just wait for yellow"

	try:
Code example #16
		
		# UPDATED 9/22: 20 X AND Y PIXEL MINIMUM TO DRAW RED BOX AROUND BLIMP
		if ypix > 20 and xpix > 20:
			cv.Rectangle(color_image, pt1, pt2, cv.CV_RGB(255,0,0), 1)

		# calculating centroids
		centroidx = cv.Round((pt1[0]+pt2[0])/2)
		centroidy = cv.Round((pt1[1]+pt2[1])/2)

		# identifying if blue blobs exist and adding centroids to corresponding lists.
		# note that the lower and upper bounds correspond to the lower and upper bounds
		# in the getthresholdedimg(im): function earlier in the script.
		# e.g., yellow has a lower bound of 95 and an upper bound of 115 in both sections of code
		
		# UPDATED 9/22: 20 X AND Y PIXEL MINIMUM TO BE APPENDED TO CENTROID LISTS
		if (55 < cv.Get2D(imghsv,centroidy,centroidx)[0] < 155) and ypix > 20 and xpix > 20: 
			blue.append((centroidx,centroidy))

	# draw colors in windows; exception handling is used to avoid IndexError.	
	# after drawing is over, centroid from previous part is removed from list by pop. 
	# so in next frame, centroids in this frame become initial points of line to draw	
	
	# draw blue box around blue blimp blob
	try:
		cv.Circle(imdraw, blue[1], 5, (255,0,0))
		cv.Line(imdraw, blue[0], blue[1], (255,0,0), 3, 8, 0) 
		print('xpix:'+str(xpix))
		blue.pop(0)
		print("centroid x:" + str(centroidx))
		print("centroid y:" + str(centroidy))
		print("")		
Code example #17
import cv2.cv as cv

im = cv.LoadImageM("../img/alkaline.jpg")

#Access a specific pixel
print im[3, 3]

print cv.Get1D(im, 3)

print cv.Get2D(im, 3, 3)  # etc..

#cv.GetND(im, [3,3,3,3]) for a 4 dimension array

col0 = cv.GetCol(im, 0)  #Return the first column
cols = cv.GetCols(im, 0, 10)  # Return a matrix of the ten first column

row = cv.GetRow(im, 0)  #Return the first row (first pixels line)
rows = cv.GetRows(im, 0, 10)  # Return the ten first rows of the image

#---------------------------

#Iterate through pixels
red_sum = 0
green_sum = 0
blue_sum = 0
c = 0
# note: range(0, im.rows - 1) would skip the last row/column
for i in range(0, im.rows):
    for j in range(0, im.cols):
        c = c + 1
        red_sum += im[i, j][0]
        green_sum += im[i, j][1]
        blue_sum += im[i, j][2]
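
Summing channels pixel-by-pixel through the binding is slow; converting the CvMat to NumPy once reduces the whole loop to a single reduction. A sketch (note that LoadImageM yields BGR order, so channel 0 is blue and channel 2 is red):

import numpy as np

arr = np.asarray(im)                      # CvMat -> array, shape (rows, cols, 3)
channel_sums = arr.reshape(-1, 3).sum(axis=0)
count = arr.shape[0] * arr.shape[1]
print channel_sums / float(count)         # per-channel means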