Example #1
def getContours(im, approx_value=1):  # Return approximated contours
    storage = cv.CreateMemStorage(0)
    contours = cv.FindContours(cv.CloneImage(im), storage, cv.CV_RETR_CCOMP,
                               cv.CV_CHAIN_APPROX_SIMPLE)
    contourLow = cv.ApproxPoly(contours, storage, cv.CV_POLY_APPROX_DP,
                               approx_value, approx_value)
    return contourLow
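
For reference, a rough cv2-API counterpart of Example #1 might look like the sketch below. It is not part of the original source: it assumes a single-channel uint8 input and handles the 2- vs 3-tuple return of cv2.findContours across OpenCV versions.

import cv2

def get_contours_cv2(binary_img, approx_value=1.0):
    # Sketch of getContours() above using the cv2 API (hypothetical helper).
    found = cv2.findContours(binary_img.copy(), cv2.RETR_CCOMP,
                             cv2.CHAIN_APPROX_SIMPLE)
    contours = found[0] if len(found) == 2 else found[1]  # version-dependent return
    return [cv2.approxPolyDP(c, approx_value, True) for c in contours]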
Example #2
def find_squares_from_binary( gray ):
    """
    use contour search to find squares in binary image
    returns list of numpy arrays containing 4 points
    """
    squares = []
    storage = cv.CreateMemStorage(0)
    contours = cv.FindContours(gray, storage, cv.CV_RETR_TREE, cv.CV_CHAIN_APPROX_SIMPLE, (0,0))  
    storage = cv.CreateMemStorage(0)
    while contours:
        #approximate contour with accuracy proportional to the contour perimeter
        arclength = cv.ArcLength(contours)
        polygon = cv.ApproxPoly( contours, storage, cv.CV_POLY_APPROX_DP, arclength * 0.02, 0)
        if is_square(polygon):
            squares.append(polygon[0:4])
        contours = contours.h_next()

    return squares
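
is_square() is referenced above but not defined in this excerpt. A plausible stand-in, offered purely as an assumption, accepts four-vertex polygons whose corner angles are all close to 90 degrees:

import numpy as np

def is_square(polygon, max_cos=0.3):
    # Hypothetical helper (the original is not shown): accept 4-vertex
    # polygons whose corner cosines are all below max_cos (~90 deg angles).
    pts = [tuple(p) for p in polygon]
    if len(pts) != 4:
        return False
    pts = np.array(pts, dtype=np.float64)
    for i in range(4):
        v1 = pts[(i - 1) % 4] - pts[i]   # edge into vertex i
        v2 = pts[(i + 1) % 4] - pts[i]   # edge out of vertex i
        denom = np.linalg.norm(v1) * np.linalg.norm(v2)
        if denom == 0 or abs(np.dot(v1, v2)) / denom >= max_cos:
            return False
    return True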
Example #3
    def run(self):
        # Capture first frame to get size
        frame = cv.QueryFrame(self.capture)
        # nframes += 1

        frame_size = cv.GetSize(frame)
        color_image = cv.CreateImage(cv.GetSize(frame), 8, 3)
        grey_image = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)
        moving_average = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_32F, 3)

        def totuple(a):
            try:
                return tuple(totuple(i) for i in a)
            except TypeError:
                return a

        first = True

        while True:
            closest_to_left = cv.GetSize(frame)[0]
            closest_to_right = cv.GetSize(frame)[1]

            color_image = cv.QueryFrame(self.capture)

            # Smooth to get rid of false positives
            cv.Smooth(color_image, color_image, cv.CV_GAUSSIAN, 3, 0)

            if first:
                difference = cv.CloneImage(color_image)
                temp = cv.CloneImage(color_image)
                cv.ConvertScale(color_image, moving_average, 1.0, 0.0)
                first = False
            else:
                cv.RunningAvg(color_image, moving_average, .1, None)
                cv.ShowImage("BG", moving_average)

            # Convert the scale of the moving average.
            cv.ConvertScale(moving_average, temp, 1, 0.0)

            # Minus the current frame from the moving average.
            cv.AbsDiff(color_image, temp, difference)
            #cv.ShowImage("BG",difference)

            # Convert the image to grayscale.
            cv.CvtColor(difference, grey_image, cv.CV_RGB2GRAY)
            cv.ShowImage("BG1", grey_image)

            # Convert the image to black and white.
            cv.Threshold(grey_image, grey_image, 40, 255, cv.CV_THRESH_BINARY)
            #cv.ShowImage("BG2", grey_image)

            # Dilate and erode to get people blobs
            cv.Dilate(grey_image, grey_image, None, 8)
            cv.Erode(grey_image, grey_image, None, 3)
            cv.ShowImage("BG3", grey_image)

            storage = cv.CreateMemStorage(0)
            global contour
            contour = cv.FindContours(grey_image, storage, cv.CV_RETR_CCOMP,
                                      cv.CV_CHAIN_APPROX_SIMPLE)

            points = []

            while contour:
                global bound_rect
                bound_rect = cv.BoundingRect(list(contour))
                polygon_points = cv.ApproxPoly(list(contour), storage,
                                               cv.CV_POLY_APPROX_DP)
                contour = contour.h_next()

                global pt1, pt2
                pt1 = (bound_rect[0], bound_rect[1])
                pt2 = (bound_rect[0] + bound_rect[2],
                       bound_rect[1] + bound_rect[3])

                # size control: keep only boxes wider and taller than 10 px
                if bound_rect[2] > 10 and bound_rect[3] > 10:

                    points.append(pt1)
                    points.append(pt2)

                    #points += list(polygon_points)
                    global box, box2, box3, box4, box5
                    box = cv.MinAreaRect2(polygon_points)
                    box2 = cv.BoxPoints(box)
                    box3 = np.int0(np.around(box2))
                    box4 = totuple(box3)
                    box5 = box4 + (box4[0], )

                    cv.FillPoly(grey_image, [
                        list(polygon_points),
                    ], cv.CV_RGB(255, 255, 255), 0, 0)
                    cv.PolyLine(color_image, [
                        polygon_points,
                    ], 0, cv.CV_RGB(255, 255, 255), 1, 0, 0)
                    cv.PolyLine(color_image, [list(box5)], 0, (0, 0, 255), 2)
                    #cv.Rectangle(color_image, pt1, pt2, cv.CV_RGB(255,0,0), 1)

                    if len(points):
                        #center_point = reduce(lambda a, b: ((a[0] + b[0]) / 2, (a[1] + b[1]) / 2), points)
                        center1 = (pt1[0] + pt2[0]) / 2
                        center2 = (pt1[1] + pt2[1]) / 2
                        #print center1, center2, center_point
                        #cv.Circle(color_image, center_point, 40, cv.CV_RGB(255, 255, 255), 1)
                        #cv.Circle(color_image, center_point, 30, cv.CV_RGB(255, 100, 0), 1)
                        #cv.Circle(color_image, center_point, 20, cv.CV_RGB(255, 255, 255), 1)
                        cv.Circle(color_image, (center1, center2), 5,
                                  cv.CV_RGB(0, 0, 255), -1)

            cv.ShowImage("Target", color_image)

            # Listen for ESC key
            c = cv.WaitKey(7) % 0x100
            if c == 27:
                #cv.DestroyAllWindows()
                break
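
The core of this loop (running-average background, absolute difference, threshold, dilate/erode) maps onto the cv2 API roughly as below. This is a sketch, not the original code; it assumes BGR uint8 frames, and the name motion_mask_step is made up here. Pass running_avg=None on the first frame; the function returns the updated average for the next call.

import cv2
import numpy as np

def motion_mask_step(frame_bgr, running_avg, alpha=0.1, thresh=40):
    # One iteration of the running-average differencing used in Example #3.
    blurred = cv2.GaussianBlur(frame_bgr, (3, 3), 0)
    if running_avg is None:
        running_avg = blurred.astype(np.float32)
    cv2.accumulateWeighted(blurred, running_avg, alpha)   # cv.RunningAvg
    background = cv2.convertScaleAbs(running_avg)          # cv.ConvertScale
    diff = cv2.absdiff(blurred, background)                # cv.AbsDiff
    grey = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
    _, mask = cv2.threshold(grey, thresh, 255, cv2.THRESH_BINARY)
    mask = cv2.dilate(mask, None, iterations=8)
    mask = cv2.erode(mask, None, iterations=3)
    return mask, running_avg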
Example #4
    # create the window for the contours
    cv.NamedWindow("contours", cv.CV_WINDOW_NORMAL)

    # create the trackbar, to enable the change of the displayed level
    cv.CreateTrackbar("levels+3", "contours", 3, 7, on_contour)

    # create the storage area for contour image
    storage = cv.CreateMemStorage(0)

    # find the contours
    contours = cv.FindContours(image, storage, cv.CV_RETR_TREE,
                               cv.CV_CHAIN_APPROX_SIMPLE, (0, 0))

    # polygon approximation
    contours = cv.ApproxPoly(contours, storage, cv.CV_POLY_APPROX_DP, 3, 1)

    # call one time the callback, so we will have the 1st display of contour window
    on_contour(_DEFAULT_LEVEL)

    cv.WaitKey(0)
    cv.SaveImage('C:\\3d-Model\\bin\\segmentation_files\\pic_contours.jpg',
                 contours_image)  # save the contour image to disk
    cv.DestroyAllWindows()

    #log code contour to file for progress
    with open('C:\\3d-Model\\bin\\segmentation_files\\progress.txt',
              'w') as myFile:
        myFile.write("contour")

    #open gimp after end of thresholding and contour
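
on_contour() and _DEFAULT_LEVEL are used above but not shown. Judging by the "levels+3" trackbar, the callback presumably redraws the contour tree at the selected depth, much like OpenCV's classic contours demo. A hypothetical reconstruction, assuming module-level contours and contours_image:

def on_contour(levels):
    # Hypothetical trackbar callback (the original is not shown): redraw
    # the contour tree at depth (levels - 3) into contours_image.
    cv.SetZero(contours_image)
    if contours:
        cv.DrawContours(contours_image, contours,
                        cv.CV_RGB(255, 0, 0),   # external contour color
                        cv.CV_RGB(0, 255, 0),   # hole contour color
                        levels - 3, 3, cv.CV_AA, (0, 0))
    cv.ShowImage("contours", contours_image)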
Example #5
    def run(self):
        # Initialize
        # log_file_name = "tracker_output.log"
        # log_file = file( log_file_name, 'a' )

        print "hello"

        frame = cv.QueryFrame(self.capture)
        frame_size = cv.GetSize(frame)

        # Capture the first frame from webcam for image properties
        display_image = cv.QueryFrame(self.capture)

        # Greyscale image, thresholded to create the motion mask:
        grey_image = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)

        # The RunningAvg() function requires a 32-bit or 64-bit image...
        running_average_image = cv.CreateImage(cv.GetSize(frame),
                                               cv.IPL_DEPTH_32F, 3)

        # ...but the AbsDiff() function requires matching image depths:
        running_average_in_display_color_depth = cv.CloneImage(display_image)

        # RAM used by FindContours():
        mem_storage = cv.CreateMemStorage(0)

        # The difference between the running average and the current frame:
        difference = cv.CloneImage(display_image)

        target_count = 1
        last_target_count = 1
        last_target_change_t = 0.0
        k_or_guess = 1
        codebook = []
        frame_count = 0
        last_frame_entity_list = []

        t0 = time.time()

        # For toggling display:
        image_list = ["camera", "difference", "threshold", "display", "faces"]
        image_index = 3  # Index into image_list

        # Prep for text drawing:
        text_font = cv.InitFont(cv.CV_FONT_HERSHEY_COMPLEX, .5, .5, 0.0, 1,
                                cv.CV_AA)
        text_coord = (5, 15)
        text_color = cv.CV_RGB(255, 255, 255)

        # Set this to the max number of targets to look for (passed to k-means):
        max_targets = 5

        while True:

            # Capture frame from webcam
            camera_image = cv.QueryFrame(self.capture)

            frame_count += 1
            frame_t0 = time.time()

            # Create an image with interactive feedback:
            display_image = cv.CloneImage(camera_image)

            # Create a working "color image" to modify / blur
            color_image = cv.CloneImage(display_image)

            # Smooth to get rid of false positives
            cv.Smooth(color_image, color_image, cv.CV_GAUSSIAN, 19, 0)

            # Use the Running Average as the static background
            # a = 0.020 leaves artifacts lingering way too long.
            # a = 0.320 works well at 320x240, 15fps.  (1/a is roughly num frames.)
            cv.RunningAvg(color_image, running_average_image, 0.320, None)

            #             cv.ShowImage("background ", running_average_image)

            # Convert the scale of the moving average.
            cv.ConvertScale(running_average_image,
                            running_average_in_display_color_depth, 1.0, 0.0)

            # Subtract the current frame from the moving average.
            cv.AbsDiff(color_image, running_average_in_display_color_depth,
                       difference)

            cv.ShowImage("difference ", difference)

            # Convert the image to greyscale.
            cv.CvtColor(difference, grey_image, cv.CV_RGB2GRAY)

            # Threshold the image to a black and white motion mask:
            cv.Threshold(grey_image, grey_image, 2, 255, cv.CV_THRESH_BINARY)
            # Smooth and threshold again to eliminate "sparkles"
            cv.Smooth(grey_image, grey_image, cv.CV_GAUSSIAN, 19, 0)

            cv.Threshold(grey_image, grey_image, 240, 255, cv.CV_THRESH_BINARY)

            grey_image_as_array = numpy.asarray(cv.GetMat(grey_image))
            non_black_coords_array = numpy.where(grey_image_as_array > 3)
            # Convert from numpy.where()'s two separate lists to one list of (x, y) tuples:
            non_black_coords_array = zip(non_black_coords_array[1],
                                         non_black_coords_array[0])

            points = []  # Was using this to hold either pixel coords or polygon coords.
            bounding_box_list = []

            # Now calculate movements using the white pixels as "motion" data
            contour = cv.FindContours(grey_image, mem_storage,
                                      cv.CV_RETR_CCOMP,
                                      cv.CV_CHAIN_APPROX_SIMPLE)

            levels = 10
            while contour:

                bounding_rect = cv.BoundingRect(list(contour))
                point1 = (bounding_rect[0], bounding_rect[1])
                point2 = (bounding_rect[0] + bounding_rect[2],
                          bounding_rect[1] + bounding_rect[3])

                bounding_box_list.append((point1, point2))
                polygon_points = cv.ApproxPoly(list(contour), mem_storage,
                                               cv.CV_POLY_APPROX_DP)

                # To track polygon points only (instead of every pixel):
                # points += list(polygon_points)

                # Draw the contours:
                cv.DrawContours(color_image, contour, cv.CV_RGB(255, 0, 0),
                                cv.CV_RGB(0, 255, 0), levels, 3, 0, (0, 0))
                cv.FillPoly(grey_image, [
                    list(polygon_points),
                ], cv.CV_RGB(255, 255, 255), 0, 0)
                cv.PolyLine(display_image, [
                    polygon_points,
                ], 0, cv.CV_RGB(255, 255, 255), 1, 0, 0)
                # cv.Rectangle( display_image, point1, point2, cv.CV_RGB(120,120,120), 1)

                contour = contour.h_next()

            # Find the average size of the bbox (targets), then
            # remove any tiny bboxes (which are prolly just noise).
            # "Tiny" is defined as any box with 1/10th the area of the average box.
            # This reduces false positives on tiny "sparkles" noise.
            box_areas = []
            for box in bounding_box_list:
                # left/right/top/bottom are assumed module-level tuple indices
                # (left = top = 0, right = bottom = 1) defined outside this excerpt.
                box_width = box[right][0] - box[left][0]
                box_height = box[bottom][1] - box[top][1]
                box_areas.append(box_width * box_height)

                # cv.Rectangle( display_image, box[0], box[1], cv.CV_RGB(255,0,0), 1)

            average_box_area = 0.0
            if len(box_areas):
                average_box_area = float(sum(box_areas)) / len(box_areas)

            trimmed_box_list = []
            for box in bounding_box_list:
                box_width = box[right][0] - box[left][0]
                box_height = box[bottom][1] - box[top][1]

                # Only keep the box if it's not a tiny noise box:
                if (box_width * box_height) > average_box_area * 0.1:
                    trimmed_box_list.append(box)

            # Draw the trimmed box list:
            # for box in trimmed_box_list:
            #    cv.Rectangle( display_image, box[0], box[1], cv.CV_RGB(0,255,0), 2 )

            bounding_box_list = merge_collided_bboxes(trimmed_box_list)

            # Draw the merged box list:
            for box in bounding_box_list:
                cv.Rectangle(display_image, box[0], box[1],
                             cv.CV_RGB(0, 255, 0), 1)

            # Here are our estimate points to track, based on merged & trimmed boxes:
            estimated_target_count = len(bounding_box_list)

            # Don't allow target "jumps" from few to many or many to few.
            # Only change the number of targets up to one target per n seconds.
            # This fixes the "exploding number of targets" when something stops moving
            # and the motion erodes to disparate little puddles all over the place.

            if frame_t0 - last_target_change_t < .350:  # 1 change per 0.35 secs
                estimated_target_count = last_target_count
            else:
                if last_target_count - estimated_target_count > 1:
                    estimated_target_count = last_target_count - 1
                if estimated_target_count - last_target_count > 1:
                    estimated_target_count = last_target_count + 1
                last_target_change_t = frame_t0

            # Clip to the user-supplied maximum:
            estimated_target_count = min(estimated_target_count, max_targets)

            # The estimated_target_count at this point is the maximum number of targets
            # we want to look for.  If kmeans decides that one of our candidate
            # bboxes is not actually a target, we remove it from the target list below.

            # Using the numpy values directly (treating all pixels as points):
            points = non_black_coords_array
            center_points = []

            if len(points):

                # If we have all the "target_count" targets from last frame,
                # use the previously known targets (for greater accuracy).
                k_or_guess = max(estimated_target_count,
                                 1)  # Need at least one target to look for.
                if len(codebook) == estimated_target_count:
                    k_or_guess = codebook

                # points = vq.whiten(array( points ))  # Don't do this!  Ruins everything.
                codebook, distortion = vq.kmeans(array(points), k_or_guess)

                # Convert to tuples (and draw it to screen)
                for center_point in codebook:
                    center_point = (int(center_point[0]), int(center_point[1]))
                    center_points.append(center_point)
                    # cv.Circle(display_image, center_point, 10, cv.CV_RGB(255, 0, 0), 2)
                    # cv.Circle(display_image, center_point, 5, cv.CV_RGB(255, 0, 0), 3)

            # Now we have targets that are NOT computed from bboxes -- just
            # movement weights (according to kmeans).  If any two targets are
            # within the same "bbox count", average them into a single target.
            #
            # (Any kmeans targets not within a bbox are also kept.)
            trimmed_center_points = []
            removed_center_points = []

            for box in bounding_box_list:
                # Find the centers within this box:
                center_points_in_box = []

                for center_point in center_points:
                    if center_point[0] < box[right][0] and center_point[0] > box[left][0] and \
                       center_point[1] < box[bottom][1] and center_point[1] > box[top][1]:

                        # This point is within the box.
                        center_points_in_box.append(center_point)

                # Now see if there are more than one.  If so, merge them.
                if len(center_points_in_box) > 1:
                    # Merge them:
                    x_list, y_list = [], []  # two separate lists; chained assignment would alias them
                    for point in center_points_in_box:
                        x_list.append(point[0])
                        y_list.append(point[1])

                    average_x = int(float(sum(x_list)) / len(x_list))
                    average_y = int(float(sum(y_list)) / len(y_list))

                    trimmed_center_points.append((average_x, average_y))

                    # Record that they were removed:
                    removed_center_points += center_points_in_box

                if len(center_points_in_box) == 1:
                    trimmed_center_points.append(
                        center_points_in_box[0])  # Just use it.

            # If there are any center_points not within a bbox, just use them.
            # (It's probably a cluster comprised of a bunch of small bboxes.)
            for center_point in center_points:
                if (not center_point in trimmed_center_points) and (
                        not center_point in removed_center_points):
                    trimmed_center_points.append(center_point)

            # Draw what we found:
            # for center_point in trimmed_center_points:
            #    center_point = ( int(center_point[0]), int(center_point[1]) )
            #    cv.Circle(display_image, center_point, 20, cv.CV_RGB(255, 255,255), 1)
            #    cv.Circle(display_image, center_point, 15, cv.CV_RGB(100, 255, 255), 1)
            #    cv.Circle(display_image, center_point, 10, cv.CV_RGB(255, 255, 255), 2)
            #    cv.Circle(display_image, center_point, 5, cv.CV_RGB(100, 255, 255), 3)

            # Determine if there are any new (or lost) targets:
            actual_target_count = len(trimmed_center_points)
            last_target_count = actual_target_count

            # Now build the list of physical entities (objects)
            this_frame_entity_list = []

            # An entity is list: [ name, color, last_time_seen, last_known_coords ]

            for target in trimmed_center_points:

                # Is this a target near a prior entity (same physical entity)?
                entity_found = False
                entity_distance_dict = {}

                for entity in last_frame_entity_list:

                    entity_coords = entity[3]
                    delta_x = entity_coords[0] - target[0]
                    delta_y = entity_coords[1] - target[1]

                    distance = sqrt(pow(delta_x, 2) + pow(delta_y, 2))
                    entity_distance_dict[distance] = entity

                # Did we find any non-claimed entities (nearest to furthest):
                distance_list = entity_distance_dict.keys()
                distance_list.sort()

                for distance in distance_list:

                    # Yes; see if we can claim the nearest one:
                    nearest_possible_entity = entity_distance_dict[distance]

                    # Don't consider entities that are already claimed:
                    if nearest_possible_entity in this_frame_entity_list:
                        # print "Target %s: Skipping the one iwth distance: %d at %s, C:%s" % (target, distance, nearest_possible_entity[3], nearest_possible_entity[1] )
                        continue

                    # print "Target %s: USING the one iwth distance: %d at %s, C:%s" % (target, distance, nearest_possible_entity[3] , nearest_possible_entity[1])
                    # Found the nearest entity to claim:
                    entity_found = True
                    nearest_possible_entity[
                        2] = frame_t0  # Update last_time_seen
                    nearest_possible_entity[
                        3] = target  # Update the new location
                    this_frame_entity_list.append(nearest_possible_entity)
                    # log_file.write( "%.3f MOVED %s %d %d\n" % ( frame_t0, nearest_possible_entity[0], nearest_possible_entity[3][0], nearest_possible_entity[3][1]  ) )
                    break

                if not entity_found:
                    # It's a new entity.
                    color = (random.randint(0, 255), random.randint(0, 255),
                             random.randint(0, 255))
                    name = hashlib.md5(str(frame_t0) +
                                       str(color)).hexdigest()[:6]
                    last_time_seen = frame_t0

                    new_entity = [name, color, last_time_seen, target]
                    this_frame_entity_list.append(new_entity)
                    # log_file.write( "%.3f FOUND %s %d %d\n" % ( frame_t0, new_entity[0], new_entity[3][0], new_entity[3][1]  ) )

            # Now "delete" any not-found entities which have expired:
            entity_ttl = 1.0  # 1 sec.

            for entity in last_frame_entity_list:
                last_time_seen = entity[2]
                if frame_t0 - last_time_seen > entity_ttl:
                    # It's gone.
                    # log_file.write( "%.3f STOPD %s %d %d\n" % ( frame_t0, entity[0], entity[3][0], entity[3][1]  ) )
                    pass
                else:
                    # Save it for next time... not expired yet:
                    this_frame_entity_list.append(entity)

            # For next frame:
            last_frame_entity_list = this_frame_entity_list

            # Draw the found entities to screen:
            for entity in this_frame_entity_list:
                center_point = entity[3]
                c = entity[1]  # RGB color tuple
                cv.Circle(display_image, center_point, 20,
                          cv.CV_RGB(c[0], c[1], c[2]), 1)
                cv.Circle(display_image, center_point, 15,
                          cv.CV_RGB(c[0], c[1], c[2]), 1)
                cv.Circle(display_image, center_point, 10,
                          cv.CV_RGB(c[0], c[1], c[2]), 2)
                cv.Circle(display_image, center_point, 5,
                          cv.CV_RGB(c[0], c[1], c[2]), 3)

            # print "min_size is: " + str(min_size)
            # Listen for ESC or ENTER key
            c = cv.WaitKey(7) % 0x100
            if c == 27 or c == 10:
                break

            # Toggle which image to show


#             if chr(c) == 'd':
#                 image_index = ( image_index + 1 ) % len( image_list )
#
#             image_name = image_list[ image_index ]
#
#             # Display frame to user
#             if image_name == "camera":
#                 image = camera_image
#                 cv.PutText( image, "Camera (Normal)", text_coord, text_font, text_color )
#             elif image_name == "difference":
#                 image = difference
#                 cv.PutText( image, "Difference Image", text_coord, text_font, text_color )
#             elif image_name == "display":
#                 image = display_image
#                 cv.PutText( image, "Targets (w/AABBs and contours)", text_coord, text_font, text_color )
#             elif image_name == "threshold":
#                 # Convert the image to color.
#                 cv.CvtColor( grey_image, display_image, cv.CV_GRAY2RGB )
#                 image = display_image  # Re-use display image here
#                 cv.PutText( image, "Motion Mask", text_coord, text_font, text_color )
#             elif image_name == "faces":
#                 # Do face detection
#                 detect_faces( camera_image, haar_cascade, mem_storage )
#                 image = camera_image  # Re-use camera image here
#                 cv.PutText( image, "Face Detection", text_coord, text_font, text_color )
#             cv.ShowImage( "Target", image )

            image1 = display_image

            cv.ShowImage("Target 1", image1)

            #             if self.writer:
            #                 cv.WriteFrame( self.writer, image );

            # log_file.flush()

            # If only using a camera, then there is no time.sleep() needed,
            # because the camera clips us to 15 fps.  But if reading from a file,
            # we need this to keep the time-based target clipping correct:
            frame_t1 = time.time()

            # If reading from a file, put in a forced delay:
            if not self.writer:
                delta_t = frame_t1 - frame_t0
                if delta_t < (1.0 / 15.0): time.sleep((1.0 / 15.0) - delta_t)

        t1 = time.time()
        time_delta = t1 - t0
        processed_fps = float(frame_count) / time_delta
        print "Got %d frames. %.1f s. %f fps." % (frame_count, time_delta,
                                                  processed_fps)
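
merge_collided_bboxes() is called in Example #5 (and again in Example #8) but is not part of this excerpt. One plausible implementation, offered only as an assumption, repeatedly merges overlapping axis-aligned boxes of the form ((x1, y1), (x2, y2)) until none collide:

def merge_collided_bboxes(bbox_list):
    # Hypothetical helper: merge any two overlapping boxes into their union.
    boxes = list(bbox_list)
    merged = True
    while merged:
        merged = False
        for i in range(len(boxes)):
            for j in range(i + 1, len(boxes)):
                (ax1, ay1), (ax2, ay2) = boxes[i]
                (bx1, by1), (bx2, by2) = boxes[j]
                if ax1 <= bx2 and bx1 <= ax2 and ay1 <= by2 and by1 <= ay2:
                    boxes[i] = ((min(ax1, bx1), min(ay1, by1)),
                                (max(ax2, bx2), max(ay2, by2)))
                    del boxes[j]
                    merged = True
                    break
            if merged:
                break
    return boxes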
Example #6
def compare_2_formes(Image1, Image2):
    mincontour = 500  # minimum contour area for a shape to be considered
    CVCONTOUR_APPROX_LEVEL = 5  # accuracy parameter passed to ApproxPoly
    img_edge1 = cv.CreateImage(cv.GetSize(Image1), 8, 1)  # edge image

    #        img1_8uc3=cv.CreateImage(cv.GetSize(Image1),8,3)

    img_edge2 = cv.CreateImage(cv.GetSize(Image2), 8, 1)
    #       img2_8uc3=cv.CreateImage(cv.GetSize(Image2),8,3)

    cv.Threshold(Image1, img_edge1, 123, 255,
                 cv.CV_THRESH_BINARY)  # filter threshold
    cv.Threshold(Image2, img_edge2, 123, 255, cv.CV_THRESH_BINARY)

    storage1 = cv.CreateMemStorage()
    storage2 = cv.CreateMemStorage()

    first_contour1 = cv.FindContours(
        img_edge1, storage1)  # first contour of shape 1
    first_contour2 = cv.FindContours(
        img_edge2, storage2)  # first contour of shape 2

    newseq = first_contour1
    newseq2 = first_contour2

    if not first_contour1 or not first_contour2:
        return 0

    current_contour = first_contour1
    while True:
        # walk the sequence of contours of the first shape
        current_contour = current_contour.h_next()
        if not current_contour:  # stop when the contour pointer is NULL
            break

        if cv.ContourArea(current_contour) > mincontour:
            newseq = cv.ApproxPoly(current_contour, storage1,
                                   cv.CV_POLY_APPROX_DP,
                                   CVCONTOUR_APPROX_LEVEL, 0)
    #          cv.CvtColor(Image1,img1_8uc3,cv.CV_GRAY2BGR );
    #          cv.DrawContours(img1_8uc3,newseq,cv.CV_RGB(0,255,0),cv.CV_RGB(255,0,0),0,2,8);
    #          cv.NamedWindow("ContourImage2",cv.CV_WINDOW_AUTOSIZE)
    #          cv.ShowImage("ContourImage2",img1_8uc3)

    current_contour = first_contour2

    # walk the contours of the second shape
    while True:
        current_contour = current_contour.h_next()
        if not current_contour:
            break

        if cv.ContourArea(current_contour) > mincontour:
            newseq2 = cv.ApproxPoly(current_contour, storage2,
                                    cv.CV_POLY_APPROX_DP,
                                    CVCONTOUR_APPROX_LEVEL, 0)
    #          cv.CvtColor(Image2,img2_8uc3,cv.CV_GRAY2BGR);
    #          cv.DrawContours(img2_8uc3,newseq2,cv.CV_RGB(0,255,0),cv.CV_RGB(255,0,0),0,2,8);
    #          cv.NamedWindow("ContourImage",cv.CV_WINDOW_AUTOSIZE)
    #          cv.ShowImage("ContourImage",img2_8uc3)

    matchresult = cv.MatchShapes(newseq, newseq2, 1, 2)
    return matchresult
    return matchresult
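
The same shape comparison is much shorter against the cv2 API. The sketch below is an assumption, not the original code: it expects single-channel uint8 inputs, and the cv2.CONTOURS_MATCH_I1 constant is the OpenCV 3+ spelling of method 1.

import cv2

def compare_two_shapes_cv2(img1, img2, min_area=500.0, eps=5.0):
    # cv2 sketch of compare_2_formes(): threshold, take the largest
    # sufficiently big contour of each image, approximate, matchShapes.
    shapes = []
    for img in (img1, img2):
        _, edges = cv2.threshold(img, 123, 255, cv2.THRESH_BINARY)
        found = cv2.findContours(edges, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
        contours = found[0] if len(found) == 2 else found[1]
        big = [c for c in contours if cv2.contourArea(c) > min_area]
        if not big:
            return 0
        shapes.append(cv2.approxPolyDP(max(big, key=cv2.contourArea), eps, True))
    # Lower return value means more similar shapes.
    return cv2.matchShapes(shapes[0], shapes[1], cv2.CONTOURS_MATCH_I1, 0.0)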
Example #7
def findSquares4(img, storage):
    N = 11
    sz = (img.width & -2, img.height & -2)
    timg = cv.CloneImage(img)  # make a copy of the input image
    gray = cv.CreateImage(sz, 8, 1)
    pyr = cv.CreateImage((sz[0]/2, sz[1]/2), 8, 3)  # sz is a tuple, not a CvSize
    # create empty sequence that will contain points -
    # 4 points per square (the square's vertices)
    # sizeof_CvSeq, sizeof_CvPoint and CvSeq_CvPoint are assumed to come
    # from the ctypes-based OpenCV bindings this sample targets.
    squares = cv.CreateSeq(0, sizeof_CvSeq, sizeof_CvPoint, storage)
    squares = CvSeq_CvPoint.cast(squares)

    # select the maximum ROI in the image
    # with the width and height divisible by 2
    subimage = cv.GetSubRect(timg, (0, 0, sz[0], sz[1]))  # rect as an (x, y, w, h) tuple

    # down-scale and upscale the image to filter out the noise
    cv.PyrDown(subimage, pyr, 7)
    cv.PyrUp(pyr, subimage, 7)
    tgray = cv.CreateImage(sz, 8, 1)
    # find squares in every color plane of the image
    for c in range(3):
        # extract the c-th color plane
        channels = [None, None, None]
        channels[c] = tgray
        cv.Split(subimage, channels[0], channels[1], channels[2], None)
        for l in range(N):
            # hack: use Canny instead of zero threshold level.
            # Canny helps to catch squares with gradient shading
            if(l == 0):
                # apply Canny. Take the upper threshold from slider
                # and set the lower to 0 (which forces edges merging)
                cv.Canny(tgray, gray, 0, thresh, 5)
                # dilate canny output to remove potential
                # holes between edge segments
                cv.Dilate(gray, gray, None, 1)
            else:
                # apply threshold if l!=0:
                #     tgray(x, y) = gray(x, y) < (l+1)*255/N ? 255 : 0
                cv.Threshold(tgray, gray, (l+1)*255/N, 255, cv.CV_THRESH_BINARY)

            # find contours and store them all as a list
            count, contours = cv.FindContours(gray, storage, sizeof_CvContour,
                cv.CV_RETR_LIST, cv.CV_CHAIN_APPROX_SIMPLE, (0, 0))

            if not contours:
                continue

            # test each contour
            for contour in contours.hrange():
                # approximate contour with accuracy proportional
                # to the contour perimeter
                result = cv.ApproxPoly(contour, sizeof_CvContour, storage,
                    cv.CV_POLY_APPROX_DP, cv.ContourPerimeter(contours)*0.02, 0)
                # square contours should have 4 vertices after approximation
                # relatively large area (to filter out noisy contours)
                # and be convex.
                # Note: absolute value of an area is used because
                # area may be positive or negative - in accordance with the
                # contour orientation
                if(result.total == 4 and
                    abs(cv.ContourArea(result)) > 1000 and
                    cv.CheckContourConvexity(result)):
                    s = 0
                    for i in range(5):
                        # find minimum angle between joint
                        # edges (maximum of cosine)
                        if(i >= 2):
                            t = abs(angle(result[i], result[i-2], result[i-1]))
                            if s<t:
                                s=t
                    # if cosines of all angles are small
                    # (all angles are ~90 degrees) then write the quadrangle's
                    # vertices to the resultant sequence
                    if(s < 0.3):
                        for i in range(4):
                            squares.append(result[i])

    return squares
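
The angle() helper called above is not included in this excerpt. A reconstruction in the spirit of OpenCV's classic squares sample (an assumption, not the original):

def angle(pt1, pt2, pt0):
    # Cosine of the angle at vertex pt0 between the rays toward pt1 and pt2.
    dx1 = float(pt1[0] - pt0[0])
    dy1 = float(pt1[1] - pt0[1])
    dx2 = float(pt2[0] - pt0[0])
    dy2 = float(pt2[1] - pt0[1])
    denom = ((dx1 * dx1 + dy1 * dy1) * (dx2 * dx2 + dy2 * dy2) + 1e-10) ** 0.5
    return (dx1 * dx2 + dy1 * dy2) / denom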
Example #8
    def run(self):
        # Initialize
        log_file_name = "tracker_output.log"
        log_file = open( log_file_name, 'a' )
        #fps = 25

        #cap = cv2.VideoCapture( '../000104-.avi' )

        frame = cv.QueryFrame( self.capture )
        frame_size = cv.GetSize( frame )
        foreground = cv.CreateImage(cv.GetSize(frame),8,1)
        foremat = cv.GetMat(foreground)
        Nforemat = numpy.array(foremat, dtype=numpy.float32)
        gfilter = sys.argv[2]
        gfilter_string = gfilter
        gfilter = float(gfilter)
        print "Processing Tracker with filter: " + str(gfilter)

        # Capture the first frame from webcam for image properties
        display_image = cv.QueryFrame( self.capture )

        # Create Background Subtractor
        fgbg = cv2.BackgroundSubtractorMOG()

        # Greyscale image, thresholded to create the motion mask:
        grey_image = cv.CreateImage( cv.GetSize(frame), cv.IPL_DEPTH_8U, 1 )


        # The RunningAvg() function requires a 32-bit or 64-bit image...
        running_average_image = cv.CreateImage( cv.GetSize(frame), cv.IPL_DEPTH_32F, 3 )
        # ...but the AbsDiff() function requires matching image depths:
        running_average_in_display_color_depth = cv.CloneImage( display_image )


        # RAM used by FindContours():
        mem_storage = cv.CreateMemStorage(0)


        # The difference between the running average and the current frame:
        difference = cv.CloneImage( display_image )


        target_count = 1
        last_target_count = 1
        last_target_change_t = 0.0
        k_or_guess = 1
        codebook=[]
        frame_count=0
        last_frame_entity_list = []
        fps = 25


        t0 = 165947  # note: hard-coded start reference instead of time.time(), so the final fps report is relative to this constant


        # For toggling display:
        image_list = [ "camera", "shadow", "white", "threshold", "display", "yellow" ]
        image_index = 0   # Index into image_list




        # Prep for text drawing:
        text_font = cv.InitFont(cv.CV_FONT_HERSHEY_COMPLEX, .5, .5, 0.0, 1, cv.CV_AA )
        text_coord = ( 5, 15 )
        text_color = cv.CV_RGB(255,255,255)
        text_coord2 = ( 5, 30 )
        text_coord3 = ( 5, 45 )

        ###############################
        ### Face detection stuff
        #haar_cascade = cv.Load( 'haarcascades/haarcascade_frontalface_default.xml' )
        #haar_cascade = cv.Load( 'C:/OpenCV2.2/data/haarcascades/haarcascade_frontalface_alt.xml' )
        #haar_cascade = cv.Load( 'haarcascades/haarcascade_frontalface_alt2.xml' )
        #haar_cascade = cv.Load( 'haarcascades/haarcascade_mcs_mouth.xml' )
        #haar_cascade = cv.Load( 'haarcascades/haarcascade_eye.xml' )
        #haar_cascade = cv.Load( 'haarcascades/haarcascade_frontalface_alt_tree.xml' )
        #haar_cascade = cv.Load( 'haarcascades/haarcascade_upperbody.xml' )
        #haar_cascade = cv.Load( 'haarcascades/haarcascade_profileface.xml' )


        # Set this to the max number of targets to look for (passed to k-means):
        max_targets = 20


        while True:


            # Capture frame from webcam
            camera_image = cv.QueryFrame( self.capture )
            #ret, frame = cap.read()

            frame_count += 1
            frame_t0 = time.time()
            mat = cv.GetMat(camera_image)
            Nmat = numpy.array(mat, dtype=numpy.uint8)


            # Create an image with interactive feedback:
            display_image = cv.CloneImage( camera_image )

            # NEW INSERT - FGMASK
            fgmask = fgbg.apply(Nmat,Nforemat,-1)
            fgmask = cv.fromarray(fgmask)

            # Create a working "color image" to modify / blur
            color_image = cv.CloneImage( display_image )


            # Smooth to get rid of false positives
            cv.Smooth( color_image, color_image, cv.CV_GAUSSIAN, 19, 0 ) #Changed from 19 AND MADE MEDIAN FILTER


            # Smooth to get rid of false positives

#            color_image = numpy.asarray( cv.GetMat( color_image ) )
#            (mu, sigma) = cv2.meanStdDev(color_image)
#            edges = cv2.Canny(color_image, mu - sigma, mu + sigma)
#            lines = cv2.HoughLines(edges, 1, pi / 180, 70)


            # Use the Running Average as the static background
            # a = 0.020 leaves artifacts lingering way too long.
            # a = 0.320 works well at 320x240, 15fps.  (1/a is roughly num frames.)
            cv.RunningAvg( color_image, running_average_image, gfilter, None )


            # Convert the scale of the moving average.
            cv.ConvertScale( running_average_image, running_average_in_display_color_depth, 1.0, 0.0 )


            # Subtract the current frame from the moving average.
            cv.AbsDiff( color_image, running_average_in_display_color_depth, difference )


            # Convert the image to greyscale.
            cv.CvtColor( difference, grey_image, cv.CV_RGB2GRAY )
            # Smooth Before thresholding
            cv.Smooth( grey_image, grey_image, cv.CV_GAUSSIAN, 19, 19 )
            # Threshold the image to a black and white motion mask:
            cv.Threshold( grey_image, grey_image, 2, 255, cv.CV_THRESH_BINARY )
            # Smooth and threshold again to eliminate "sparkles"
            #cv.Smooth( grey_image, grey_image, cv.CV_GAUSSIAN, 19, 0 )  #changed from 19 - AND put smooth before threshold
            cv.Threshold( grey_image, grey_image, 240, 255, cv.CV_THRESH_BINARY)


            grey_image_as_array = numpy.asarray( cv.GetMat( grey_image ) )
            non_black_coords_array = numpy.where( grey_image_as_array > 3 )
            # Convert from numpy.where()'s two separate lists to one list of (x, y) tuples:
            non_black_coords_array = zip( non_black_coords_array[1], non_black_coords_array[0] )

            frame_hsv = cv.CreateImage(cv.GetSize(color_image),8,3)
            cv.CvtColor(color_image,frame_hsv,cv.CV_BGR2HSV)
            imgthreshold_yellow=cv.CreateImage(cv.GetSize(color_image),8,1)
            imgthreshold_white=cv.CreateImage(cv.GetSize(color_image),8,1)
            imgthreshold_white2=cv.CreateImage(cv.GetSize(color_image),8,1)
            cv.InRangeS(frame_hsv,cv.Scalar(0,0,196),cv.Scalar(255,255,255),imgthreshold_white)  # changed scalar from 255,15,255 to 255,255,255
            cv.InRangeS(frame_hsv,cv.Scalar(41,43,224),cv.Scalar(255,255,255),imgthreshold_white2)
            cv.InRangeS(frame_hsv,cv.Scalar(20,100,100),cv.Scalar(30,255,255),imgthreshold_yellow)
            #cvCvtColor(color_image, yellowHSV, CV_BGR2HSV)
            #lower_yellow = np.array([10, 100, 100], dtype=np.uint8)
            #upper_yellow = np.array([30, 255, 255], dtype=np.uint8)
            #mask_yellow = cv2.inRange(yellowHSV, lower_yellow, upper_yellow)
            #res_yellow = cv2.bitwise_and(color_image, color_image, mask_yellow = mask_yellow)


            points = []   # Was using this to hold either pixel coords or polygon coords.
            bounding_box_list = []


            # Now calculate movements using the white pixels as "motion" data
            contour = cv.FindContours( grey_image, mem_storage, cv.CV_RETR_CCOMP, cv.CV_CHAIN_APPROX_SIMPLE )

            i=0
            while contour:

#                c = contour[i]
#                m = cv2.moments(c)
#                Area  = m['m00']
#                print( Area )

                bounding_rect = cv.BoundingRect( list(contour) )
                point1 = ( bounding_rect[0], bounding_rect[1] )
                point2 = ( bounding_rect[0] + bounding_rect[2], bounding_rect[1] + bounding_rect[3] )


                bounding_box_list.append( ( point1, point2 ) )
                polygon_points = cv.ApproxPoly( list(contour), mem_storage, cv.CV_POLY_APPROX_DP )


                # To track polygon points only (instead of every pixel):
                #points += list(polygon_points)


                # Draw the contours:
                ###cv.DrawContours(color_image, contour, cv.CV_RGB(255,0,0), cv.CV_RGB(0,255,0), levels, 3, 0, (0,0) )
                cv.FillPoly( grey_image, [ list(polygon_points), ], cv.CV_RGB(255,255,255), 0, 0 )
                cv.PolyLine( display_image, [ polygon_points, ], 0, cv.CV_RGB(255,255,255), 1, 0, 0 )
                #cv.Rectangle( display_image, point1, point2, cv.CV_RGB(120,120,120), 1)
#        if Area > 3000:
#            cv2.drawContours(imgrgb,[cnt],0,(255,255,255),2)
#            print(Area)

                i=i+1
                contour = contour.h_next()




            # Find the average size of the bbox (targets), then
            # remove any tiny bboxes (which are prolly just noise).
            # "Tiny" is defined as any box with 1/10th the area of the average box.
            # This reduces false positives on tiny "sparkles" noise.
            box_areas = []
            for box in bounding_box_list:
                # left/right/top/bottom are assumed module-level tuple indices
                # (left = top = 0, right = bottom = 1) defined outside this excerpt.
                box_width = box[right][0] - box[left][0]
                box_height = box[bottom][1] - box[top][1]
                box_areas.append( box_width * box_height )


                #cv.Rectangle( display_image, box[0], box[1], cv.CV_RGB(255,0,0), 1)


            average_box_area = 0.0
            if len(box_areas): average_box_area = float( sum(box_areas) ) / len(box_areas)


            trimmed_box_list = []
            for box in bounding_box_list:
                box_width = box[right][0] - box[left][0]
                box_height = box[bottom][1] - box[top][1]


                # Only keep the box if it's not a tiny noise box:
                if (box_width * box_height) > average_box_area*0.1: trimmed_box_list.append( box )


            # Draw the trimmed box list:
            #for box in trimmed_box_list:
            #    cv.Rectangle( display_image, box[0], box[1], cv.CV_RGB(0,255,0), 2 )


            bounding_box_list = merge_collided_bboxes( trimmed_box_list )


            # Draw the merged box list:
            for box in bounding_box_list:
                cv.Rectangle( display_image, box[0], box[1], cv.CV_RGB(0,255,0), 1 )


            # Here are our estimate points to track, based on merged & trimmed boxes:
            estimated_target_count = len( bounding_box_list )


            # Don't allow target "jumps" from few to many or many to few.
            # Only change the number of targets up to one target per n seconds.
            # This fixes the "exploding number of targets" when something stops moving
            # and the motion erodes to disparate little puddles all over the place.


            if frame_t0 - last_target_change_t < .35:  # 1 change per 0.35 secs
                estimated_target_count = last_target_count
            else:
                if last_target_count - estimated_target_count > 1: estimated_target_count = last_target_count - 1
                if estimated_target_count - last_target_count > 1: estimated_target_count = last_target_count + 1
                last_target_change_t = frame_t0


            # Clip to the user-supplied maximum:
            estimated_target_count = min( estimated_target_count, max_targets )


            # The estimated_target_count at this point is the maximum number of targets
            # we want to look for.  If kmeans decides that one of our candidate
            # bboxes is not actually a target, we remove it from the target list below.


            # Using the numpy values directly (treating all pixels as points):
            points = non_black_coords_array
            center_points = []


            if len(points):


                # If we have all the "target_count" targets from last frame,
                # use the previously known targets (for greater accuracy).
                k_or_guess = max( estimated_target_count, 1 )  # Need at least one target to look for.
                if len(codebook) == estimated_target_count:
                    k_or_guess = codebook


                #points = vq.whiten(array( points ))  # Don't do this!  Ruins everything.
                codebook, distortion = vq.kmeans( array( points ), k_or_guess )


                # Convert to tuples (and draw it to screen)
                for center_point in codebook:
                    center_point = ( int(center_point[0]), int(center_point[1]) )
                    center_points.append( center_point )
                    #cv.Circle(display_image, center_point, 10, cv.CV_RGB(255, 0, 0), 2)
                    #cv.Circle(display_image, center_point, 5, cv.CV_RGB(255, 0, 0), 3)


            # Now we have targets that are NOT computed from bboxes -- just
            # movement weights (according to kmeans).  If any two targets are
            # within the same "bbox count", average them into a single target.
            #
            # (Any kmeans targets not within a bbox are also kept.)
            trimmed_center_points = []
            removed_center_points = []


            for box in bounding_box_list:
                # Find the centers within this box:
                center_points_in_box = []


                for center_point in center_points:
                    if center_point[0] < box[right][0] and center_point[0] > box[left][0] and \
                       center_point[1] < box[bottom][1] and center_point[1] > box[top][1]:


                        # This point is within the box.
                        center_points_in_box.append( center_point )


                # Now see if there are more than one.  If so, merge them.
                if len( center_points_in_box ) > 1:
                    # Merge them:
                    x_list, y_list = [], []  # two separate lists; chained assignment would alias them
                    for point in center_points_in_box:
                        x_list.append(point[0])
                        y_list.append(point[1])


                    average_x = int( float(sum( x_list )) / len( x_list ) )
                    average_y = int( float(sum( y_list )) / len( y_list ) )


                    trimmed_center_points.append( (average_x, average_y) )


                    # Record that they were removed:
                    removed_center_points += center_points_in_box


                if len( center_points_in_box ) == 1:
                    trimmed_center_points.append( center_points_in_box[0] ) # Just use it.


            # If there are any center_points not within a bbox, just use them.
            # (It's probably a cluster comprised of a bunch of small bboxes.)
            for center_point in center_points:
                if (not center_point in trimmed_center_points) and (not center_point in removed_center_points):
                    trimmed_center_points.append( center_point )


            # Draw what we found:
            #for center_point in trimmed_center_points:
            #    center_point = ( int(center_point[0]), int(center_point[1]) )
            #    cv.Circle(display_image, center_point, 20, cv.CV_RGB(255, 255,255), 1)
            #    cv.Circle(display_image, center_point, 15, cv.CV_RGB(100, 255, 255), 1)
            #    cv.Circle(display_image, center_point, 10, cv.CV_RGB(255, 255, 255), 2)
            #    cv.Circle(display_image, center_point, 5, cv.CV_RGB(100, 255, 255), 3)


            # Determine if there are any new (or lost) targets:
            actual_target_count = len( trimmed_center_points )
            last_target_count = actual_target_count


            # Now build the list of physical entities (objects)
            this_frame_entity_list = []


            # An entity is list: [ name, color, last_time_seen, last_known_coords ]


            for target in trimmed_center_points:


                # Is this a target near a prior entity (same physical entity)?
                entity_found = False
                entity_distance_dict = {}


                for entity in last_frame_entity_list:


                    entity_coords= entity[3]
                    delta_x = entity_coords[0] - target[0]
                    delta_y = entity_coords[1] - target[1]


                    distance = sqrt( pow(delta_x,2) + pow( delta_y,2) )
                    entity_distance_dict[ distance ] = entity


                # Did we find any non-claimed entities (nearest to furthest):
                distance_list = entity_distance_dict.keys()
                distance_list.sort()


                for distance in distance_list:


                    # Yes; see if we can claim the nearest one:
                    nearest_possible_entity = entity_distance_dict[ distance ]


                    # Don't consider entities that are already claimed:
                    if nearest_possible_entity in this_frame_entity_list:
                        #print "Target %s: Skipping the one iwth distance: %d at %s, C:%s" % (target, distance, nearest_possible_entity[3], nearest_possible_entity[1] ) #Commented Out 3/20/2016
                        continue


                    #print "Target %s pixel(b,g,r) : USING the one iwth distance: %d at %s, C:%s" % (target, distance, nearest_possible_entity[3] , nearest_possible_entity[1]) # Commented Out 3/20/2016
                    # Found the nearest entity to claim:
                    entity_found = True
                    nearest_possible_entity[2] = frame_t0  # Update last_time_seen
                    nearest_possible_entity[3] = target  # Update the new location
                    this_frame_entity_list.append( nearest_possible_entity )
                    #log_file.write( "%.3f MOVED %s %d %d\n" % ( frame_count, nearest_possible_entity[0], nearest_possible_entity[3][0], nearest_possible_entity[3][1]  ) )
                    break


                if not entity_found:
                    # It's a new entity.
                    color = ( random.randint(0,255), random.randint(0,255), random.randint(0,255) )
                    name = hashlib.md5( str(frame_t0) + str(color) ).hexdigest()[:6]
                    last_time_seen = frame_t0
                    if imgthreshold_white[target[1],target[0]] == 0.0:
                        # It's a real detect (not a line marker)

                        new_entity = [ name, color, last_time_seen, target ]
                        this_frame_entity_list.append( new_entity )
                        log_file.write( "%.3f %.3f FOUND %s %d %d\n" % ( frame_count/fps, frame_count, new_entity[0], new_entity[3][0], new_entity[3][1]  ) )
                        filedrive = "C:/Users/525494/New_folder/000216/run_096/"
                        filename = "img"+str(name)
			#print "gfilter is: %.2f" + gfilter
                        cv.SaveImage("image-test%s-%3f.png"%(new_entity[0],gfilter), display_image)
                    elif imgthreshold_white[target[1],target[0]] == 255.0:
                        # It's a white line detect

                        new_entity = [ name, color, last_time_seen, target ]
                        this_frame_entity_list.append( new_entity )
                        log_file.write( "%.3f %.3f FOUND %s %d %d\n" % ( frame_count/fps, frame_count, new_entity[0], new_entity[3][0], new_entity[3][1]  ) )
                        filedrive = "C:/Users/525494/New_folder/000216/run_096/"
                        filename = "img"+str(name)
			#print "gfilter is: %.2f" + gfilter
                        cv.SaveImage("white-line-image-test%s-%3f.png"%(new_entity[0],gfilter), display_image)
                    elif imgthreshold_yellow[target[1],target[0]] == 255.0:
                        # It's a yellow line detect

                        new_entity = [ name, color, last_time_seen, target ]
                        this_frame_entity_list.append( new_entity )
                        log_file.write( "%.3f %.3f FOUND %s %d %d\n" % ( frame_count/fps, frame_count, new_entity[0], new_entity[3][0], new_entity[3][1]  ) )
                        filedrive = "C:/Users/525494/New_folder/000216/run_096/"
                        filename = "img"+str(name)
                        cv.SaveImage("yellow-line-image-test%s.png"%(new_entity[0]), camera_image)

            # Now "delete" any not-found entities which have expired:
            entity_ttl = 1.0  # 1 sec.


            for entity in last_frame_entity_list:
                last_time_seen = entity[2]
                if frame_t0 - last_time_seen > entity_ttl:
                    # It's gone.
                    #log_file.write( "%.3f STOPD %s %d %d\n" % ( frame_count, entity[0], entity[3][0], entity[3][1]  ) )
                    pass
                else:
                    # Save it for next time... not expired yet:
                    this_frame_entity_list.append( entity )


            # For next frame:
            last_frame_entity_list = this_frame_entity_list


            # Draw the found entities to screen:
            for entity in this_frame_entity_list:
                center_point = entity[3]
                c = entity[1]  # RGB color tuple
                cv.Circle(display_image, center_point, 20, cv.CV_RGB(c[0], c[1], c[2]), 1)
                cv.Circle(display_image, center_point, 15, cv.CV_RGB(c[0], c[1], c[2]), 1)
                cv.Circle(display_image, center_point, 10, cv.CV_RGB(c[0], c[1], c[2]), 2)
                cv.Circle(display_image, center_point,  5, cv.CV_RGB(c[0], c[1], c[2]), 3)




            #print "min_size is: " + str(min_size)
            # Listen for ESC or ENTER key
            c = cv.WaitKey(7) % 0x100
            if c == 27 or c == 10:
                break


            # Toggle which image to show
            if chr(c) == 'd':
                image_index = ( image_index + 1 ) % len( image_list )


            image_name = image_list[ image_index ]


            # Display frame to user
            if image_name == "camera":
                image = camera_image
                cv.PutText( image, "Camera (Normal)", text_coord, text_font, text_color )
            elif image_name == "shadow":
                image = fgmask
                cv.PutText( image, "No Shadow", text_coord, text_font, text_color )
            elif image_name == "white":
                #image = difference
                image = imgthreshold_white
                cv.PutText( image, "White Threshold", text_coord, text_font, text_color )
            elif image_name == "display":
                #image = display_image
                image = display_image
                cv.PutText( image, "Targets (w/AABBs and contours)", text_coord, text_font, text_color )
                cv.PutText( image, str(frame_t0), text_coord2, text_font, text_color )
                cv.PutText( image, str(frame_count), text_coord3, text_font, text_color )
            elif image_name == "threshold":
                # Convert the image to color.
                cv.CvtColor( grey_image, display_image, cv.CV_GRAY2RGB )
                image = display_image  # Re-use display image here
                cv.PutText( image, "Motion Mask", text_coord, text_font, text_color )
            elif image_name == "yellow":
                # Face detection disabled; show the yellow threshold instead
                #detect_faces( camera_image, haar_cascade, mem_storage )
                image = imgthreshold_yellow
                cv.PutText( image, "Yellow Threshold", text_coord, text_font, text_color )


            #cv.ShowImage( "Target", image )		Commented out 3/19

#            self.writer.write( image )
#            out.write( image );
#            cv.WriteFrame( self.writer, image );
#            if self.writer:
#                cv.WriteFrame( self.writer, image );
#                video.write( image );

            log_file.flush()

            # If only using a camera, then there is no time.sleep() needed,
            # because the camera clips us to 15 fps.  But if reading from a file,
            # we need this to keep the time-based target clipping correct:
            frame_t1 = time.time()

            # If reading from a file, put in a forced delay:
#            if not self.writer:
#                delta_t = frame_t1 - frame_t0
#                if delta_t < ( 1.0 / 15.0 ): time.sleep( ( 1.0 / 15.0 ) - delta_t )

            if frame_count == 155740:
                cv2.destroyWindow("Target")
#                    cv.ReleaseVideoWriter()
#                    self.writer.release()
#                    log_file.flush()
                break

        t1 = time.time()
        time_delta = t1 - t0
        processed_fps = float( frame_count ) / time_delta
        print "Got %d frames. %.1f s. %f fps." % ( frame_count, time_delta, processed_fps )