Example No. 1
    def tracking(self, frame):
        ct = CentroidTracker(maxDisappeared=10, maxDistance=30)

        # use the centroid tracker to associate the (1) old object
        # centroids with (2) the newly computed object centroids
        objects = ct.update(self.rects)

        for (objectID, centroid) in objects.items():
            # check to see if a trackable object exists for the current
            # object ID
            to = self.trackableObjects.get(objectID, None)
            # print("ID: " + str(objectID) + " cX, cY: " + str(centroid))
            # if there is no existing trackable object, create one
            if to is None:
                to = TrackableObject(objectID, centroid)

            # otherwise, there is a trackable object so we can utilize it
            # to determine direction
            else:
                # the difference between the y-coordinate of the *current*
                # centroid and the mean of *previous* centroids will tell
                # us in which direction the object is moving (negative for
                # 'up' and positive for 'down')
                y = [c[1] for c in to.centroids]
                print(y)
                direction = centroid[1] - np.mean(y)


                to.centroids.append(centroid)

                # check to see if the object has been counted or not
                if not to.counted:
                    # if the direction is negative (the object is moving up)
                    # AND its recorded trajectory has crossed the counting line
                    # at y = 240 (started at or below the line, most recently
                    # at or above it), count the object
                    if direction < 0 and y[0] >= 240 and y[-1] <= 240:
                        self.totalUp += 1
                        to.counted = True

                    # if the direction is positive (the object is moving down)
                    # AND its recorded trajectory has crossed the counting line
                    # at y = 240 (started at or above the line, most recently
                    # at or below it), count the object
                    elif direction > 0 and y[0] <= 240 and y[-1] >= 240:
                        self.totalDown += 1
                        to.counted = True

            # store the trackable object in our dictionary
            self.trackableObjects[objectID] = to

            # draw the object's centroid
            cv2.circle(frame, (centroid[0], centroid[1]), 6, (255, 255, 255),
                       -1)

            # object ID
            text = "ID {}".format(objectID)
            cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
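The snippets in this listing append to `to.centroids` and toggle `to.counted`, which implies a small helper class roughly like the following (a minimal sketch in the style of the common pyimagesearch `TrackableObject`, not the original source):

class TrackableObject:
    def __init__(self, objectID, centroid):
        # unique ID assigned by the centroid tracker
        self.objectID = objectID
        # history of centroids, starting with the one seen at creation
        self.centroids = [centroid]
        # flag so the object is only counted once
        self.counted = False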
Example No. 2
    def __init__(self,
                 run_id,
                 folder,
                 cameraname,
                 saturation=120,
                 minArea=200,
                 downsampling=2,
                 drawBoxes=True,
                 writeImages=False,
                 min_sharpness=200,
                 learningRate=0.001,
                 debug=False):
        self.run_id = run_id
        self.folder = folder
        self.cameraname = cameraname

        self.saturation = saturation
        self.minArea = minArea
        self.downsampling = downsampling
        self.drawBoxes = drawBoxes
        self.writeImages = writeImages
        self.min_sharpness = min_sharpness
        self.learningRate = learningRate
        self.debug = debug
        #print("self.saturation : "  + str(self.saturation))
        #print("self.downsampling : "  + str(self.downsampling))
        self.bpd = BasicPartsDetector(cameraname=self.cameraname,
                                      saturation=self.saturation,
                                      downsampling=self.downsampling,
                                      drawBoxes=self.drawBoxes,
                                      writeImages=self.writeImages,
                                      min_sharpness=self.min_sharpness,
                                      learningRate=self.learningRate,
                                      debug=self.debug)

        self.newPartCounter = 1
        self.count = 1
        self.part_id = None
        self.sql = SQLModel(run_id)

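        # The threshold values below are stored as divisors of a 1920x1080
        # reference frame; the actual pixel positions are recovered later as
        # width / divisor and height / divisor (see drawTreshholds / checkPoint
        # in the USB-camera variant further down), so they scale with resolution.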
        self.threshold_minx1 = (1920 / 334)
        self.threshold_minx2 = (1920 / 1568)
        self.threshold_miny = (1080 / 252)

        self.threshold_maxx1 = (1920 / 1)
        self.threshold_maxx2 = (1920 / 1920)
        self.threshold_maxy = (1080 / 1030)
        self.ct = CentroidTracker()
Example No. 3
    def __init__(self,
                 run_id,
                 folder,
                 cameraname,
                 saturation=120,
                 minArea=100,
                 downsampling=2,
                 drawBoxes=True,
                 writeImages=False,
                 min_sharpness=200,
                 learningRate=0.001,
                 debug=False):
        self.run_id = run_id
        self.folder = folder
        self.cameraname = cameraname

        self.saturation = saturation
        self.minArea = minArea
        self.downsampling = downsampling
        self.drawBoxes = drawBoxes
        self.writeImages = writeImages
        self.min_sharpness = min_sharpness
        self.learningRate = learningRate
        self.debug = debug
        self.bpd = BasicPartsDetector(cameraname=self.cameraname,
                                      saturation=self.saturation,
                                      downsampling=self.downsampling,
                                      drawBoxes=self.drawBoxes,
                                      writeImages=self.writeImages,
                                      min_sharpness=self.min_sharpness,
                                      learningRate=self.learningRate,
                                      debug=self.debug)
        self.geometry = Geometry(debug=self.debug)
        self.boundary_left, self.boundary_right = self.geometry.getCameraNameBoundaries()

        self.newPartCounter = 1
        self.count = 1
        self.part_id = None
        self.sql = SQLModel(run_id)

        self.ct = CentroidTracker()
        self.calibrated = False
Example No. 4
class PartsDetectorUsbCam:
    def __init__(self,
                 run_id,
                 folder,
                 cameraname,
                 saturation=120,
                 minArea=200,
                 downsampling=2,
                 drawBoxes=True,
                 writeImages=False,
                 min_sharpness=200,
                 learningRate=0.001,
                 debug=False):
        self.run_id = run_id
        self.folder = folder
        self.cameraname = cameraname

        self.saturation = saturation
        self.minArea = minArea
        self.downsampling = downsampling
        self.drawBoxes = drawBoxes
        self.writeImages = writeImages
        self.min_sharpness = min_sharpness
        self.learningRate = learningRate
        self.debug = debug
        #print("self.saturation : "  + str(self.saturation))
        #print("self.downsampling : "  + str(self.downsampling))
        self.bpd = BasicPartsDetector(cameraname=self.cameraname,
                                      saturation=self.saturation,
                                      downsampling=self.downsampling,
                                      drawBoxes=self.drawBoxes,
                                      writeImages=self.writeImages,
                                      min_sharpness=self.min_sharpness,
                                      learningRate=self.learningRate,
                                      debug=self.debug)

        self.newPartCounter = 1
        self.count = 1
        self.part_id = None
        self.sql = SQLModel(run_id)

        self.threshold_minx1 = (1920 / 334)
        self.threshold_minx2 = (1920 / 1568)
        self.threshold_miny = (1080 / 252)

        self.threshold_maxx1 = (1920 / 1)
        self.threshold_maxx2 = (1920 / 1920)
        self.threshold_maxy = (1080 / 1030)
        self.ct = CentroidTracker()

    def calibrate(self, frame):
        pass

    def mask(self, frame):
        height, width = frame.shape[:2]
        mask = np.zeros(frame.shape, dtype=np.uint8)

        x1 = int(width / (1920 / 1553))
        y1 = int(height / (1080 / 215))
        x12 = int(width / (1920 / 1920))
        y12 = int(height / (1080 / 1080))
        x2 = int(width / (1920 / 1))
        y2 = int(height / (1080 / 1080))
        x3 = int(width / (1920 / 345))
        y3 = int(height / (1080 / 215))

        roi_corners = np.array([[(x1, y1), (x12, y12), (x2, y2), (x3, y3)]],
                               dtype=np.int32)
        channel_count = frame.shape[2]  # i.e. 3 or 4 depending on your image
        ignore_mask_color = (255, ) * channel_count
        cv2.fillPoly(mask, roi_corners, ignore_mask_color)

        # apply the mask
        frame = cv2.bitwise_and(frame, mask)
        return frame

    def getPartimages(self, frame):
        frame, partimages = (self.bpd.getPartimages(self.mask(frame), frame,
                                                    self.minArea,
                                                    self.cameraname))
        return frame, partimages

    def trackObjects(self, partimages):
        rects = []
        if len(partimages) > 0:
            #print("len(partimages) : " + str(len(partimages)))
            for image in partimages:
                #print("len(partimage) : " + str(len(partimage)))
                box = np.array([
                    image.x, image.y, image.x + image.w, image.y + image.h,
                    image.color
                ])
                rects.append(box)

        objects = self.ct.update(rects)
        return objects

    def drawTreshholds(self, frame):
        height, width = frame.shape[:2]
        cv2.putText(frame, "res: " + str(height) + " x " + str(width),
                    (10, 90), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
        cv2.putText(frame, "ymin = " + str(int(height / self.threshold_miny)),
                    (int(width / self.threshold_minx1) + 10,
                     int(height / self.threshold_miny) - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 0, 0), 2)
        cv2.line(frame, (int(
            width / self.threshold_minx1), int(height / self.threshold_miny)),
                 (int(width / self.threshold_minx2),
                  int(height / self.threshold_miny)), (255, 0, 0), 1)
        cv2.putText(frame, "ymax = " + str(int(height / self.threshold_maxy)),
                    (int(width / self.threshold_maxx1) + 10,
                     int(height / self.threshold_maxy) - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 0, 0), 2)
        cv2.line(frame, (int(
            width / self.threshold_maxx1), int(height / self.threshold_maxy)),
                 (int(width / self.threshold_maxx2),
                  int(height / self.threshold_maxy)), (255, 0, 0), 1)
        return frame

    def drawColoredBox(self, frame, image, color):
        if self.debug:
            cv2.rectangle(frame, [image.x, image.y, image.w, image.h], color,
                          5)
        return frame

    def checkImage(self, image, frame):
        return self.checkboundaries(image, frame)

    def checkboundaries(self, image, frame):
        height, _ = frame.shape[:2]

        if self.checkPoint(height, image.y) and self.checkPoint(
                height, image.y + image.h):

            frame = self.drawColoredBox(frame, image, (0, 255, 0))
            return frame, True
        else:
            frame = self.drawColoredBox(frame, image, (0, 0, 255))
            return frame, False

    def checkPoint(self, height, y):
        return (y > int(height / self.threshold_miny)
                and y < int(height / self.threshold_maxy))

    def getCameraName(self):
        return self.cameraname
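A hypothetical driver loop for the class above; the constructor arguments, camera index and window handling are assumptions, and SQLModel / BasicPartsDetector from the surrounding project must be importable for construction to succeed:

import cv2

detector = PartsDetectorUsbCam(run_id=1, folder="out", cameraname="usbcam")
cap = cv2.VideoCapture(0)

while True:
    ok, frame = cap.read()
    if not ok:
        break
    frame = detector.drawTreshholds(frame)              # overlay the y-boundaries
    frame, partimages = detector.getPartimages(frame)   # detect parts inside the masked ROI
    objects = detector.trackObjects(partimages)         # object ID -> centroid mapping
    cv2.imshow(detector.getCameraName(), frame)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break

cap.release()
cv2.destroyAllWindows()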
Example No. 5

def countVehicles(param):
    # param -> path of the video, relative to the current working directory

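    # NOTE (assumption): this snippet relies on the TF1 placeholder/Session API,
    # so TensorFlow is presumably imported in v1-compat mode, e.g.
    # `import tensorflow.compat.v1 as tf`.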
    tf.disable_v2_behavior()

    # Image size must be '416x416' as YoloV3 network expects that specific image size as input
    img_size = 416
    inputs = tf.placeholder(tf.float32, [None, img_size, img_size, 3])
    model = nets.YOLOv3COCO(inputs, nets.Darknet19)

    ct = CentroidTracker(
        maxDisappeared=5, maxDistance=50
    )  # Look into 'CentroidTracker' for further info about parameters
    trackers = []  # List of all dlib trackers
    trackableObjects = {
    }  # Dictionary of trackable objects containing object's ID and its' corresponding centroid/s
    skip_frames = 10  # Numbers of frames to skip from detecting
    confidence_level = 0.40  # The confidence level of a detection
    total = 0  # Total number of detected objects from classes of interest
    use_original_video_size_as_output_size = True  # Shows original video as output and not the 416x416 image that is used as yolov3 input (NOTE: Detection still happens with 416x416 img size but the output is displayed in original video size if this parameter is True)

    video_path = os.getcwd() + param  # "/videos/4.mp4"
    video_name = os.path.basename(video_path)

    # print("Loading video {video_path}...".format(video_path=video_path))
    if not os.path.exists(video_path):
        print("File does not exist. Exited.")
        exit()

    # YoloV3 detects 80 classes represented below
    all_classes = ["person", "bicycle", "car", "motorbike", "aeroplane", "bus", "train", "truck", \
         "boat", "traffic light", "fire hydrant", "stop sign", "parking meter", "bench", \
         "bird", "cat", "dog", "horse", "sheep", "cow", "elephant", "bear", "zebra", "giraffe", \
         "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard", \
         "sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard", \
         "tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", \
         "apple", "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", \
         "chair", "sofa", "pottedplant", "bed", "diningtable", "toilet", "tvmonitor", "laptop", "mouse", \
         "remote", "keyboard", "cell phone", "microwave", "oven", "toaster", "sink", "refrigerator", \
         "book", "clock", "vase", "scissors", "teddy bear", "hair drier", "toothbrush"]

    # Classes of interest (with their corresponding indexes for easier looping)
    classes = {1: 'bicycle', 2: 'car', 3: 'motorbike', 5: 'bus', 7: 'truck'}

    with tf.Session() as sess:
        sess.run(model.pretrained())
        cap = cv2.VideoCapture(video_path)

        # Get video size (just for log purposes)
        width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

        # Scale used for output window size and net size
        width_scale = 1
        height_scale = 1

        if use_original_video_size_as_output_size:
            width_scale = width / img_size
            height_scale = height / img_size

        def drawRectangleCV2(img,
                             pt1,
                             pt2,
                             color,
                             thickness,
                             width_scale=width_scale,
                             height_scale=height_scale):
            point1 = (int(pt1[0] * width_scale), int(pt1[1] * height_scale))
            point2 = (int(pt2[0] * width_scale), int(pt2[1] * height_scale))
            return cv2.rectangle(img, point1, point2, color, thickness)

        def drawTextCV2(img,
                        text,
                        pt,
                        font,
                        font_scale,
                        color,
                        lineType,
                        width_scale=width_scale,
                        height_scale=height_scale):
            pt = (int(pt[0] * width_scale), int(pt[1] * height_scale))
            cv2.putText(img, text, pt, font, font_scale, color, lineType)

        def drawCircleCV2(img,
                          center,
                          radius,
                          color,
                          thickness,
                          width_scale=width_scale,
                          height_scale=height_scale):
            center = (int(center[0] * width_scale),
                      int(center[1] * height_scale))
            cv2.circle(img, center, radius, color, thickness)

        # Python 3.5.6 does not support f-strings (next line will generate syntax error)
        #print(f"Loaded {video_path}. Width: {width}, Height: {height}")
        # print("Loaded {video_path}. Width: {width}, Height: {height}".format(video_path=video_path, width=width, height=height))

        skipped_frames_counter = 0

        while cap.isOpened():
            try:
                ret, frame = cap.read()
                img = cv2.resize(frame, (img_size, img_size))
            except Exception:
                # cap.read() returns (False, None) once the video ends, which makes
                # cv2.resize raise; report the final total and leave the loop instead
                # of continuing with an empty frame
                print(total)
                break

            output_img = frame if use_original_video_size_as_output_size else img

            tracker_rects = []

            if skipped_frames_counter == skip_frames:

                # Detecting happens after number of frames have passes specified by 'skip_frames' variable value
                # print("[DETECTING]")

                trackers = []
                skipped_frames_counter = 0  # reset counter

                np_img = np.array(img).reshape(-1, img_size, img_size, 3)

                start_time = time.time()
                predictions = sess.run(model.preds,
                                       {inputs: model.preprocess(np_img)})
                # print("Detection took %s seconds" % (time.time() - start_time))

                # model.get_boxes returns a 80 element array containing information about detected classes
                # each element contains a list of detected boxes, confidence level ...
                detections = model.get_boxes(predictions, np_img.shape[1:3])
                np_detections = np.array(detections)

                # Loop only through classes we are interested in
                for class_index in classes.keys():
                    local_count = 0
                    class_name = classes[class_index]

                    # Loop through detected infos of a class we are interested in
                    for i in range(len(np_detections[class_index])):
                        box = np_detections[class_index][i]

                        if np_detections[class_index][i][4] >= confidence_level:
                            # print("Detected ", class_name, " with confidence of ", np_detections[class_index][i][4])

                            local_count += 1
                            startX, startY, endX, endY = box[0], box[1], box[
                                2], box[3]

                            drawRectangleCV2(output_img, (startX, startY),
                                             (endX, endY), (0, 255, 0), 1)
                            drawTextCV2(output_img, class_name,
                                        (startX, startY),
                                        cv2.FONT_HERSHEY_SIMPLEX, .5,
                                        (0, 0, 255), 1)

                            # Construct a dlib rectangle object from the bounding box coordinates and then start the dlib correlation
                            tracker = dlib.correlation_tracker()
                            rect = dlib.rectangle(int(startX), int(startY),
                                                  int(endX), int(endY))
                            tracker.start_track(img, rect)

                            # Add the tracker to our list of trackers so we can utilize it during skip frames
                            trackers.append(tracker)

                    # Write the total number of detected objects for a given class on this frame
                    # print(class_name," : ", local_count)
            else:

                # If detection is not happening then track previously detected objects (if any)
                # print("[TRACKING]")

                skipped_frames_counter += 1  # Increase the number frames for which we did not use detection

                # Loop through tracker, update each of them and display their rectangle
                for tracker in trackers:
                    tracker.update(img)
                    pos = tracker.get_position()

                    # Unpack the position object
                    startX = int(pos.left())
                    startY = int(pos.top())
                    endX = int(pos.right())
                    endY = int(pos.bottom())

                    # Add the bounding box coordinates to the tracking rectangles list
                    tracker_rects.append((startX, startY, endX, endY))

                    # Draw tracking rectangles
                    drawRectangleCV2(output_img, (startX, startY),
                                     (endX, endY), (255, 0, 0), 1)

            # Use the centroid tracker to associate the (1) old object centroids with (2) the newly computed object centroids
            objects = ct.update(tracker_rects)

            # Loop over the tracked objects
            for (objectID, centroid) in objects.items():
                # Check to see if a trackable object exists for the current object ID
                to = trackableObjects.get(objectID, None)

                if to is None:
                    # If there is no existing trackable object, create one
                    to = TrackableObject(objectID, centroid)
                else:
                    to.centroids.append(centroid)

                    # If the object has not been counted, count it and mark it as counted
                    if not to.counted:
                        total += 1
                        to.counted = True

                # Store the trackable object in our dictionary
                trackableObjects[objectID] = to

                # Draw both the ID of the object and the centroid of the object on the output frame
                object_id = "ID {}".format(objectID)
                drawTextCV2(output_img, object_id,
                            (centroid[0] - 10, centroid[1] - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
                drawCircleCV2(output_img, (centroid[0], centroid[1]), 2,
                              (0, 255, 0), -1)

                # Display the total count so far
                total_str = str(total)
                drawTextCV2(output_img, total_str, (10, 30),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)

            # Display the current frame (with all annotations drawn up to this point)
            cv2.imshow(video_name, output_img)

            key = cv2.waitKey(1) & 0xFF
            if key == ord('q'):  # QUIT (exits)
                break
            elif key == ord('p'):
                cv2.waitKey(0)  # PAUSE (Enter any key to continue)

    cap.release()
    cv2.destroyAllWindows()
    print("Exited")
    """
Example No. 6
import face_recognition
import threading
import requests
import _thread
import imutils
import time
import sys
import cv2
import os
from tracking.centroidtracker import CentroidTracker

ct = CentroidTracker()

MODEL_MEAN_VALUES = (124.895847746, 87.7689143744, 81.4263377603)
genderList = ['Male', 'Female']


class camer_read(threading.Thread):
    def __init__(self, faceCascade, genderNet):
        threading.Thread.__init__(self)
        self.video_capture = cv2.VideoCapture(0)
        self.faceCascade = faceCascade
        self.genderNet = genderNet
        self.save_time = 1
        self.load_model = 1
        self.face_name = []
        self.locations = []
        self.image = []
        self.exit = 0
        self.color = (0, 255, 0)
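The class is cut off here; a minimal sketch of how such a threading.Thread subclass would typically be driven (the model files and the use of the exit flag are assumptions, not part of the original snippet):

# Assumed setup; the cascade and gender-net files are placeholders.
faceCascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
genderNet = cv2.dnn.readNetFromCaffe("deploy_gender.prototxt", "gender_net.caffemodel")

reader = camer_read(faceCascade, genderNet)
reader.start()      # runs the (omitted) run() method in a background thread
# ... main thread does other work ...
reader.exit = 1     # the exit flag presumably tells run() to stop
reader.join()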
Example No. 7
    outputFile = args.video[:-4] + '_yolo_out_py.avi'
else:
    # Webcam input
    cap = cv.VideoCapture(0)

# Get the video writer initialized to save the output video
if (not args.image):
    vid_writer = cv.VideoWriter(outputFile,
                                cv.VideoWriter_fourcc('M', 'J', 'P', 'G'), 30,
                                (round(cap.get(cv.CAP_PROP_FRAME_WIDTH)),
                                 round(cap.get(cv.CAP_PROP_FRAME_HEIGHT))))

# instantiate our centroid tracker, then initialize a list to store
# each of our dlib correlation trackers, followed by a dictionary to
# map each unique object ID to a TrackableObject
ct = CentroidTracker(maxDisappeared=60, maxDistance=50)
trackers = []
trackableObjects = {}
total = 0

# start the frames per second throughput estimator
fps = FPS().start()
totalFrames = 0

while True:

    # get frame from the video
    hasFrame, frame = cap.read()

    # convert the frame from BGR to RGB for dlib
    rgb = cv.cvtColor(frame, cv.COLOR_BGR2RGB)
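    # The snippet stops mid-loop. A sketch of the assumed continuation (not the
    # original code): with imutils' FPS helper the loop is typically closed out
    # along these lines.

    # ... per-frame detection / tracking on `rgb` would happen here ...

    totalFrames += 1
    fps.update()    # count this frame for the throughput estimate

    if cv.waitKey(1) & 0xFF == ord('q'):
        break

fps.stop()
print("elapsed: {:.2f}s, approx. FPS: {:.2f}".format(fps.elapsed(), fps.fps()))

if not args.image:
    vid_writer.release()
cap.release()
cv.destroyAllWindows()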
Example No. 8
def counter(filenameOpen, filenameSave):
    # tuning used per sample video: 0.55,30 for örnek1-2; 0.5,5 for örnek3; 0.45,11 for örnek4; 0.55,20 for örnek5
    defaultConfidence = 0.55  # detection confidence threshold
    defaultSkipFrames = 20  # number of frames skipped between detections
    W = None
    H = None
    writer = None

    # load the MobileNet-SSD detection model
    net = cv2.dnn.readNetFromCaffe(
        "mobilenet_ssd/MobileNetSSD_deploy.prototxt",
        "mobilenet_ssd/MobileNetSSD_deploy.caffemodel")

    print("Video yükleniyor..")
    vs = cv2.VideoCapture(filenameOpen)

    # each tracked object is given an ID; maxDisappeared / maxDistance bound how long
    # and how far an object may drift while keeping its ID (25,20 for örnek 6; 40,50 for the others)
    ct = CentroidTracker(maxDisappeared=25, maxDistance=20)
    trackers = []
    trackableObjects = {}

    # frame and crossing counters
    totalFrames = 0
    totalDown = 0
    totalUp = 0

    # frames-per-second throughput estimator
    fps = FPS().start()
    stat = {}

    while True:
        # VideoCapture
        ok, frame = vs.read()

        if filenameOpen is not None and frame is None:
            break

        # resize the frame to 640x480 (the less data it has, the faster we can
        # process it), then convert the frame from BGR to RGB for dlib

        frame = cv2.resize(frame, (640, 480))
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

        # grab the frame dimensions if they are not set yet
        if W is None or H is None:
            (H, W) = frame.shape[:2]

        #to save the file
        if filenameSave is not None and writer is None:
            fourcc = cv2.VideoWriter_fourcc(*"MJPG")
            writer = cv2.VideoWriter(filenameSave, fourcc, 30, (W, H), True)

        status = "Bekleniyor"
        rects = []

        # current video position (seconds) and running person count, sampled for the stats
        videotime = vs.get(cv2.CAP_PROP_POS_MSEC) / 1000
        summ = totalUp + totalDown

        if totalFrames % 50 == 0:
            stat["{:.4s}".format(str(videotime))] = str(summ)
            #input to print total number
            #print("{:.4s}".format(str(videotime)) + " people: " + str(summ))

        if totalFrames % defaultSkipFrames == 0:
            # set the status and initialize our new set of object trackers
            status = "Bulundu"
            trackers = []

            # convert the frame to a blob and pass it through the network
            blob = cv2.dnn.blobFromImage(frame, 0.007843, (W, H), 127.5)
            net.setInput(blob)
            detections = net.forward()

            #all detected objects
            for i in np.arange(0, detections.shape[2]):
                confidence = detections[0, 0, i, 2]

                # is the detection confidence above the threshold?
                if confidence > defaultConfidence:
                    idx = int(detections[0, 0, i, 1])

                    #ignore if not human
                    if CLASSES[idx] != "person":
                        continue

                    #frame the object
                    box = detections[0, 0, i, 3:7] * np.array([W, H, W, H])
                    (startX, startY, endX, endY) = box.astype("int")
                    tracker = dlib.correlation_tracker()
                    rect = dlib.rectangle(startX, startY, endX, endY)
                    tracker.start_track(rgb, rect)
                    trackers.append(tracker)

        else:
            #loop over the trackers
            for tracker in trackers:
                status = "Takip"

                # update the tracker and grab the updated position
                tracker.update(rgb)
                pos = tracker.get_position()

                startX = int(pos.left())
                startY = int(pos.top())
                endX = int(pos.right())
                endY = int(pos.bottom())

                #bounding box coordinates added to list of rectangles
                rects.append((startX, startY, endX, endY))

        # draw the counting line used to decide whether objects move up or down;
        # if it is changed here, the crossing check further below (originally line 149)
        # must be updated to match
        # cv2.line(frame, (0, 0), (W, H), (0, 255, 255), 2)  # diagonal variant
        cv2.line(frame, (0, H // 2), (W, H // 2), (255, 255, 0),
                 2)  #horizontal
        #cv2.line(frame, (W//2, 0), (W//2, H), (0, 255, 255), 2)  #vertical

        #associating objects.
        objects = ct.update(rects)

        #object is looped
        for (objectID, centroid) in objects.items():
            to = trackableObjects.get(objectID, None)
            if to is None:
                to = TrackableObject(objectID, centroid)

            #this function is entered to determine the direction.
            else:
                #depending on the direction of movement, whether it is up or down
                y = [c[1] for c in to.centroids]
                direction = centroid[1] - np.mean(y)
                to.centroids.append(centroid)

                if not to.counted:
                    if direction < 0 and centroid[1] < (H) // 2:
                        totalUp += 1
                        to.counted = True

                    elif direction > 0 and centroid[1] > (H) // 2:
                        totalDown += 1
                        to.counted = True

            #traceable object is given id
            trackableObjects[objectID] = to

            #the center of the object is selected and the id number is written
            text = "ID {}".format(objectID)
            cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255), 2)
            cv2.circle(frame, (centroid[0], centroid[1]), 4, (0, 255, 255), -1)

        #screen printouts
        info = [("Yukari", totalUp), ("Asagi", totalDown),
                ("Sure", "{:.2f}".format(videotime))]
        for (i, (k, v)) in enumerate(info):
            text = "{}: {}".format(k, v)
            cv2.putText(frame, text, (10, H - ((i * 20) + 20)),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 0), 2)

        # write the frame to the output file if we are saving
        if writer is not None:
            writer.write(frame)

        # show the output frame
        cv2.imshow(
            "Ege Universitesi Bitirme Tezi(Caner YILDIRIM-Emir Kaan YERLI)",
            frame)
        key = cv2.waitKey(1) & 0xFF

        #exit the loop
        if key == ord("q") or key == ord("Q"):
            break
        totalFrames += 1
        fps.update()

    fps.stop()

    # release the video writer
    if writer is not None:
        writer.release()

    #if we are not using a video file, stop the camera video stream
    if not filenameOpen:
        vs.stop()

    #otherwise, release the video file pointer
    else:
        vs.release()

    #close the windows
    cv2.destroyAllWindows()

    return info, stat
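A hypothetical invocation of counter(); the file names are placeholders, and the function returns the final info tuples plus the statistics dictionary sampled every 50 frames:

# Hypothetical call; file names are placeholders.
info, stat = counter("ornek1.mp4", "ornek1_out.avi")
for label, value in info:
    print(label, value)   # "Yukari <up>", "Asagi <down>", "Sure <seconds>"
print(stat)               # {"<time>": "<running total>", ...}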
Example No. 9
def counter(filenameOpen, filenameSave):
    # construct the argument parse and parse the arguments
    # ap = argparse.ArgumentParser()
    # ap.add_argument("-i", "--input", type=str,
    # help="path to optional input video file")
    # ap.add_argument("-o", "--output", type=str,
    # help="path to optional output video file")
    # args = vars(ap.parse_args())

    defaultConfidence = 0.4  # minimum detection confidence
    defaultSkipFrames = 30  # number of frames skipped between detections
    W = None  # frame dimensions
    H = None
    writer = None

    # load the model
    net = cv2.dnn.readNetFromCaffe(
        "mobilenet_ssd/MobileNetSSD_deploy.prototxt",
        "mobilenet_ssd/MobileNetSSD_deploy.caffemodel")

    # if not args.get("input", False): # if no input path was given, grab video from the webcam
    # 	print("[INFO] starting video stream...")
    # 	vs = VideoStream(src=0).start()
    # 	time.sleep(2.0)

    # else: # otherwise use the video file
    print("[INFO] opening video file...")
    vs = cv2.VideoCapture(filenameOpen)

    # instantiate our centroid tracker, then initialize a list to store
    # each of our dlib correlation trackers, followed by a dictionary to
    # map each unique object ID to a TrackableObject
    ct = CentroidTracker(maxDisappeared=40, maxDistance=50)
    trackers = []
    trackableObjects = {}

    # initialize the total number of frames processed thus far, along
    # with the total number of objects that have moved either up or down
    totalFrames = 0
    totalDown = 0
    totalUp = 0

    # start the frames per second throughput estimator
    fps = FPS().start()

    stat = {}

    # loop over frames from the video stream
    while True:
        # grab the next frame and handle if we are reading from either
        # VideoCapture or VideoStream
        ok, frame = vs.read()
        # frame = frame[1] if args.get("input", False) else frame

        # if we are viewing a video and we did not grab a frame then we
        # have reached the end of the video
        if filenameOpen is not None and frame is None:
            break

        # resize the frame to 640x480 (the less data we have, the faster we
        # can process it), then convert the frame from BGR to RGB for dlib

        frame = cv2.resize(frame, (640, 480))

        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

        # if the frame dimensions are empty, set them
        if W is None or H is None:
            (H, W) = frame.shape[:2]

        # if we are supposed to be writing a video to disk, initialize
        # the writer
        if filenameSave is not None and writer is None:
            fourcc = cv2.VideoWriter_fourcc(*"MJPG")
            writer = cv2.VideoWriter(filenameSave, fourcc, 30, (W, H), True)

        # initialize the current status along with our list of bounding
        # box rectangles returned by either (1) our object detector or
        # (2) the correlation trackers
        status = "Waiting"
        rects = []

        # check to see if we should run a more computationally expensive
        # object detection method to aid our tracker

        videotime = vs.get(cv2.CAP_PROP_POS_MSEC) / 1000
        summ = totalUp + totalDown

        if totalFrames % 50 == 0:
            stat["{:.4s}".format(str(videotime))] = str(summ)
        # print("{:.4s}".format(str(videotime)) + " people: " + str(summ))

        if totalFrames % defaultSkipFrames == 0:
            # set the status and initialize our new set of object trackers
            status = "Detecting"
            trackers = []

            # convert the frame to a blob and pass the blob through the
            # network and obtain the detections
            blob = cv2.dnn.blobFromImage(frame, 0.007843, (W, H), 127.5)
            net.setInput(blob)
            detections = net.forward()

            # loop over the detections
            for i in np.arange(0, detections.shape[2]):
                # extract the confidence (i.e., probability) associated
                # with the prediction
                confidence = detections[0, 0, i, 2]

                # filter out weak detections by requiring a minimum
                # confidence
                if confidence > defaultConfidence:
                    # extract the index of the class label from the
                    # detections list
                    idx = int(detections[0, 0, i, 1])

                    # if the class label is not a person, ignore it
                    if classes.CLASSES[idx] != "person":
                        continue

                    # compute the (x, y)-coordinates of the bounding box
                    # for the object
                    box = detections[0, 0, i, 3:7] * np.array([W, H, W, H])
                    (startX, startY, endX, endY) = box.astype("int")

                    # construct a dlib rectangle object from the bounding
                    # box coordinates and then start the dlib correlation
                    # tracker
                    tracker = dlib.correlation_tracker()
                    rect = dlib.rectangle(startX, startY, endX, endY)
                    tracker.start_track(rgb, rect)

                    # add the tracker to our list of trackers so we can
                    # utilize it during skip frames
                    trackers.append(tracker)

        # otherwise, we should utilize our object *trackers* rather than
        # object *detectors* to obtain a higher frame processing throughput
        else:
            # loop over the trackers
            for tracker in trackers:
                # set the status of our system to be 'tracking' rather
                # than 'waiting' or 'detecting'
                status = "Tracking"

                # update the tracker and grab the updated position
                tracker.update(rgb)
                pos = tracker.get_position()

                # unpack the position object
                startX = int(pos.left())
                startY = int(pos.top())
                endX = int(pos.right())
                endY = int(pos.bottom())

                # add the bounding box coordinates to the rectangles list
                rects.append((startX, startY, endX, endY))

        # draw a horizontal line in the center of the frame -- once an
        # object crosses this line we will determine whether they were
        # moving 'up' or 'down'

        # cv2.line(frame, (0, 0), (W, H), (0, 255, 255), 2)
        cv2.line(frame, (0, H // 2), (W, H // 2), (0, 255, 255), 2)

        # use the centroid tracker to associate the (1) old object
        # centroids with (2) the newly computed object centroids
        objects = ct.update(rects)

        # loop over the tracked objects
        for (objectID, centroid) in objects.items():
            # check to see if a trackable object exists for the current
            # object ID
            to = trackableObjects.get(objectID, None)

            # if there is no existing trackable object, create one
            if to is None:
                to = TrackableObject(objectID, centroid)

            # otherwise, there is a trackable object so we can utilize it
            # to determine direction
            else:
                # the difference between the y-coordinate of the *current*
                # centroid and the mean of *previous* centroids will tell
                # us in which direction the object is moving (negative for
                # 'up' and positive for 'down')
                y = [c[1] for c in to.centroids]
                direction = centroid[1] - np.mean(y)
                to.centroids.append(centroid)

                # check to see if the object has been counted or not
                if not to.counted:
                    # if the direction is negative (indicating the object
                    # is moving up) AND the centroid is above the center
                    # line, count the object
                    if direction < 0 and centroid[1] < H // 2:
                        totalUp += 1
                        to.counted = True

                    # if the direction is positive (indicating the object
                    # is moving down) AND the centroid is below the
                    # center line, count the object
                    elif direction > 0 and centroid[1] > H // 2:
                        totalDown += 1
                        to.counted = True

            # store the trackable object in our dictionary
            trackableObjects[objectID] = to

            # draw both the ID of the object and the centroid of the
            # object on the output frame
            text = "ID {}".format(objectID)
            cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
            cv2.circle(frame, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)

        # construct a tuple of information we will be displaying on the
        # frame
        info = [("Up", totalUp), ("Down", totalDown),
                ("Time", "{:.4f}".format(videotime))]

        # loop over the info tuples and draw them on our frame
        for (i, (k, v)) in enumerate(info):
            text = "{}: {}".format(k, v)
            cv2.putText(frame, text, (10, H - ((i * 20) + 20)),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)

        # check to see if we should write the frame to disk
        if writer is not None:
            writer.write(frame)

        # show the output frame
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF

        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break

        # increment the total number of frames processed thus far and
        # then update the FPS counter
        totalFrames += 1
        fps.update()

    # stop the timer and display FPS information
    fps.stop()

    # check to see if we need to release the video writer pointer
    if writer is not None:
        writer.release()

    # if we are not using a video file, stop the camera video stream
    if not filenameOpen:
        vs.stop()

    # otherwise, release the video file pointer
    else:
        vs.release()

    # close any open windows
    cv2.destroyAllWindows()

    # print(stat)

    return info, stat
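Both counter() variants filter detections against a module-level CLASSES list (Example No. 8 uses CLASSES directly, Example No. 9 uses classes.CLASSES). For the MobileNetSSD_deploy Caffe model loaded here that list is conventionally defined as follows (a sketch of the assumed definition, not shown in the original snippets):

# Class labels of the 20-class (plus background) MobileNet-SSD Caffe model.
CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat", "bottle",
           "bus", "car", "cat", "chair", "cow", "diningtable", "dog", "horse",
           "motorbike", "person", "pottedplant", "sheep", "sofa", "train",
           "tvmonitor"]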