Example #1
def create_tracker(frame, roi, use_dlib=False):
    if use_dlib:
        tracker = dlib.correlation_tracker()        
    else:
        tracker = meanshiftTracker() 
    (roi_x1, roi_y1, roi_x2, roi_y2) = roi
    LOG.debug('create tracker received: {}'.format(roi))
    tracker.start_track(frame,
                        dlib.rectangle(roi_x1, roi_y1, roi_x2, roi_y2))
    return tracker
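A quick way to exercise this helper, assuming a video opened with OpenCV and an ROI given as (x1, y1, x2, y2); LOG is assumed to be the module-level logger the function already uses, and the meanshiftTracker branch is defined elsewhere in the same project:

import logging

import cv2
import dlib

LOG = logging.getLogger(__name__)    # assumed module-level logger used by create_tracker

cap = cv2.VideoCapture("input.mp4")  # hypothetical input file
ok, frame = cap.read()
roi = (50, 60, 200, 220)             # example ROI as (x1, y1, x2, y2)

tracker = create_tracker(frame, roi, use_dlib=True)

# advance the tracker on the next frame and read back the estimated box
ok, frame = cap.read()
tracker.update(frame)
pos = tracker.get_position()
print(int(pos.left()), int(pos.top()), int(pos.right()), int(pos.bottom()))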
Example #2
    def createNewTracks(self, unassignedDetections, detection):
        global tracker_id
        print(detection)
        print(unassignedDetections)
        bboxes = detection[unassignedDetections, :]
        n = bboxes.shape[0]

        for i in range(n):
            bbox = bboxes[i, :].tolist()
            track = Track(tracker_id, bbox, dlib.correlation_tracker())
            tracker_id += 1
            self.tracks.append(track)
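createNewTracks relies on a Track class and a module-level tracker_id counter that are not shown in this excerpt; a minimal sketch of what they might look like, assuming Track only needs to carry an id, the last bounding box and its dlib tracker:

import dlib

tracker_id = 0  # module-level counter incremented by createNewTracks

class Track:
    def __init__(self, track_id, bbox, tracker):
        self.id = track_id        # unique identifier assigned to this track
        self.bbox = bbox          # [x1, y1, x2, y2] of the unassigned detection
        self.tracker = tracker    # dlib.correlation_tracker instance for this object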
Example #3
    def detect(self):
        faces = self._wait_for_faces()
        logger.debug('Found %d faces at locations %s.',
                     len(faces),
                     ', '.join(map(lambda x: str(x.location), faces)))

        if len(faces) == 0:
            return []

        # We care only about the first person in the picture for now
        face = faces[0]
        x, y, w, h = map(int, face.location)
        face_rectangle = dlib.rectangle(x, y, x+w, y+h)

        # This will eventually be returned from this function. List of all
        # faces for this person:
        face_series = [face]

        # Initialize the object tracker:
        self.object_tracker = dlib.correlation_tracker()
        self.object_tracker.start_track(face.frame, face_rectangle)

        logger.info("Starting tracking using dlib.correlation_tracker.")
        while True:
            frame = self.capture_frame()
            psr = self.object_tracker.update(frame)
            if psr < 8.0:
                return face_series

            position = self.object_tracker.get_position()
            rectangle = (int(position.left()),
                         int(position.top()),
                         int(position.width()),
                         int(position.height()))

            face_series.append(ImageLocation(frame, rectangle))
            self.draw_rect(frame, rectangle)
            if self.show_preview:
                cv2.imshow('Preview', frame)
                if cv2.waitKey(1) & 0xFF in map(ord, list('cq')):
                    return face_series
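dlib's correlation_tracker.update() returns the peak-to-sidelobe ratio of the match, so the 8.0 cutoff above ends tracking once the correlation peak becomes unreliable. ImageLocation is not defined in this excerpt; a minimal stand-in, assuming it only pairs a frame with an (x, y, w, h) rectangle, could be:

from collections import namedtuple

# hypothetical container matching how detect() uses face.frame and face.location above
ImageLocation = namedtuple("ImageLocation", ["frame", "location"])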
Example #4
def main():
    videofile = sys.argv[1]
    print('[INFO] Reading %s' % videofile )
    vid = imageio.get_reader( videofile )
    nFrames = 0
    frames = []
    for i in range(1000, 3000):
        img = vid.get_data( i )
        frames.append( img )
        nFrames += 1
    print('[INFO] Loaded all frames' )

    bbox_ = get_rectangle( frames[0] )
    # Create the correlation tracker - the object needs to be initialized
    # before it can be used
    tracker = dlib.correlation_tracker()

    win = dlib.image_window()
    # We will track the frames as we load them off of disk

    for k, img in enumerate(frames):
        print("Processing Frame {}".format(k))
        # We need to initialize the tracker on the first frame
        if k == 0:
            # Start a track on the region selected in the first frame
            # (the bounding box bbox_ returned by get_rectangle above).
            (x0, y0), (x1, y1) = bbox_
            tracker.start_track(img, dlib.rectangle( x0, y0, x1, y1 ))
        else:
            # Else we just attempt to track from the previous frame
            tracker.update(img)

        win.clear_overlay()
        win.set_image(img)
        win.add_overlay(tracker.get_position())
Example #5
                    people_x.append(person_conf_multi[people_i][point_i][0])
                    people_y.append(person_conf_multi[people_i][point_i][1])
            if i == 0:
                target_points.append((int(min(people_x)), int(min(people_y)),
                                      int(max(people_x)), int(max(people_y))))
            else:
                is_new_person = True
                for k in range(len(tracker)):
                    rect = tracker[k].get_position()
                    if np.mean(people_x) < rect.right() and np.mean(
                            people_x) > rect.left() and np.mean(
                                people_y) < rect.bottom() and np.mean(
                                    people_y) > rect.top():
                        is_new_person = False
                if is_new_person:
                    tracker.append(dlib.correlation_tracker())
                    print('is_new_person!')
                    rect_temp = []
                    rect_temp.append((int(min(people_x)), int(min(people_y)),
                                      int(max(people_x)), int(max(people_y))))
                    [
                        tracker[i + len(tracker) - 1].start_track(
                            image, dlib.rectangle(*rect))
                        for i, rect in enumerate(rect_temp)
                    ]

    ##########

    if i == 0:
        # Initial co-ordinates of the object to be tracked
        # Create the tracker object
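The truncated fragment above checks whether a newly detected person's mean keypoint position already falls inside an existing correlation tracker's box before it starts a new tracker; a compact helper expressing the same containment test, assuming tracker is the list of dlib.correlation_tracker objects used above, might look like:

import numpy as np

def inside_existing_track(trackers, people_x, people_y):
    # True if the mean keypoint position already lies inside some tracker's box
    mean_x, mean_y = np.mean(people_x), np.mean(people_y)
    for t in trackers:
        rect = t.get_position()
        if rect.left() < mean_x < rect.right() and rect.top() < mean_y < rect.bottom():
            return True
    return False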
Example #6
    def __init__(self, *args, **kwargs):
        tk.Tk.__init__(self, *args, **kwargs)

        # set the window title (could also be done in main)
        self.title("Data annotation")

        # create a canvas
        self.canvas = tk.Canvas(width=860, height=640)
        self.canvas.pack(fill="both", expand=True)

        # maybe set up the main folder here, e.g. $TOOLS

        #self.video_folder = os.path.abspath("/home/stefan/Documents/plexondata/2015_12_02_rat10/output1/")
        self.video_folder = os.path.abspath("/home/stefan/Documents/paw_tracking/frames")
        self.save_folder_patches = os.path.abspath("/home/stefan/Documents/paw_tracking/patches")
        self.save_folder_txt = os.path.abspath("/home/stefan/Documents/paw_tracking")

        # create an image

        # keep a reference on self so the image is not garbage collected
        # image counter
        self.img_num = 0

        # read all images
        self.images = []
        self.images_raw = []
        self._read_all_images()
        self.rectangle_frame_pairs = [0] * len(self.images_raw)

        #self.img = ImageTk.PhotoImage(Image.open(os.path.join(self.video_folder, "{0}.jpeg".format(self.img_num+1))))

        self.img_id = self.canvas.create_image(100, 100, image=self.images[self.img_num], anchor="nw")

        # this data is used to keep track of an item being dragged
        self._drag_data = {"x": 0, "y": 0, "item": None}

        # create a couple of movable objects
        self.polygon_id = 0

        # rectangle size in x and y
        # rectangle_size = [100, 40]
        self.rectangle_size = [100, 50]

        self._create_token((100, 100), "red", self.rectangle_size)
        #self._create_token((200, 100), "black")

        # put image in label
        #panel = tk.Label(self.canvas, image = self.img)
        #panel.place(x=150,y=150)
        #panel.pack()

        #self.canvas.tag_lower(panel)
        #self.canvas.tag_lower(self.img)

        # add bindings for clicking, dragging and releasing over
        # any object with the "token" tag
        self.canvas.tag_bind("token", "<ButtonPress-1>", self.OnTokenButtonPress)
        self.canvas.tag_bind("token", "<ButtonRelease-1>", self.OnTokenButtonRelease)
        self.canvas.tag_bind("token", "<B1-Motion>", self.OnTokenMotion)

        # add bindings for the return and arrow keys used to change the current image
        self.canvas.bind("<Return>", self.returnKey)
        self.canvas.bind("<Right>", self.rightKey)
        self.canvas.bind("<Left>", self.leftKey)
        self.canvas.focus_set()

        #self.canvas.bind("<ButtonPress-1>", self.OnTokenButtonPress)
        #self.canvas.bind("<ButtonRelease-1>", self.OnTokenButtonRelease)
        #self.canvas.bind("<B1-Motion>", self.OnTokenMotion)

        # initialize tracker
        self.tracker = dlib.correlation_tracker()
        self.prev_tracker = self.tracker
        self.flag = 0

        '''create buttons section'''

        # add quit button
        button1 = tk.Button(self.canvas, text="Quit", command=self.quit, anchor="w")
        button1.configure(width=10)
        button1.pack()
        button1_window = self.canvas.create_window(10, 10, anchor="nw", window=button1)

        button2 = tk.Button(self.canvas, text="Save annotations", command=self.save, anchor="w")
        button2.configure(width=15)
        button2.pack()
        button2_window = self.canvas.create_window(150, 10, anchor="nw", window=button2)

        # check whether the spacing stays the same between save and load
        button3 = tk.Button(self.canvas, text="Load annotations", command=self.load, anchor="w")
        button3.configure(width=15)
        button3.pack()
        button3_window = self.canvas.create_window(330, 10, anchor="nw", window=button3)

        button4 = tk.Button(self.canvas, text="Extract patches", command=self.extract_patches, anchor="w")
        button4.configure(width=15)
        button4.pack()
        button4_window = self.canvas.create_window(530, 10, anchor="nw", window=button4)
Example #7
    def detectFace(self):
        # Initialize some useful arguments
        cosine_threshold = 0.8
        proba_threshold = 0.85
        comparing_num = 5
        trackers = []
        texts = []
        frames = 0

        # Start streaming and recording
        cap = cv2.VideoCapture(0)
        frame_width = int(cap.get(3))
        frame_height = int(cap.get(4))
        print(str(frame_width) + " : " + str(frame_height))
        save_width = 800
        save_height = int(800 / frame_width * frame_height)

        while True:
            ret, frame = cap.read()
            frames += 1
            frame = cv2.resize(frame, (save_width, save_height))
            rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

            if frames % 3 == 0:
                trackers = []
                texts = []

                bboxes = self.detector.detect_faces(frame)

                if len(bboxes) != 0:

                    for bboxe in bboxes:
                        bbox = bboxe['box']
                        bbox = np.array([
                            bbox[0], bbox[1], bbox[0] + bbox[2],
                            bbox[1] + bbox[3]
                        ])
                        landmarks = bboxe['keypoints']
                        landmarks = np.array([
                            landmarks["left_eye"][0],
                            landmarks["right_eye"][0], landmarks["nose"][0],
                            landmarks["mouth_left"][0],
                            landmarks["mouth_right"][0],
                            landmarks["left_eye"][1],
                            landmarks["right_eye"][1], landmarks["nose"][1],
                            landmarks["mouth_left"][1],
                            landmarks["mouth_right"][1]
                        ])
                        landmarks = landmarks.reshape((2, 5)).T
                        nimg = face_preprocess.preprocess(frame,
                                                          bbox,
                                                          landmarks,
                                                          image_size='112,112')
                        nimg = cv2.cvtColor(nimg, cv2.COLOR_BGR2RGB)
                        nimg = np.transpose(nimg, (2, 0, 1))
                        embedding = self.embedding_model.get_feature(
                            nimg).reshape(1, -1)

                        text = "Unknown"

                        # Predict class
                        preds = self.model.predict(embedding)
                        preds = preds.flatten()
                        # Get the highest accuracy embedded vector
                        j = np.argmax(preds)
                        proba = preds[j]
                        # Compare this vector to the source class vectors to verify it actually belongs to this class
                        match_class_idx = (self.labels == j)
                        match_class_idx = np.where(match_class_idx)[0]
                        selected_idx = np.random.choice(
                            match_class_idx, comparing_num)
                        compare_embeddings = self.embeddings[selected_idx]
                        # Calculate cosine similarity
                        cos_similarity = self.CosineSimilarity(
                            embedding, compare_embeddings)
                        if cos_similarity < cosine_threshold and proba > proba_threshold:
                            name = self.le.classes_[j]
                            text = "{}".format(name)
                            print("Recognized: {} <{:.2f}>".format(
                                name, proba * 100))
                        # Start tracking
                        tracker = dlib.correlation_tracker()
                        rect = dlib.rectangle(bbox[0], bbox[1], bbox[2],
                                              bbox[3])
                        tracker.start_track(rgb, rect)
                        trackers.append(tracker)
                        texts.append(text)

                        y = bbox[1] - 10 if bbox[1] - 10 > 10 else bbox[1] + 10
                        cv2.putText(frame, text, (bbox[0], y),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.95,
                                    (255, 255, 255), 1)
                        cv2.rectangle(frame, (bbox[0], bbox[1]),
                                      (bbox[2], bbox[3]), (179, 0, 149), 4)
            else:
                for tracker, text in zip(trackers, texts):
                    pos = tracker.get_position()

                    # unpack the position object
                    startX = int(pos.left())
                    startY = int(pos.top())
                    endX = int(pos.right())
                    endY = int(pos.bottom())

                    cv2.rectangle(frame, (startX, startY), (endX, endY),
                                  (179, 0, 149), 4)
                    cv2.putText(frame, text, (startX, startY - 15),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.95,
                                (255, 255, 255), 1)

            cv2.imshow("Frame", frame)
            key = cv2.waitKey(1) & 0xFF

            if key == ord("q"):
                break

        cap.release()
        cv2.destroyAllWindows()
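face_preprocess, the embedding model and CosineSimilarity come from the surrounding project and are not shown here. Judging by how it is used (compared with cosine_threshold via <), CosineSimilarity presumably returns a mean cosine distance between the query embedding and the sampled reference embeddings; a plausible sketch, written here as a plain function:

import numpy as np

def CosineSimilarity(test_vec, source_vecs):
    # hypothetical implementation: average cosine distance to the reference vectors
    test_vec = test_vec.flatten()
    cos_dist = 0.0
    for source_vec in source_vecs:
        cos_dist += 1.0 - np.dot(test_vec, source_vec) / (
            np.linalg.norm(test_vec) * np.linalg.norm(source_vec))
    return cos_dist / len(source_vecs)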
Example #8
                # Get the highest accuracy embedded vector
                j = np.argmax(preds)
                proba = preds[j]
                # Compare this vector to the source class vectors to verify it actually belongs to this class
                match_class_idx = (labels == j)
                match_class_idx = np.where(match_class_idx)[0]
                selected_idx = np.random.choice(match_class_idx, comparing_num)
                compare_embeddings = embeddings[selected_idx]
                # Calculate cosine similarity
                cos_similarity = CosineSimilarity(embedding, compare_embeddings)
                if cos_similarity < cosine_threshold and proba > proba_threshold:
                    name = le.classes_[j]
                    text = "{}".format(name)
                    print("Recognized: {} <{:.2f}>".format(name, proba*100))
                # Start tracking
                tracker = dlib.correlation_tracker()
                rect = dlib.rectangle(bbox[0], bbox[1], bbox[2], bbox[3])
                tracker.start_track(rgb, rect)
                trackers.append(tracker)
                texts.append(text)

                y = bbox[1] - 10 if bbox[1] - 10 > 10 else bbox[1] + 10
                cv2.putText(frame, text, (bbox[0], y), cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)
                cv2.rectangle(frame, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (255,0,0), 2)
    else:
        for tracker, text in zip(trackers,texts):
            pos = tracker.get_position()

            # unpack the position object
            startX = int(pos.left())
            startY = int(pos.top())
Example #9
def run(source=0, dispLoc=False):
    # Create the VideoCapture object
    cam = cv2.VideoCapture()
    cam.open(source)
    # If the camera device could not be opened, exit the program
    if not cam.isOpened():
        print("Video device or file couldn't be opened")
        exit()
    


    print("Press key `p` to pause the video to start tracking")
    while True:
        # Retrieve an image and display it.
        retval, img = cam.read()
        if not retval:
            print("Cannot capture frame device")
            exit()
        if cv2.waitKey(10) == ord('p'):
            break
        #cv2.namedWindow("Image", cv2.WINDOW_NORMAL)
        cv2.namedWindow("Image")
        cv2.imshow("Image", img)
    cv2.destroyWindow("Image")

    # Co-ordinates of objects to be tracked 
    # will be stored in a list named `points`
    points = get_points.run(img) 

    deadzone_height = numpy.size(img, 0) // 2
    deadzone_width = numpy.size(img, 1) // 2

    print('height = ', deadzone_height, 'width = ', deadzone_width)

    if not points:
        print("ERROR: No object to be tracked.")
        exit()
    
    cv2.namedWindow("Image")
    cv2.imshow("Image", img)

    # Initial co-ordinates of the object to be tracked 
    # Create the tracker object
    tracker = dlib.correlation_tracker()
    # Provide the tracker the initial position of the object
    tracker.start_track(img, dlib.rectangle(*points[0]))

    while True:
        # Read frame from device or file
        retval, img = cam.read()
        if not retval:
            print("Cannot capture frame device | CODE TERMINATING :(")
            exit()
        # Update the tracker  
        tracker.update(img)
        # Get the position of the object, draw a 
        # bounding box around it and display it.
        rect = tracker.get_position()
        pt1 = (int(rect.left()), int(rect.top()))
        pt2 = (int(rect.right()), int(rect.bottom()))
        center = ((pt1[0] + pt2[0]) // 2, (pt1[1] + pt2[1]) // 2)
        print (center)

        if (deadzone_width - center[0]) > deadzone_width / 2:
            print('move left')
            #send_ned_velocity(-1,0,0,5)
            #time.sleep(5)

        if (deadzone_width - center[0]) < -deadzone_width / 2:
            print('move right')
            #send_ned_velocity(1,0,0,5)
            #time.sleep(5)

        if (deadzone_height - center[1]) > deadzone_height / 2:
            print('move forward')
            #send_ned_velocity(0,1,0,5)
            #time.sleep(5)
        if (deadzone_height - center[1]) < -deadzone_height / 2:
            print('move back')
            #send_ned_velocity(0,-1,0,5)
            #time.sleep(5)


        cv2.rectangle(img, pt1, pt2, (255, 255, 255), 3)
        #print "Object tracked at [{}, {}] \r".format(pt1, pt2),
        if dispLoc:
            loc = (int(rect.left()), int(rect.top()-20))
            txt = "Object tracked at [{}, {}]".format(pt1, pt2)
            cv2.putText(img, txt, loc , cv2.FONT_HERSHEY_SIMPLEX, .5, (255,255,255), 1)
        cv2.namedWindow("Image")
        cv2.imshow("Image", img)
        
        # Continue until the user presses ESC key
        if cv2.waitKey(1) == 27:
            print("Setting LAND mode...")
            vehicle.mode = VehicleMode("LAND")
            # Close vehicle object before exiting script
            print("Close vehicle object")
            vehicle.close()
            print("Completed")
            break

    # Release the VideoCapture object
    cam.release()
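get_points.run() comes from a companion module that lets the user draw a box on the paused frame; a rough stand-in using OpenCV's built-in ROI selector, assuming a single object is selected, could be:

import cv2

def select_points(img):
    # hypothetical replacement for get_points.run: one user-drawn box as (x1, y1, x2, y2)
    x, y, w, h = cv2.selectROI("Select object", img, showCrosshair=False)
    cv2.destroyWindow("Select object")
    if w == 0 or h == 0:
        return []
    return [(x, y, x + w, y + h)]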
Example #10
def detect_motion(frameCount):
    # grab global references to the video stream, output frame, and
    # lock variables
    global vs, outputFrame, lock

    cctvpath = "rtsp://10.1.1.10:554/rtsp_live/profile_token_0"
    cap = cv2.VideoCapture(cctvpath)
    #cap = cv2.VideoCapture('test_videos/v2.mp4')

    W = None
    H = None

    # instantiate our centroid tracker, then initialize a list to store
    # each of our dlib correlation trackers, followed by a dictionary to
    # map each unique object ID to a TrackableObject
    ct = CentroidTracker(maxDisappeared=40, maxDistance=50)
    trackers = []
    trackableObjects = {}

    totalExit = 0
    totalEnterance = 0

    fps = FPS().start()

    # loop over frames from the video stream
    while True:

        rects = []
        trackers = []

        r, img = cap.read()

        if img is None:
            break

        #img = cv2.resize(img, (1280, 720))
        img = imutils.resize(img, width=1024)
        rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

        if W is None or H is None:
            (H, W) = img.shape[:2]

        borderPositionX = W - (W // 4)
        borderWidth = 40

        cv2.rectangle(img, (borderPositionX, 0),
                      (borderPositionX + borderWidth, H), (0, 255, 255), 2)

        boxes, scores, classes, num = odapi.processFrame(img)

        for i in range(len(boxes)):
            # Class 1 represents human
            if classes[i] == 1 and scores[i] > threshold:
                box = boxes[i]
                cv2.rectangle(img, (box[1], box[0]), (box[3], box[2]),
                              (255, 0, 0), 2)
                cv2.putText(img, 'Person : ' + str(round(scores[i], 2)),
                            (box[1], box[0] - 10), cv2.FONT_HERSHEY_SIMPLEX,
                            0.5, (36, 255, 12), 2)

                # construct a dlib rectangle object from the bounding
                # box coordinates and then start the dlib correlation
                # tracker
                tracker = dlib.correlation_tracker()
                rect = dlib.rectangle(box[1], box[0], box[3], box[2])
                tracker.start_track(rgb, rect)

                # add the tracker to our list of trackers so we can
                # utilize it during skip frames
                trackers.append(tracker)

        for tracker in trackers:
            # update the tracker and grab the updated position
            tracker.update(rgb)
            pos = tracker.get_position()

            # unpack the position object
            startX = int(pos.left())
            startY = int(pos.top())
            endX = int(pos.right())
            endY = int(pos.bottom())

            start = (startX, startY)
            end = (endX, endY)

            color = list(np.random.random(size=3) * 256)

            cv2.rectangle(img, start, end, color, 4)

            rects.append((startX, startY, endX, endY))

        # use the centroid tracker to associate the (1) old object
        # centroids with (2) the newly computed object centroids
        objects = ct.update(rects)

        # loop over the tracked objects
        for (objectID, centroid) in objects.items():
            # check to see if a trackable object exists for the current
            # object ID

            to = trackableObjects.get(objectID, None)

            # if there is no existing trackable object, create one
            if to is None:
                to = TrackableObject(objectID, centroid)
            else:
                #y directions of the centroids
                y = [c[1] for c in to.centroids]

                #x directions of the centroids
                x = [c[0] for c in to.centroids]

                avgPosX = np.mean(x)
                startPositionX = centroid[0]

                direction = centroid[0] - avgPosX
                to.centroids.append(centroid)

                if not to.counted:
                    # if the object was re-detected but its current and its average
                    # positions are on the same side of the border line, it has not
                    # crossed the border, so do not count it
                    if (borderPositionX > startPositionX and borderPositionX >
                            avgPosX) or (borderPositionX < startPositionX
                                         and borderPositionX < avgPosX):
                        pass
                    else:
                        # if the direction is negative (indicating the object
                        # is moving left) AND the centroid is to the left of
                        # the border line, count it as an entrance
                        if direction < 0 and centroid[0] < borderPositionX:
                            totalEnterance += 1
                            to.counted = True

                        # if the direction is positive (indicating the object
                        # is moving right) AND the centroid is to the right of
                        # the border line, count it as an exit
                        elif direction > 0 and centroid[0] > borderPositionX:
                            totalExit += 1
                            to.counted = True

            text = "ID {}".format(objectID)
            cv2.putText(img, text, (centroid[0] - 10, centroid[1] - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
            cv2.circle(img, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)

            trackableObjects[objectID] = to

        # construct a tuple of information we will be displaying on the
        # frame
        info = [("Enter", totalEnterance), ("Exit", totalExit),
                ("Person Inside", str(totalEnterance - totalExit))]

        # loop over the info tuples and draw them on our frame
        for (i, (k, v)) in enumerate(info):
            text = "{}: {}".format(k, v)
            cv2.putText(img, text, (10, H - ((i * 20) + 20)),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)

        fps.update()

        # acquire the lock, set the output frame, and release the
        # lock
        with lock:
            outputFrame = img.copy()

    fps.stop()
    print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
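CentroidTracker and TrackableObject are external helpers (in the style of the pyimagesearch people-counting code); a minimal TrackableObject consistent with how it is used above only needs an ID, a centroid history and a counted flag:

class TrackableObject:
    def __init__(self, objectID, centroid):
        self.objectID = objectID      # ID assigned by the centroid tracker
        self.centroids = [centroid]   # history of centroid positions
        self.counted = False          # whether this object has already been counted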
Example #11
def fn2():
    Prototxt = 'C:\\Users\\Kanchi\\PycharmProjects\\demo\\mobilenet_ssd\\MobileNetSSD_deploy.prototxt'
    model = 'C:\\Users\\Kanchi\\PycharmProjects\\demo\\mobilenet_ssd\\MobileNetSSD_deploy.caffemodel'
    Input = 'test\\test8.mp4'
    output = 'output\\output_01.avi'
    Confidence = 0.85
    skip_frames = 2
    info = []

    # construct the argument parse and parse the arguments
    # ap = argparse.ArgumentParser()
    # ap.add_argument("-p", "--prototxt", required=True,
    #                 help="path to Caffe 'deploy' prototxt file")
    # ap.add_argument("-m", "--model", required=True,
    #                 help="path to Caffe pre-trained model")
    # ap.add_argument("-i", "--input", type=str,
    #                 help="path to optional input video file")
    # ap.add_argument("-o", "--output", type=str,
    #                 help="path to optional output video file")
    # ap.add_argument("-c", "--confidence", type=float, default=0.4,
    #                 help="minimum probability to filter weak detections")
    # ap.add_argument("-s", "--skip-frames", type=int, default=30,
    #                 help="# of skip frames between detections")
    # args = vars(ap.parse_args())

    # initialize the list of class labels MobileNet SSD was trained to
    # detect
    CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat",
               "bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
               "dog", "horse", "motorbike", "person", "pottedplant", "sheep",
               "sofa", "train", "tvmonitor"]

    # load our serialized model from disk
    print("[INFO] loading model...")
    net = cv2.dnn.readNetFromCaffe(Prototxt, model)

    # if a video path was not supplied, grab a reference to the webcam
    if not Input:
        print("[INFO] starting video stream...")
        vs = VideoStream(src=0).start()
        time.sleep(2.0)

    # otherwise, grab a reference to the video file
    else:
        print("[INFO] opening video file...")
        vs = cv2.VideoCapture(Input)

    # initialize the video writer (we'll instantiate later if need be)
    writer = None

    # initialize the frame dimensions (we'll set them as soon as we read
    # the first frame from the video)
    W = None
    H = None

    # instantiate our centroid tracker, then initialize a list to store
    # each of our dlib correlation trackers, followed by a dictionary to
    # map each unique object ID to a TrackableObject
    # ct = CentroidTracker(maxDisappeared=40, maxDistance=50)
    ct = CentroidTracker(maxDisappeared=20, maxDistance=100)

    trackers = []
    trackableObjects = {}

    # initialize the total number of frames processed thus far, along
    # with the total number of objects that have moved either up or down
    totalFrames = 0
    totalDown = 0
    totalUp = 0
    right = 0
    people = 0
    # start the frames per second throughput estimator
    fps = FPS().start()

    # loop over frames from the video stream
    while True:

        # grab the next frame and handle if we are reading from either
        # VideoCapture or VideoStream
        frame = vs.read()
        frame = frame[1] if Input else frame

        # if we are viewing a video and we did not grab a frame then we
        # have reached the end of the video
        if Input is not None and frame is None:
            break

        # resize the frame to have a maximum width of 500 pixels (the
        # less data we have, the faster we can process it), then convert
        # the frame from BGR to RGB for dlib
        frame = imutils.resize(frame, width=500)
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

        # if the frame dimensions are empty, set them
        if W is None or H is None:
            (H, W) = frame.shape[:2]
        # if we are supposed to be writing a video to disk, initialize
        # the writer
        if output is not None and writer is None:
            fourcc = cv2.VideoWriter_fourcc(*"MJPG")
            writer = cv2.VideoWriter(output, fourcc, 30,
                                     (W, H), True)

        # initialize the current status along with our list of bounding
        # box rectangles returned by either (1) our object detector or
        # (2) the correlation trackers
        status = "Waiting"
        rects = []

        # check to see if we should run a more computationally expensive
        # object detection method to aid our tracker
        if totalFrames % skip_frames == 0:
            # set the status and initialize our new set of object trackers
            status = "Detecting"
            trackers = []

            # convert the frame to a blob and pass the blob through the
            # network and obtain the detections
            blob = cv2.dnn.blobFromImage(frame, 0.007843, (W, H), 127.5)
            net.setInput(blob)
            detections = net.forward()

            # loop over the detections
            for i in np.arange(0, detections.shape[2]):
                # extract the confidence (i.e., probability) associated
                # with the prediction
                confidence = detections[0, 0, i, 2]

                # filter out weak detections by requiring a minimum
                # confidence
                if confidence > Confidence:
                    # extract the index of the class label from the
                    # detections list
                    idx = int(detections[0, 0, i, 1])

                    # if the class label is not a person, ignore it
                    if CLASSES[idx] != "person":
                        continue

                    # compute the (x, y)-coordinates of the bounding box
                    # for the object
                    box = detections[0, 0, i, 3:7] * np.array([W, H, W, H])
                    (startX, startY, endX, endY) = box.astype("int")

                    # construct a dlib rectangle object from the bounding
                    # box coordinates and then start the dlib correlation
                    # tracker
                    tracker = dlib.correlation_tracker()

                    rect = dlib.rectangle(startX, startY, endX, endY)

                    tracker.start_track(rgb, rect)

                    # add the tracker to our list of trackers so we can
                    # utilize it during skip frames
                    trackers.append(tracker)

        # otherwise, we should utilize our object *trackers* rather than
        # object *detectors* to obtain a higher frame processing throughput
        else:
            # loop over the trackers

            for tracker in trackers:
                # set the status of our system to be 'tracking' rather
                # than 'waiting' or 'detecting'
                status = "Tracking"

                # update the tracker and grab the updated position
                tracker.update(rgb)
                pos = tracker.get_position()

                # unpack the position object
                startX = int(pos.left())
                startY = int(pos.top())
                endX = int(pos.right())
                endY = int(pos.bottom())

                if (startX + endX) / 2 > 2 * W // 3:
                    cv2.rectangle(frame, (startX, startY), (endX, endY), (0, 255, 0), 2)
                    # x1 = int((startX + endX) / 2)
                    # y1 = int((startY + endY) / 2)

                    # add the bounding box coordinates to the rectangles list
                rects.append((startX, startY, endX, endY))

        # draw a horizontal line in the center of the frame -- once an
        # object crosses this line we will determine whether they were
        # moving 'up' or 'down'
        cv2.line(frame, (2 * W // 3, 0), (2 * W // 3, H), (0, 255, 255), 2)

        # use the centroid tracker to associate the (1) old object
        # centroids with (2) the newly computed object centroids
        objects = ct.update(rects)
        right = 0
        total = 0
        for i in objects.items():
            total += 1
            if i[1][0] > 2 * W // 3:
                right += 1

        people += total

        # loop over the tracked objects

        for (objectID, centroid) in objects.items():
            # check to see if a trackable object exists for the current
            # object ID
            to = trackableObjects.get(objectID, None)

            # if there is no existing trackable object, create one
            if to is None:
                to = TrackableObject(objectID, centroid)


            # otherwise, there is a trackable object so we can utilize it
            # to determine direction
            else:
                # the difference between the x-coordinate of the *current*
                # centroid and the mean of *previous* centroids would tell
                # us in which direction the object is moving (negative for
                # 'left' and positive for 'right')
                x = [c[0] for c in to.centroids]

                # direction = centroid[0] - np.mean(x)
                to.centroids.append(centroid)
                # print(x)

                # check to see if the object has been counted or not
                if not to.counted:
                    # if the direction is negative (indicating the object
                    # is moving up) AND the centroid is above the center
                    # line, count the object
                    # if direction < 0 and centroid[1] < 2*H // 3:
                    # 	totalUp += 1
                    # 	to.counted = True

                    # if the direction is positive (indicating the object
                    # is moving down) AND the centroid is below the
                    # center line, count the object
                    # if centroid[1] < 2*H // 3:
                    # 	totalUp += 1
                    # 	to.counted = True
                    #
                    # if centroid[1] > 2*H // 3:
                    # 	totalDown += 1
                    # 	to.counted = True
                    # if centroid[0] < 2 * H // 3:
                    # 	totalUp += 1
                    # 	to.counted = True

                    if centroid[0] > 2 * W // 3:
                        totalDown += 1
                        to.counted = True

                # print(len(x))



                # print(x[len(x) - 1])
                # print(x[len(x) - 2])
                if x[len(x) - 2] > (2 * W // 3) and x[len(x) - 1] < (2 * W // 3):
                    print("------------------------------------------------subtracted")
                    totalDown -= 1
                    to.counted = True

            #
            # if x1 > 2 * H // 3:
            # 	totalDown += 1
            # to.counted = True

            # store the trackable object in our dictionary
            trackableObjects[objectID] = to

            # draw both the ID of the object and the centroid of the
            # object on the output frame
            text = "ID {}".format(objectID)
            cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
            cv2.circle(frame, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)

        # construct a tuple of information we will be displaying on the
        # frame
        info = [
            ("Total", total),
            ("Right", right),
            ("Status", status)

        ]

        # loop over the info tuples and draw them on our frame
        for (i, (k, v)) in enumerate(info):
            text = "{}: {}".format(k, v)
            cv2.putText(frame, text, (10, H - ((i * 20) + 20)),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)

        # check to see if we should write the frame to disk
        if writer is not None:
            writer.write(frame)

        # show the output frame
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF

        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break

        # increment the total number of frames processed thus far and
        # then update the FPS counter
        totalFrames += 1
        fps.update()

    queue = people // totalFrames
    # stop the timer and display FPS information
    fps.stop()
    print("[INFO] Total Frames: ", totalFrames)
    print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
    print("[INFO] Average Queue: ", queue)

    # check to see if we need to release the video writer pointer
    if writer is not None:
        writer.release()

    # if we are not using a video file, stop the camera video stream
    if not Input:
        vs.stop()

    # otherwise, release the video file pointer
    else:
        vs.release()

    # close any open windows
    cv2.destroyAllWindows()

    info.append(('queue',queue))

    print("info=======================================",info)

    return info
Example #12
    def _track(self, direction=FORWARD):
        """Actual tracking based on existing detections"""

        if direction == FORWARD:
            frame_cache = self._frame_cache
        elif direction == BACKWARD:
            frame_cache = reversed(self._frame_cache)
        else:
            raise NotImplementedError()

        self._trackers = {}
        self._confidences = {}
        self._previous = {}
        new_identifier = 0

        for t, frame in frame_cache:

            # update trackers & end those with low confidence
            for identifier, tracker in list(self._trackers.items()):
                confidence = tracker.update(frame)
                self._confidences[identifier] = confidence
                if confidence < self.track_min_confidence:
                    self._kill_tracker(identifier)

            # match trackers with detections at time t
            detections = [d for _, d, status in self._tracking_graph[t]
                          if status == DETECTION]
            match = self._associate(self._trackers, detections)

            # process all matched trackers
            for d, identifier in match.items():

                # connect the previous position of the tracker
                # to the (current) associated detection
                current = (t, detections[d], DETECTION)
                self._tracking_graph.add_edge(
                    self._previous[identifier], current,
                    confidence=self._confidences[identifier])

                # end the tracker
                self._kill_tracker(identifier)

            # process all unmatched trackers
            for identifier, tracker in self._trackers.items():

                # connect the previous position of the tracker
                # to the current position of the tracker
                position = tracker.get_position()
                position = (
                    position.left(),
                    position.top(),
                    position.right(),
                    position.bottom()
                )
                current = (t, position, direction)
                self._tracking_graph.add_edge(
                    self._previous[identifier], current,
                    confidence=self._confidences[identifier])

                # save current position of the tracker for next iteration
                self._previous[identifier] = current

            # start new trackers for all detections
            for d, detection in enumerate(detections):

                # start new tracker
                new_tracker = dlib.correlation_tracker()
                new_tracker.start_track(frame, dlib.drectangle(*detection))
                self._trackers[new_identifier] = new_tracker

                # save previous (t, position, status) tuple
                current = (t, detection, DETECTION)
                self._previous[new_identifier] = current

                # increment tracker identifier
                new_identifier = new_identifier + 1
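_associate, _kill_tracker and _tracking_graph belong to the surrounding class and are not shown here. As a sketch of the bookkeeping involved, _kill_tracker presumably just forgets a tracker whose confidence fell below track_min_confidence, for example:

    def _kill_tracker(self, identifier):
        # assumed behaviour: drop the tracker and everything remembered about it
        del self._trackers[identifier]
        del self._confidences[identifier]
        del self._previous[identifier]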
Example #13
    def track(self):
        CLASSES = [
            "background", "aeroplane", "bicycle", "bird", "boat", "bottle",
            "bus", "car", "cat", "chair", "cow", "diningtable", "dog", "horse",
            "motorbike", "person", "pottedplant", "sheep", "sofa", "train",
            "tvmonitor"
        ]
        net = cv2.dnn.readNetFromCaffe(self.p, self.m)
        vs = VideoStream(src=0).start()
        writer = None
        W = None
        H = None
        ct = CentroidTracker(maxDisappeared=40, maxDistance=50)
        trackers = []
        trackableObjects = {}

        totalFrames = 0
        totalDown = 0
        totalUp = 0
        fps = FPS().start()

        while True:
            # grab the next frame and handle if we are reading from either
            # VideoCapture or VideoStream
            frame = vs.read()
            #frame = frame[1] if get(self.i, False) else frame

            # if we are viewing a video and we did not grab a frame then we
            # have reached the end of the video
            if self.i is not None and frame is None:
                break

            # resize the frame to have a maximum width of 500 pixels (the
            # less data we have, the faster we can process it), then convert
            # the frame from BGR to RGB for dlib
            frame = imutils.resize(frame, width=500)
            rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

            # if the frame dimensions are empty, set them
            if W is None or H is None:
                (H, W) = frame.shape[:2]

            # if we are supposed to be writing a video to disk, initialize
            # the writer
            if self.o is not None and writer is None:
                fourcc = cv2.VideoWriter_fourcc(*"MJPG")
                writer = cv2.VideoWriter(self.o, fourcc, 30, (W, H), True)

            # initialize the current status along with our list of bounding
            # box rectangles returned by either (1) our object detector or
            # (2) the correlation trackers
            status = "Waiting"
            rects = []

            # check to see if we should run a more computationally expensive
            # object detection method to aid our tracker
            if totalFrames % self.s == 0:
                # set the status and initialize our new set of object trackers
                status = "Detecting"
                trackers = []

                # convert the frame to a blob and pass the blob through the
                # network and obtain the detections
                blob = cv2.dnn.blobFromImage(frame, 0.007843, (W, H), 127.5)
                net.setInput(blob)
                detections = net.forward()

                # loop over the detections
                for i in np.arange(0, detections.shape[2]):
                    # extract the confidence (i.e., probability) associated
                    # with the prediction
                    confidence = detections[0, 0, i, 2]

                    # filter out weak detections by requiring a minimum
                    # confidence
                    if confidence > self.c:
                        # extract the index of the class label from the
                        # detections list
                        idx = int(detections[0, 0, i, 1])

                        # if the class label is not a person, ignore it
                        if CLASSES[idx] != "person":
                            continue

                        # compute the (x, y)-coordinates of the bounding box
                        # for the object
                        box = detections[0, 0, i, 3:7] * np.array([W, H, W, H])
                        (startX, startY, endX, endY) = box.astype("int")

                        # construct a dlib rectangle object from the bounding
                        # box coordinates and then start the dlib correlation
                        # tracker
                        tracker = dlib.correlation_tracker()
                        rect = dlib.rectangle(startX, startY, endX, endY)
                        tracker.start_track(rgb, rect)

                        # add the tracker to our list of trackers so we can
                        # utilize it during skip frames
                        trackers.append(tracker)

            # otherwise, we should utilize our object *trackers* rather than
            # object *detectors* to obtain a higher frame processing throughput
            else:
                # loop over the trackers
                for tracker in trackers:
                    # set the status of our system to be 'tracking' rather
                    # than 'waiting' or 'detecting'
                    status = "Tracking"

                    # update the tracker and grab the updated position
                    tracker.update(rgb)
                    pos = tracker.get_position()

                    # unpack the position object
                    startX = int(pos.left())
                    startY = int(pos.top())
                    endX = int(pos.right())
                    endY = int(pos.bottom())

                    # add the bounding box coordinates to the rectangles list
                    rects.append((startX, startY, endX, endY))
            # draw a horizontal line in the center of the frame -- once an
            # object crosses this line we will determine whether they were
            # moving 'up' or 'down'
            #cv2.line(frame, (0, H // 2), (W, H // 2), (0, 255, 255), 2)

            # use the centroid tracker to associate the (1) old object
            # centroids with (2) the newly computed object centroids
            objects = ct.update(rects)

            # loop over the tracked objects
            for (self.objectID, centroid) in objects.items():
                # check to see if a trackable object exists for the current
                # object ID
                to = trackableObjects.get(self.objectID, None)

                # if there is no existing trackable object, create one
                if to is None:
                    to = TrackableObject(self.objectID, centroid)

                # otherwise, there is a trackable object so we can utilize it
                # to determine direction
                else:
                    # the difference between the y-coordinate of the *current*
                    # centroid and the mean of *previous* centroids will tell
                    # us in which direction the object is moving (negative for
                    # 'up' and positive for 'down')
                    y = [c[1] for c in to.centroids]
                    direction = centroid[1] - np.mean(y)
                    to.centroids.append(centroid)

                    # check to see if the object has been counted or not
                    if not to.counted:
                        # if the direction is negative (indicating the object
                        # is moving up) AND the centroid is above the center
                        # line, count the object
                        if direction < 0 and centroid[1] < H // 2:
                            totalUp += 1
                            to.counted = True

                        # if the direction is positive (indicating the object
                        # is moving down) AND the centroid is below the
                        # center line, count the object
                        elif direction > 0 and centroid[1] > H // 2:
                            totalDown += 1
                            to.counted = True

                # store the trackable object in our dictionary
                trackableObjects[self.objectID] = to

                # draw both the ID of the object and the centroid of the
                # object on the output frame
                text = "ID {}".format(self.objectID)
                cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
                cv2.circle(frame, (centroid[0], centroid[1]), 4, (0, 255, 0),
                           -1)

            # construct a tuple of information we will be displaying on the
            # frame
            '''info = [
				("Up", totalUp),
				("Down", totalDown),
				("Status", status),
			]

			# loop over the info tuples and draw them on our frame
			for (i, (k, v)) in enumerate(info):
				text = "{}: {}".format(k, v)
				cv2.putText(frame, text, (10, H - ((i * 20) + 20)),
					cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)'''

            # check to see if we should write the frame to disk
            if writer is not None:
                writer.write(frame)

            # show the output frame
            cv2.imshow("Frame", frame)
            key = cv2.waitKey(1) & 0xFF

            # if the `q` key was pressed, break from the loop
            if key == ord("q"):
                break

            # increment the total number of frames processed thus far and
            # then update the FPS counter
            totalFrames += 1
            fps.update()

        # stop the timer and display FPS information
        fps.stop()
        #print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
        #print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

        # check to see if we need to release the video writer pointer
        if writer is not None:
            writer.release()

        # if we are not using a video file, stop the camera video stream
        if self.i is None:
            vs.stop()

        # otherwise, release the video file pointer
        else:
            vs.release()

        # close any open windows
        cv2.destroyAllWindows()
Example #14
    def runComputationallyTaskingAlgoIfBasicAlgoFails(self):
        # check to see if we should run a more computationally expensive
        # object detection method to aid our tracker
        if self.totalFrames % self.conf["track_object"] == 0:
            # initialize our new set of object trackers
            self.trackers = []

            # convert the frame to a blob and pass the blob through the
            # network and obtain the detections
            blob = cv2.dnn.blobFromImage(self.frame, size=(300, 300), ddepth=cv2.CV_8U)
            self.net.setInput(blob, scalefactor=1.0 / 127.5, mean=[127.5, 127.5, 127.5])
            detections = self.net.forward()

            # loop over the detections
            for i in np.arange(0, detections.shape[2]):
                # extract the confidence (i.e., probability) associated
                # with the prediction
                confidence = detections[0, 0, i, 2]

                # filter out weak detections by ensuring the `confidence`
                # is greater than the minimum confidence
                if confidence > self.conf["confidence"]:
                    # extract the index of the class label from the
                    # detections list
                    idx = int(detections[0, 0, i, 1])

                    # if the class label is not a car, ignore it
                    if self.CLASSES[idx] != "car":
                        continue

                    # compute the (x, y)-coordinates of the bounding box
                    # for the object
                    box = detections[0, 0, i, 3:7] * np.array([self.W, self.H, self.W, self.H])
                    (startX, startY, endX, endY) = box.astype("int")

                    # construct a dlib rectangle object from the bounding
                    # box coordinates and then start the dlib correlation
                    # tracker
                    tracker = dlib.correlation_tracker()
                    rect = dlib.rectangle(startX, startY, endX, endY)
                    tracker.start_track(self.rgb, rect)

                    # add the tracker to our list of trackers so we can
                    # utilize it during skip frames
                    self.trackers.append(tracker)
            
        # otherwise, we should utilize our object *trackers* rather than
        # object *detectors* to obtain a higher frame processing
        # throughput
        else:
            # loop over the trackers
            for tracker in self.trackers:
                # update the tracker and grab the updated position
                tracker.update(self.rgb)
                pos = tracker.get_position()

                # unpack the position object
                startX = int(pos.left())
                startY = int(pos.top())
                endX = int(pos.right())
                endY = int(pos.bottom())

                # add the bounding box coordinates to the rectangles list
                self.rects.append((startX, startY, endX, endY))
Example #15
    def __init__(self):
        self.tracker = dlib.correlation_tracker()
Example #16
    def tracker_add(self, face_segm, left, top, right, bottom):
        tracker = dlib.correlation_tracker()
        rect = dlib.rectangle(left, top, right, bottom)
        self.rects.append((left, top, right, bottom))
        tracker.start_track(face_segm, rect)
        self.trackers.append(tracker)
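A matching update step for these per-face trackers, assuming self.trackers and self.rects are the lists filled by tracker_add above, might look like:

    def tracker_update(self, frame):
        # hypothetical companion method: advance every tracker and refresh the box list
        self.rects = []
        for tracker in self.trackers:
            tracker.update(frame)
            pos = tracker.get_position()
            self.rects.append((int(pos.left()), int(pos.top()),
                               int(pos.right()), int(pos.bottom())))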
Example #17
def fn1():
    print('lib loaded')

    print('Enter in function')
    info = []
    # construct the argument parse and parse the arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("-c",
                    "--confidence",
                    type=float,
                    default=0.4,
                    help="minimum probability to filter weak detections")
    ap.add_argument("-s",
                    "--skip-frames",
                    type=int,
                    default=30,
                    help="# of skip frames between detections")
    args = vars(ap.parse_args())
    print('Load models and files')
    s1 = r'C:\Users\hp\Desktop\people-counting-opencv\people-counting-opencv\mobilenet_ssd\MobileNetSSD_deploy.prototxt'
    model = r'C:\Users\hp\Desktop\people-counting-opencv\people-counting-opencv\mobilenet_ssd\MobileNetSSD_deploy.caffemodel'
    input_data = r'C:\Users\hp\Desktop\people-counting-opencv\people-counting-opencv\videos\Original.mp4'
    output = r'C:\Users\hp\Desktop\people-counting-opencv\people-counting-opencv\output\output.avi'

    # userName = request.args.get('userName')
    # print(userName)

    # initialize the list of class labels MobileNet SSD was trained to
    # detect
    CLASSES = [
        "background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus",
        "car", "cat", "chair", "cow", "diningtable", "dog", "horse",
        "motorbike", "person", "pottedplant", "sheep", "sofa", "train",
        "tvmonitor"
    ]

    # load our serialized model from disk
    print("[INFO] loading model...")
    net = cv2.dnn.readNetFromCaffe(s1, model)

    # if a video path was not supplied, grab a reference to the webcam
    if not input_data:
        print("[INFO] starting video stream...")
        vs = VideoStream(src=0).start()
        time.sleep(2.0)

    # otherwise, grab a reference to the video file
    else:
        print("[INFO] opening video file...")
        vs = cv2.VideoCapture(input_data)

    # initialize the video writer (we'll instantiate later if need be)
    writer = None

    # initialize the frame dimensions (we'll set them as soon as we read
    # the first frame from the video)
    W = None
    H = None

    # instantiate our centroid tracker, then initialize a list to store
    # each of our dlib correlation trackers, followed by a dictionary to
    # map each unique object ID to a TrackableObject
    ct = CentroidTracker(maxDisappeared=40, maxDistance=50)
    trackers = []
    trackableObjects = {}

    # initialize the total number of frames processed thus far, along
    # with the total number of objects that have moved either up or down
    totalFrames = 0
    totalDown = 0
    totalUp = 0
    totalMiddle = 0
    count = 0
    bs = 0
    print('FPS start')
    # start the frames per second throughput estimator
    fps = FPS().start()

    # loop over frames from the video stream
    while True:
        # print('capture frames one by one')
        # grab the next frame and handle if we are reading from either
        # VideoCapture or VideoStream
        frame = vs.read()
        frame = frame[1] if (input_data != False) else frame
        # print('Frame read')
        # if we are viewing a video and we did not grab a frame then we
        # have reached the end of the video
        if input_data is not None and frame is None:
            print('End of video')
            break

        # resize the frame to have a maximum width of 500 pixels (the
        # less data we have, the faster we can process it), then convert
        # the frame from BGR to RGB for dlib
        frame = imutils.resize(frame, width=500)
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

        # if the frame dimensions are empty, set them
        if W is None or H is None:
            (H, W) = frame.shape[:2]

        # if we are supposed to be writing a video to disk, initialize
        # the writer
        if output is not None and writer is None:
            print('Entered into video path')
            fourcc = cv2.VideoWriter_fourcc(*"MJPG")
            writer = cv2.VideoWriter(output, fourcc, 30, (W, H), True)

        # initialize the current status along with our list of bounding
        # box rectangles returned by either (1) our object detector or
        # (2) the correlation trackers
        status = "Waiting"
        rects = []

        # check to see if we should run a more computationally expensive
        # object detection method to aid our tracker
        if totalFrames % args["skip_frames"] == 0:
            # set the status and initialize our new set of object trackers
            status = "Detecting"
            trackers = []

            # convert the frame to a blob and pass the blob through the
            # network and obtain the detections
            blob = cv2.dnn.blobFromImage(frame, 0.007843, (W, H), 127.5)
            net.setInput(blob)
            detections = net.forward()

            # loop over the detections
            for i in np.arange(0, detections.shape[2]):
                # extract the confidence (i.e., probability) associated
                # with the prediction
                confidence = detections[0, 0, i, 2]

                # filter out weak detections by requiring a minimum
                # confidence
                if confidence > args["confidence"]:
                    # extract the index of the class label from the
                    # detections list
                    idx = int(detections[0, 0, i, 1])

                    # if the class label is not a person, ignore it
                    if CLASSES[idx] != "person":
                        continue

                    # compute the (x, y)-coordinates of the bounding box
                    # for the object
                    box = detections[0, 0, i, 3:7] * np.array([W, H, W, H])
                    (startX, startY, endX, endY) = box.astype("int")

                    # construct a dlib rectangle object from the bounding
                    # box coordinates and then start the dlib correlation
                    # tracker
                    tracker = dlib.correlation_tracker()
                    rect = dlib.rectangle(startX, startY, endX, endY)
                    tracker.start_track(rgb, rect)

                    # add the tracker to our list of trackers so we can
                    # utilize it during skip frames
                    trackers.append(tracker)

        # otherwise, we should utilize our object *trackers* rather than
        # object *detectors* to obtain a higher frame processing throughput
        else:
            # loop over the trackers
            for tracker in trackers:
                # set the status of our system to be 'tracking' rather
                # than 'waiting' or 'detecting'
                status = "Tracking"

                # update the tracker and grab the updated position
                tracker.update(rgb)
                pos = tracker.get_position()

                # unpack the position object
                startX = int(pos.left())
                startY = int(pos.top())
                endX = int(pos.right())
                endY = int(pos.bottom())

                # add the bounding box coordinates to the rectangles list
                rects.append((startX, startY, endX, endY))

        # draw a horizontal line in the center of the frame -- once an
        # object crosses this line we will determine whether they were
        # moving 'up' or 'down'
        cv2.line(frame, (int(W // (2)), 0), (int(W // (2)), H), (0, 255, 255),
                 2)
        cv2.line(frame, (int(W // (2.5)), 0), (int(W // (2.5)), H),
                 (255, 0, 0), 2)
        # use the centroid tracker to associate the (1) old object
        # centroids with (2) the newly computed object centroids
        objects = ct.update(rects)
        caution = 0
        danger = 0
        safe = 0
        ls = []

        for i in objects.items():
            # print "-------------------------",i
            if i[1][0] < W // (2) and i[1][0] > W // 2.5:
                caution += 1
            elif i[1][0] > W // 2:
                safe += 1
                outerDate = date.today()
                # print("outerDate=", outerDate)
                outerTime = datetime.datetime.now().strftime("%H:%M:%S")
                ls.append(outerTime)

            elif i[1][0] < W // 2.5:
                danger += 1
                enteredTime = datetime.datetime.now().strftime("%H:%M:%S")
                ls.append(enteredTime)

        # loop over the tracked objects
        for (objectID, centroid) in objects.items():
            # check to see if a trackable object exists for the current
            # object ID
            to = trackableObjects.get(objectID, None)

            # if there is no existing trackable object, create one
            # if to is None:
            #     to = TrackableObject(objectID, centroid)
            #
            # # otherwise, there is a trackable object so we can utilize it
            # # to determine direction
            # else:
            #     # the difference between the y-coordinate of the *current*
            #     # centroid and the mean of *previous* centroids will tell
            #     # us in which direction the object is moving (negative for
            #     # 'up' and positive for 'down')
            #     y = [c[0] for c in to.centroids]
            #     direction = centroid[0] - np.mean(y)
            #     to.centroids.append(centroid)
            #
            #     # check to see if the object has been counted or not
            #     # if to.counted == False or to.counted == "med" or to.counted != "out":
            #     #     # if the direction is negative (indicating the object
            #     #     # is moving up) AND the centroid is above the center
            #     #     # line, count the object
            #     #     if centroid[0] < W // (2) and centroid[0] > W // (2.5) and to.counted != "med" and count == 0:
            #     #
            #     #         totalMiddle += 1
            #     #         to.counted = "med"
            #     #
            #     #     elif direction < 0 and centroid[0] < W // (2.5) and to.counted != True:
            #     #         totalUp += 1
            #     #         to.counted = True
            #     #
            #     #
            #     #     # if the direction is positive (indicating the object
            #     #     # is moving down) AND the centroid is below the
            #     #     # center line, count the object
            #     #     elif direction > 0 and centroid[0] > W // (2) and to.counted != "out" and (
            #     #             to.counted == True or to.counted == "med"):
            #     #         totalDown += 1
            #     #         to.counted = "out"
            #     #
            # # store the trackable object in our dictionary
            trackableObjects[objectID] = to

            # draw both the ID of the object and the centroid of the
            # object on the output frame
            text = "ID {}".format(objectID)
            cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
            cv2.circle(frame, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)

        # construct a tuple of information we will be displaying on the
        # frame
        info = [
            ("Entered", danger),
            ("Just about to enter", caution),
            ("Out", safe),
            ("Status", status),
        ]

        # loop over the info tuples and draw them on our frame
        for (i, (k, v)) in enumerate(info):
            text = "{}: {}".format(k, v)
            cv2.putText(frame, text, (10, H - ((i * 20) + 20)),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)

        # check to see if we should write the frame to disk
        if writer is not None:
            writer.write(frame)

        # show the output frame
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF

        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break

        # increment the total number of frames processed thus far and
        # then update the FPS counter
        totalFrames += 1
        fps.update()

    # stop the timer and display FPS information
    fps.stop()
    # print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
    # print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

    # check to see if we need to release the video writer pointer
    if writer is not None:
        writer.release()

    # if we are not using a video file, stop the camera video stream

    if not input_data:
        vs.stop()

    # otherwise, release the video file pointer
    else:
        vs.release()

    # close any open windows
    cv2.destroyAllWindows()

    print("info=", info)

    print("list ", ls)
    connection = pymysql.connect(host="localhost",
                                 user="******",
                                 password="******",
                                 db="misbehavedetection")

    cursor1 = connection.cursor()
    #
    enteredCount = info[0][1]
    outerCount = info[2][1]

    print("enteredCount=", enteredCount, "outerCount=", outerCount)

    print("outerTime:" + str(outerTime) + "\nenterdDate:" + str(enteredTime))

    detectionDate = str(date.today())

    cursor1.execute(
        "INSERT INTO visual_db (enterTime,exitTime,detectionDate) VALUES ('" +
        str(enteredTime) + "','" + str(outerTime) + "','" + detectionDate +
        "')")

    connection.commit()
    cursor1.close()
    connection.close()

    dangerTup = info[0]

    print(dangerTup)

    return "outerTime:" + str(outerTime) + "\nenterdDate:" + str(enteredTime)
def age_gender(vs, dur):
    totalFrames = 0
    totalDown = 0
    totalUp = 0
    rects=[]
    agen = []
    gend = []
    writer = None
    W= None
    H= None
    up = []
    down = []
    skipFrames = rate
    status = ""
    net = cv2.dnn.readNet("deploy.prototxt", "deploy.caffemodel")
    age_net = cv2.dnn.readNetFromCaffe('deploy_age.prototxt', 'age_net.caffemodel')
    gender_net = cv2.dnn.readNetFromCaffe('deploy_gender.prototxt', 'gender_net.caffemodel')
    font = cv2.FONT_HERSHEY_SIMPLEX
    MODEL_MEAN_VALUES = (78.4263377603, 87.7689143744, 114.895847746)
    age_list = ['(0, 2)', '(4, 6)', '(8, 12)', '(15, 20)', '(25, 32)', '(38, 43)', '(48, 53)', '(60, 100)']
    gender_list = ['Male', 'Female']
    CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat",
    "bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
    "dog", "horse", "motorbike", "person", "pottedplant", "sheep",
    "sofa", "train", "tvmonitor"]
    frameST = st.empty()
    trackers = []
    trackableObjects = {}


    ct = CentroidTracker(maxDisappeared=40, maxDistance=50)

    widgets = ['Loading: ', progressbar.AnimatedMarker()]
    bar = progressbar.ProgressBar(widgets=widgets).start() 
    
    with st.spinner('Processing...'):
        while True:
            frame = vs.read()
            frame = frame[1]

            if frame is None:
                break

            frame = imutils.resize(frame, width=320)
            rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

            if W is None or H is None:
                (H, W) = frame.shape[:2]

            status = "Waiting"
            
            rects = []
            
            if totalFrames % skipFrames < skipFrames/5:
                status = "Detecting"
                trackers = []

                face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_alt.xml')

                gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                faces = face_cascade.detectMultiScale(gray, 1.1, 5)
                for (x, y, w, h) in faces:
                    cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 255, 0), 2)

                    face_img = frame[y:y+h, x:x+w].copy()
                    blob = cv2.dnn.blobFromImage(face_img, 1, (227, 227), MODEL_MEAN_VALUES, swapRB=False)

                    gender_net.setInput(blob)
                    gender_preds = gender_net.forward()
                    gender = gender_list[gender_preds[0].argmax()]
                    print("Gender : " + gender)
                    age_net.setInput(blob)
                    age_preds = age_net.forward()
                    age = age_list[age_preds[0].argmax()]
                    print("Age Range: " + age)
                    overlay_text = "%s %s" % (gender, age)
                    cv2.putText(frame, overlay_text, (x, y), font, 1, (255, 255, 255), 2, cv2.LINE_AA)
                    agen.append(age)
                    gend.append(gender)
                blob = cv2.dnn.blobFromImage(frame, 0.007843, (W, H), 127.5)

                net.setInput(blob)
                detections = net.forward()

                for i in np.arange(0, detections.shape[2]):

                    confidence = detections[0, 0, i, 2]
                    if confidence > confidence_threshold:
                        idx = int(detections[0, 0, i, 1])

                        if CLASSES[idx] != "person":
                            continue
         

                        box = detections[0, 0, i, 3:7] * np.array([W, H, W, H])
          
                        (startX, startY, endX, endY) = box.astype("int")
                        tracker = dlib.correlation_tracker()
                        rect = dlib.rectangle(startX, startY, endX, endY)
                        tracker.start_track(rgb, rect)
                        trackers.append(tracker)
            else:
              for tracker in trackers:
                status = "Tracking"

                tracker.update(rgb)
                pos = tracker.get_position()

                startX = int(pos.left())
                startY = int(pos.top())
                endX = int(pos.right())
                endY = int(pos.bottom())
                rects.append((startX, startY, endX, endY))

            cv2.line(frame, (0, 0), (W, 0), (0, 255, 255), 2)

            objects = ct.update(rects)

            for (objectID, centroid) in objects.items():
                to = trackableObjects.get(objectID, None)


                if to is None:
                    to = TrackableObject(objectID, centroid)


                else:
                    y = [c[1] for c in to.centroids]
                    direction = centroid[1] - np.mean(y)
                    to.centroids.append(centroid)

                    if not to.counted:
                        if direction < 0 and centroid[1] < H // 2:
                            totalUp += 1
                            to.counted = True


                        elif direction > 0 and centroid[1] > H // 2:
                            totalDown += 1
                            to.counted = True

                up.append(totalUp)
                down.append(totalDown)
                trackableObjects[objectID] = to


                text = "ID {}".format(objectID)
                cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
                cv2.circle(frame, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)
               
            info = [("Up", totalUp),("Down", totalDown),("Status", status),]


            for (i, (k, v)) in enumerate(info):
                text = "{}: {}".format(k, v)
                cv2.putText(frame, text, (10, H - ((i * 20) + 20)),cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
            totalFrames += 1
            frameST.image(frame, channels="BGR",caption='output video', use_column_width=True)
            if writer is None:
                fourcc = cv2.VideoWriter_fourcc(*"XVID")
                writer = cv2.VideoWriter("output.avi", fourcc, rate,
                                         (frame.shape[1], frame.shape[0]), True)
            # write the annotated frame to the output file
            writer.write(frame)

        # release the writer before returning the collected counts
        if writer is not None:
            writer.release()
        return up, down, gend, agen
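# Added sketch: age_gender() returns the raw per-detection lists; a small
# helper (illustrative, assuming those return values) condenses them into
# counts per gender and age bucket for display:
from collections import Counter

def summarize_demographics(gend, agen):
    return {"gender": dict(Counter(gend)), "age": dict(Counter(agen))}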
Example #19
0
File: cv_2.py Project: sheviv/mirrors
def postprocess(frame, outs):
    global inCount, Font, count, SKIP_FRAMES, outCount
    frameHeight = frame_cropped.shape[0]
    frameWidth = frame_cropped.shape[1]

    # Scan through all the bounding boxes output from the network and keep only the
    # ones with high confidence scores. Assign the box's class label as the class with the highest score.
    classIds = []
    confidences = []
    boxes = []
    for out in outs:
        for detection in out:
            scores = detection[5:]
            classId = np.argmax(scores)
            confidence = scores[classId]
            if confidence > confThreshold:
                center_x = int(detection[0] * frameWidth)
                center_y = int(detection[1] * frameHeight)
                width = int(detection[2] * frameWidth)
                height = int(detection[3] * frameHeight)
                left = int(center_x - width / 2)
                top = int(center_y - height / 2)
                classIds.append(classId)
                confidences.append(float(confidence))
                boxes.append([left, top, width, height])

    # Perform non maximum suppression to eliminate redundant overlapping boxes with
    # lower confidences.
    indices = cv2.dnn.NMSBoxes(boxes, confidences, confThreshold, nmsThreshold)
    # Delete lost trackers based on tracking quality
    trackers_to_del = []
    for tid, trackersid in enumerate(trackers):
        trackingQuality = trackersid[0].update(frame_cropped)
        if trackingQuality < 5:
            trackers_to_del.append(tid)
    # pop in reverse order so the remaining indices stay valid
    for tid in reversed(trackers_to_del):
        trackers.pop(tid)

    for i in indices:
        i = i[0]
        box = boxes[i]
        left = box[0]
        top = box[1]
        width = box[2]
        height = box[3]
        classId, conf, left, top, right, bottom = classIds[i], confidences[
            i], left, top, left + width, top + height

        rect = dlib.rectangle(left, top, right, bottom)
        (x, y, w, h) = rect_to_bb(rect)

        tracking = False

        for trackersid in trackers:
            pos = trackersid[0].get_position()
            startX = int(pos.left())
            startY = int(pos.top())
            endX = int(pos.right())
            endY = int(pos.bottom())
            tx, ty = findCenter(startX, startY, endX, endY)

            t_location_chk = pointInRect(x, y, w, h, tx, ty)
            if t_location_chk:
                tracking = True

        if not tracking:
            tracker = dlib.correlation_tracker()
            tracker.start_track(frame_cropped, rect)
            trackers.append([tracker, frame_cropped])

    for num, trackersid in enumerate(trackers):
        pos = trackersid[0].get_position()
        startX = int(pos.left())
        startY = int(pos.top())
        endX = int(pos.right())
        endY = int(pos.bottom())

        cv2.rectangle(frame_cropped, (startX, startY), (endX, endY),
                      (0, 255, 250), 1)
        if endX < 380 and endY >= 280:
            inCount += 1
            trackers.pop(num)
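# Added sketch: postprocess() above relies on findCenter() and pointInRect(),
# which are not part of this excerpt. Plausible minimal versions, assuming
# (x, y, w, h) is a corner-plus-size box and (tx, ty) is the point to test:
def findCenter(startX, startY, endX, endY):
    return (startX + endX) // 2, (startY + endY) // 2

def pointInRect(x, y, w, h, tx, ty):
    return x <= tx <= x + w and y <= ty <= y + h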
Example #20
0
File: client.py Project: ZhouYzzz/CTT
 def _CT_init(self):
     CT = dlib.correlation_tracker()
     return CT
Example #21
0
         for box1 in boxes1:
             a = match_boxes(box1, tracker.get_position())
             if a:
                 del boxes[boxes == box1]
     for box in boxes:
         encoding = face_recognition.face_encodings(
             dlib.get_face_chip(frame, sp(frame, box)))
         if len(encoding) > 0:
             print("found encoding")
             matches = face_recognition.face_distance(
                 list(seen_faces.values()), encoding[0])
             if matches[np.argmin(matches)] < 0.6:
                 face_id = list(seen_faces.keys())[np.argmin(matches)]
                 print(face_id)
                 if face_id not in trackers.keys():
                     trackers[face_id] = dlib.correlation_tracker()
                     trackers[face_id].start_track(frame, box)
             else:
                 new_id = str(len(seen_faces.values()) + 1)
                 trackers[new_id] = dlib.correlation_tracker()
                 trackers[new_id].start_track(frame, box)
                 seen_faces[new_id] = encoding[0]
         else:
             temp_new_id = str(len(temp_trackers.values()) + 1)
             temp_trackers[temp_new_id] = dlib.correlation_tracker()
             temp_trackers[temp_new_id].start_track(frame, box)
 if len(trackers) > 0:
     for face_id, tracker in trackers.items():
         p1 = (int(tracker.get_position().left()),
               int(tracker.get_position().top()))
         p2 = (int(tracker.get_position().right()),
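# Added sketch: match_boxes() above is not defined in this excerpt. A
# plausible version, assuming both arguments behave like dlib rectangles,
# treats two boxes as matching when their intersection-over-union exceeds a
# threshold:
def match_boxes(box_a, box_b, iou_threshold=0.5):
    left = max(box_a.left(), box_b.left())
    top = max(box_a.top(), box_b.top())
    right = min(box_a.right(), box_b.right())
    bottom = min(box_a.bottom(), box_b.bottom())
    if right <= left or bottom <= top:
        return False
    intersection = (right - left) * (bottom - top)
    union = box_a.area() + box_b.area() - intersection
    return intersection / union >= iou_threshold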
Example #22
0
def trackMultipleObjects():
    rectangleColor = (0, 255, 0)
    frameCounter = 0
    currentCarID = 0
    fps = 0

    carTracker = {}

    while True:
        start_time = time.time()
        rc, image = video.read()
        if image is None:
            break

        image = cv2.resize(image, (WIDTH, HEIGHT))
        resultImage = image.copy()

        frameCounter = frameCounter + 1
        for carID in carTracker.keys():
            trackingQuality = carTracker[carID].update(image)

        if not (frameCounter % 10):
            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            cars = carCascade.detectMultiScale(gray, 1.1, 13, 18, (24, 24))

            for (_x, _y, _w, _h) in cars:
                x = int(_x)
                y = int(_y)
                w = int(_w)
                h = int(_h)

                x_bar = x + 0.5 * w
                y_bar = y + 0.5 * h

                matchCarID = None

                for carID in carTracker.keys():
                    trackedPosition = carTracker[carID].get_position()

                    t_x = int(trackedPosition.left())
                    t_y = int(trackedPosition.top())
                    t_w = int(trackedPosition.width())
                    t_h = int(trackedPosition.height())

                    t_x_bar = t_x + 0.5 * t_w
                    t_y_bar = t_y + 0.5 * t_h

                    if ((t_x <= x_bar <= (t_x + t_w)) and (t_y <= y_bar <=
                                                           (t_y + t_h))
                            and (x <= t_x_bar <= (x + w)) and (y <= t_y_bar <=
                                                               (y + h))):
                        matchCarID = carID

                if matchCarID is None:
                    print('Creating new tracker ' + str(currentCarID))

                    tracker = dlib.correlation_tracker()
                    tracker.start_track(image,
                                        dlib.rectangle(x, y, x + w, y + h))

                    carTracker[currentCarID] = tracker
                    currentCarID = currentCarID + 1

        for carID in carTracker.keys():
            trackedPosition = carTracker[carID].get_position()

            t_x = int(trackedPosition.left())
            t_y = int(trackedPosition.top())
            t_w = int(trackedPosition.width())
            t_h = int(trackedPosition.height())

            cv2.rectangle(resultImage, (t_x, t_y), (t_x + t_w, t_y + t_h),
                          rectangleColor, 4)

        cv2.imshow('result', resultImage)

        if cv2.waitKey(33) == 27:
            break

    cv2.destroyAllWindows()
def run():
    classes = [c.strip() for c in open('coco.names').readlines()]
    conf_threshold = 0.6  # keep detections with confidence above this value
    nmsThreshold = 0.4  # IoU threshold used by non-max suppression
    shape = 288
    colors = []
    colors.append([(randint(0, 255), randint(0, 255), randint(0, 255))
                   for i in range(1000)])
    detected_classes = ['cell phone']
    ct = CentroidTracker(maxDisappeared=40, maxDistance=50)
    pts = [deque(maxlen=10) for _ in range(1000)]
    counter = 0
    center = None
    trackers = []
    totalIn = []
    empty = []
    empty1 = []
    trackableObjects = {}
    totalFrames = 0
    totalDown = 0
    totalUp = 0
    (W, H) = (None, None)
    net = yolo_net("yolov3.weights", "yolov3.cfg")
    if config.Thread:
        vid = thread.ThreadingClass(0)
    else:
        vid = cv2.VideoCapture(0)
    while True:
        if config.Thread:
            img = vid.read()
        else:
            _, img = vid.read()
        img = cv2.resize(img, (600, 500))
        rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        if W is None or H is None:
            (H, W) = img.shape[:2]
        status = "Waiting"
        rects = []
        if totalFrames % 30 == 0:
            status = "Detecting"
            trackers = []
            outputs = yolo_output(net, img, shape)
            bbox, classIds, confs = yolo_predict(outputs, conf_threshold, H, W)
            indices = cv2.dnn.NMSBoxes(bbox, confs, conf_threshold,
                                       nmsThreshold)
            for i in indices:
                i = i[0]
                if classes[classIds[i]] not in detected_classes: continue
                box = bbox[i]
                color = colors[0][i]
                x, y, w, h = box[0], box[1], box[2], box[3]
                tracker = dlib.correlation_tracker()
                rect = dlib.rectangle(x, y, x + w, y + h)
                tracker.start_track(rgb, rect)
                trackers.append(tracker)
                cv2.rectangle(img, (x, y), (x + w, y + h), color, 2)
        else:
            for tracker in trackers:
                status = "Tracking"
                tracker.update(rgb)
                pos = tracker.get_position()
                startX = int(pos.left())
                startY = int(pos.top())
                endX = int(pos.right())
                endY = int(pos.bottom())
                rects.append((startX, startY, endX, endY))
        cv2.line(img, (0, H // 2), (W, H // 2), (0, 255, 255), 2)
        obj = ct.update(rects)
        for (objectID, centroid) in obj.items():
            to = trackableObjects.get(objectID, None)
            if to is None:
                to = TrackableObject(objectID, centroid)
            else:
                oy = [c[1] for c in to.centroids]
                directionY = centroid[1] - np.mean(oy)
                to.centroids.append(centroid)
                if not to.counted:
                    if directionY < 0 and centroid[1] < H // 2:
                        totalUp += 1
                        empty.append(totalUp)
                        to.counted = True

                    elif directionY > 0 and centroid[1] > H // 2:
                        totalDown += 1
                        empty1.append(totalDown)
                        # print(empty1[-1])
                        totalIn = []
                        # compute the sum of total people inside
                        totalIn.append(len(empty1) - len(empty))
                        print("Total people inside:", totalIn)
                        # if the people limit exceeds over threshold, send an email alert
                        if sum(totalIn) >= config.Threshold:
                            cv2.putText(img, "-ALERT: People limit exceeded-",
                                        (10, img.shape[0] - 80),
                                        cv2.FONT_HERSHEY_COMPLEX, 0.5,
                                        (0, 0, 255), 2)
                            if config.ALERT:
                                print("[INFO] Sending email alert..")
                                # Mailer().send(config.MAIL)
                                print("[INFO] Alert sent")

                        to.counted = True

            trackableObjects[objectID] = to

            text1 = "ID {}".format(objectID)
            colorID = colors[0][objectID]
            cv2.circle(img, (centroid[0], centroid[1]), 4, colorID, -1)
            # cv2.putText(img, "Direction: {}".format(direction), (10, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
            center = (centroid[0], centroid[1])
            pts[objectID].append(center)
            for i in range(1, len(pts[objectID])):
                if pts[objectID][i - 1] is None or pts[objectID][i] is None:
                    continue
                thickness = int(np.sqrt(10 / float(i + 1)) * 2.5)
                cv2.line(img, pts[objectID][i - 1], pts[objectID][i], colorID,
                         thickness)
        info = [
            ("Up", totalUp),
            ("Down", totalDown),
            ("Status", status),
        ]
        info2 = [
            ("Total people inside", totalIn),
        ]

        for (i, (k, v)) in enumerate(info):
            text2 = "{}: {}".format(k, v)
            cv2.putText(img, text2, (10, H - ((i * 20) + 20)),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
        for (i, (k, v)) in enumerate(info2):
            text3 = "{}: {}".format(k, v)
            cv2.putText(img, text3, (265, H - ((i * 20) + 60)),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 2)
        if config.Log:
            datetimee = [
                datetime.datetime.now().strftime("%d/%m/%Y, %H:%M:%S")
            ]
            d = [datetimee, empty1, empty, totalIn]
            print("D: ", d)
            # materialize the iterator so it can be printed and written once
            export_data = list(zip_longest(*d, fillvalue=''))
            print("Export Data: ", export_data)
            with open('Log.csv', 'w', newline='') as myfile:
                wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)
                wr.writerow(("End Time", "In", "Out", "Total Inside"))
                wr.writerows(export_data)

        cv2.imshow('Result', img)
        key = cv2.waitKey(1)
        if key == ord('q'): break
        totalFrames += 1
        if config.Timer:
            # Automatic timer to stop the live stream. Set to 8 hours (28800s).
            t1 = time.time()
            num_seconds = (t1 - t0)
            if num_seconds > 28800:
                break
    if not config.Thread:
        vid.release()
    cv2.destroyAllWindows()
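# Added sketch: run() above calls yolo_net(), yolo_output() and yolo_predict(),
# which are not shown in this excerpt. A minimal reading of the first two with
# OpenCV's DNN module might look like this (the original helpers may differ):
import cv2

def yolo_net(weights_path, cfg_path):
    # readNetFromDarknet expects the .cfg first, then the .weights
    return cv2.dnn.readNetFromDarknet(cfg_path, weights_path)

def yolo_output(net, img, shape):
    blob = cv2.dnn.blobFromImage(img, 1 / 255.0, (shape, shape),
                                 swapRB=True, crop=False)
    net.setInput(blob)
    return net.forward(net.getUnconnectedOutLayersNames())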
Example #24
0
#   via the command:
#       pip install -U scikit-image
#   Or downloaded from http://scikit-image.org/download.html. 

import os
import glob

import dlib
from skimage import io

# Path to the video frames
video_folder = os.path.join("..", "examples", "video_frames")

# Create the correlation tracker - the object needs to be initialized
# before it can be used
tracker = dlib.correlation_tracker()

win = dlib.image_window()
# We will track the frames as we load them off of disk
for k, f in enumerate(sorted(glob.glob(os.path.join(video_folder, "*.jpg")))):
    print("Processing Frame {}".format(k))
    img = io.imread(f)

    # We need to initialize the tracker on the first frame
    if k == 0:
        # Start a track on the juice box. If you look at the first frame you
        # will see that the juice box is contained within the bounding
        # box (74, 67, 112, 153).
        tracker.start_track(img, dlib.rectangle(74, 67, 112, 153))
    else:
        # Else we just attempt to track from the previous frame
        tracker.update(img)
Example #25
0
 def initTracker(self):
     return dlib.correlation_tracker()
Example #26
0
    utils.draw_roi(imref, ROIcorner)
    utils.imshow_scaled("Images", imref)
    console.print(Rule())
    console.print(
        "Press ENTER to validate, else press any other key to start again.",
        style="yellow bold blink")
    key = cv.waitKey(0)
    if key == 13:  # if ENTER is pressed, exit loop
        break

# Initial length and clamp ROI center
l0 = utils.compute_length(ROI1, ROI2)
center0 = utils.ROIcenter(ROIcorner)

# Init trackers
tracker1 = dlib.correlation_tracker()
tracker1.start_track(imrefbw, utils.bb_to_rect(ROI1))
tracker2 = dlib.correlation_tracker()
tracker2.start_track(imrefbw, utils.bb_to_rect(ROI2))
tracker3 = dlib.correlation_tracker()
tracker3.start_track(imrefbw, utils.bb_to_rect(ROIcorner))

# Init strain and displacement plot
plt.ion()
fig, ax1 = plt.subplots()
frames = []
strain = []
disp = []
line1, = ax1.plot(frames, strain, "ro")
ax1.set_xlabel("Image")
ax1.set_ylabel("Strain [%]", color="red")
Example #27
0
 def __init__(self, json, previous_frame, left, top, width, height):
     self.tracker = dlib.correlation_tracker()
     self.frames = json_to_frames(os.path.join(CUSTOM_STATIC_PATH, json))
     points = [(left, top, left+width, top+height)]
     self.start_new_tracking(previous_frame, points)
Example #28
0
def trackMultipleObjects():
    rectangleColor = (0, 255, 0)
    frameCounter = 0
    currentCarID = 0
    fps = 1
    cropped_image = {}
    LPN = {}
    carTracker = {}
    carNumbers = {}
    carLocation1 = {}
    carLocation2 = {}
    speed = [None] * 1000
    counter = 0
    a = 0
    myuse = []

    while True:

        start_time = time.time()
        rc, image_2 = video.read()
        if image_2 is None:
            break

        image_1 = cv2.resize(image_2, (WIDTH, HEIGHT))
        resultImage = image_1.copy()
        height = image_1.shape[0]
        width = image_1.shape[1]
        image = image_1[0:height, 0:int(width / 2)]

        frameCounter = frameCounter + 1

        carIDtoDelete = []

        for carID in carTracker.keys():
            trackingQuality = carTracker[carID].update(image)

            if trackingQuality < 7:  #7
                carIDtoDelete.append(carID)

        for carID in carIDtoDelete:
            carTracker.pop(carID, None)
            carLocation1.pop(carID, None)
            carLocation2.pop(carID, None)
            cropped_image.pop(carID, None)
            LPN.pop(carID, None)

        if not (frameCounter % 10
                ):  #it goes inside only when frameCounter is a multiple of 10
            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            cars = carCascade.detectMultiScale(gray, 1.1, 13, 18, (24, 24))

            cars = list(cars)

            #print(cars)

            for i in range(len(cars)):
                #print(cars, 'HAKUNA MATATA')
                if (cars[i][0] <
                        195) or (cars[i][2] < 58) or (cars[i][3] < 58) or (
                            (cars[i][1] > 40) and (cars[i][2] < 68)) or (
                                (cars[i][1] > 268) and
                                (cars[i][2] < 250)) or ((cars[i][1] > 268) and
                                                        (cars[i][3] < 250)):
                    myuse.append(i)

            if myuse != []:
                #print(cars)
                for i in range(len(myuse) - 1, -1, -1):
                    cars.pop(myuse[i])

                #print(cars)
                myuse = []

            for i in range(len(cars)):
                if (cars[i][1] > 40) and (cars[i][2] < 70):
                    myuse.append(i)

            if myuse != []:
                #print(cars)
                for i in range(len(myuse) - 1, -1, -1):
                    cars.pop(myuse[i])

                #print(cars)
                myuse = []

            for i in range(len(cars)):
                if (cars[i][1] > 110) and (cars[i][2] < 120):
                    myuse.append(i)

            if myuse != []:
                #print(cars)
                for i in range(len(myuse) - 1, -1, -1):
                    cars.pop(myuse[i])

                #print(cars)
                myuse = []

            for i in range(len(cars)):
                if (cars[i][1] > 268) and (cars[i][2] < 250):
                    myuse.append(i)

            if myuse != []:
                for i in range(len(myuse) - 1, -1, -1):
                    cars.pop(myuse[i])

                myuse = []
            for (_x, _y, _w, _h) in cars:
                x = int(_x)
                y = int(_y)
                w = int(_w)
                h = int(_h)
                x_bar = x + 0.5 * w
                y_bar = y + 0.5 * h
                matchCarID = None
                for carID in carTracker.keys():
                    trackedPosition = carTracker[carID].get_position()
                    t_x = int(trackedPosition.left())
                    t_y = int(trackedPosition.top())
                    t_w = int(trackedPosition.width())
                    t_h = int(trackedPosition.height())

                    t_x_bar = t_x + 0.5 * t_w
                    t_y_bar = t_y + 0.5 * t_h
                    if ((t_x <= x_bar <= (t_x + t_w)) and (t_y <= y_bar <=
                                                           (t_y + t_h))
                            and (x <= t_x_bar <= (x + w)) and (y <= t_y_bar <=
                                                               (y + h))):
                        matchCarID = carID
                if matchCarID is None:
                    print('Creating new tracker ' + str(currentCarID))
                    tracker = dlib.correlation_tracker()
                    tracker.start_track(image,
                                        dlib.rectangle(x, y, x + w, y + h))
                    carTracker[currentCarID] = tracker
                    carLocation1[currentCarID] = [x, y, w, h]

                    currentCarID = currentCarID + 1

        myuse = []
        for carID in carTracker.keys():
            x_1 = carTracker[carID].get_position().left()
            y_1 = carTracker[carID].get_position().top()
            w_1 = carTracker[carID].get_position().width()
            h_1 = carTracker[carID].get_position().height()
            for carID_2 in carTracker.keys():
                x_2 = carTracker[carID_2].get_position().left()
                y_2 = carTracker[carID_2].get_position().top()
                w_2 = carTracker[carID_2].get_position().width()
                h_2 = carTracker[carID_2].get_position().height()
                if (carID != carID_2):
                    #print(x_2, x_1+10, x_2+w_2, ' ', x_2, x_1+w_1-10, x_2+w_2, ' ', y_2, y_1+h_1+10, y_2+h_2, 'SEEE THIS BRO')
                    if (x_2 < (x_1 + 10)) and ((x_1 + 10) < (x_2 + w_2)) and (
                        (x_1 + w_1 - 10) > x_2) and (
                            (x_1 + w_1 - 10) < (x_2 + w_2)) and (
                                (y_1 + h_1 + 10) > y_2) and ((y_1 + h_1 + 10) <
                                                             (y_2 + h_2)):
                        myuse.append(carID)

        if myuse != []:
            #print(cars)
            for i in range(len(myuse) - 1, -1, -1):
                carTracker.pop(myuse[i])
                carLocation1.pop(myuse[i])
        myuse = []

        for carID in carTracker.keys():
            x_1 = carTracker[carID].get_position().left()
            y_1 = carTracker[carID].get_position().top()
            w_1 = carTracker[carID].get_position().width()
            h_1 = carTracker[carID].get_position().height()
            for carID_2 in carTracker.keys():
                x_2 = carTracker[carID_2].get_position().left()
                y_2 = carTracker[carID_2].get_position().top()
                w_2 = carTracker[carID_2].get_position().width()
                h_2 = carTracker[carID_2].get_position().height()
                if (carID != carID_2):
                    if ((x_1 + w_1 + 10) > x_2) and (
                        (x_1 + w_1 + 10) <
                        (x_2 + w_2)) and ((y_1 + 10) > y_2) and (
                            (y_1 + 10) < (y_2 + h_2)) and (
                                (y_1 + h_1 - 10) > y_2) and ((y_1 + h_1 - 10) <
                                                             (y_2 + h_2)):
                        myuse.append(carID)
        if myuse != []:
            for i in range(len(myuse) - 1, -1, -1):
                carTracker.pop(myuse[i])
                carLocation1.pop(myuse[i])
        myuse = []

        for carID in carTracker.keys():
            x_1 = carTracker[carID].get_position().left()
            y_1 = carTracker[carID].get_position().top()
            w_1 = carTracker[carID].get_position().width()
            h_1 = carTracker[carID].get_position().height()
            for carID_2 in carTracker.keys():
                x_2 = carTracker[carID_2].get_position().left()
                y_2 = carTracker[carID_2].get_position().top()
                w_2 = carTracker[carID_2].get_position().width()
                h_2 = carTracker[carID_2].get_position().height()
                if (carID != carID_2):
                    if ((x_1 + 10) > x_2) and ((x_1 + 10) < (x_2 + w_2)) and (
                        (x_1 + w_1 - 10) > x_2) and (
                            (x_1 + w_1 - 10) < (x_2 + w_2)) and (
                                (y_1 - 10) > y_2) and ((y_1 - 10) <
                                                       (y_2 + h_2)):
                        myuse.append(carID)

        if myuse != []:
            #print(cars)
            for i in range(len(myuse) - 1, -1, -1):
                carTracker.pop(myuse[i])
                carLocation1.pop(myuse[i])
        myuse = []
        for carID in carTracker.keys():
            x_1 = carTracker[carID].get_position().left()
            y_1 = carTracker[carID].get_position().top()
            w_1 = carTracker[carID].get_position().width()
            h_1 = carTracker[carID].get_position().height()
            for carID_2 in carTracker.keys():
                x_2 = carTracker[carID_2].get_position().left()
                y_2 = carTracker[carID_2].get_position().top()
                w_2 = carTracker[carID_2].get_position().width()
                h_2 = carTracker[carID_2].get_position().height()
                if (carID != carID_2):
                    if ((x_1 - 10) > x_2) and ((x_1 - 10) < (x_2 + w_2)) and (
                        (y_1 + 10) > y_2) and ((y_1 + 10) < (y_2 + h_2)) and (
                            (y_1 + h_1 - 10) > y_2) and ((y_1 + h_1 - 10) <
                                                         (y_2 + h_2)):
                        myuse.append(carID)

        if myuse != []:
            for i in range(len(myuse) - 1, -1, -1):
                carTracker.pop(myuse[i])
                carLocation1.pop(myuse[i])
        myuse = []
        for carID in carTracker.keys():
            trackedPosition = carTracker[carID].get_position()
            t_x = int(trackedPosition.left())
            t_y = int(trackedPosition.top())
            t_w = int(trackedPosition.width())
            t_h = int(trackedPosition.height())
            cv2.rectangle(resultImage, (t_x, t_y), (t_x + t_w, t_y + t_h),
                          rectangleColor, 4)
            carLocation2[carID] = [t_x, t_y, t_w, t_h]
        end_time = time.time()
        if not (end_time == start_time):
            fps = 1.0 / (end_time - start_time)
        for i in carLocation1.keys():
            if frameCounter % 1 == 0:  #it will always work
                [x1, y1, w1, h1] = carLocation1[i]
                [x2, y2, w2, h2] = carLocation2[i]
                carLocation1[i] = [x2, y2, w2, h2]
                if [x1, y1, w1, h1] != [x2, y2, w2, h2]:
                    # estimate the speed only once per vehicle (the original
                    # intent: while it sits in a fixed band of the frame,
                    # e.g. y1 between 275 and 285)
                    if speed[i] is None or speed[i] == 0:
                        speed[i] = estimateSpeed([x1, y1, w1, h1],
                                                 [x2, y2, w2, h2], fps)
                        print('1st', x1, y1, w1, h1, ' ', '2nd', x2, y2, w2,
                              h2, ' ', speed[i], ' ', 'fps:', fps)
                    # only annotate boxes that have actually moved, so a
                    # stationary vehicle is not labelled with a speed estimate
                    if (speed[i] is not None and (x2 > x1 + 5 or x2 < x1 - 5)
                            and (y2 > y1 + 5 or y2 < y1 - 5)):
                        cv2.putText(resultImage,
                                    str(int(speed[i])) + " km/hr",
                                    (int(x1 + w1 / 2), int(y1 - 5)),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.75,
                                    (255, 0, 0), 2)
                        print('Estimated speed:', speed[i])
                        print(datetime.datetime.now())
                        cropped_image[i] = image[y1:y1 + h1 + 25,
                                                 x1:x1 + w1 + 25]

        cv2.imshow('result', resultImage)
        if cv2.waitKey(33) == 27:
            break
    cv2.destroyAllWindows()
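# Added sketch: estimateSpeed() is used above but not defined in this excerpt.
# A common formulation converts the pixel displacement between two boxes into
# metres via an assumed pixels-per-metre calibration and scales by the frame
# rate; the ppm value is illustrative and depends on the camera setup:
import math

def estimateSpeed(location1, location2, fps, ppm=8.8):
    d_pixels = math.hypot(location2[0] - location1[0],
                          location2[1] - location1[1])
    d_meters = d_pixels / ppm            # assumed calibration
    return d_meters * fps * 3.6          # m per frame -> m/s -> km/h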
            people_real_num = people_real_num + 1
            for point_i in range(0, point_num):
                if person_conf_multi[people_i][point_i][0] + person_conf_multi[people_i][point_i][1] != 0: # If coordinates of point is (0, 0) == meaningless data
                    draw.ellipse(ellipse_set(person_conf_multi, people_i, point_i), fill=point_color)
                    people_x.append(person_conf_multi[people_i][point_i][0])
                    people_y.append(person_conf_multi[people_i][point_i][1])
            if i == 0:
                target_points.append((int(min(people_x)), int(min(people_y)), int(max(people_x)), int(max(people_y))))
            else:
                is_new_person = True
                for k in range(len(tracker)):
                    rect = tracker[k].get_position()
                    if np.mean(people_x) < rect.right() and np.mean(people_x) > rect.left() and np.mean(people_y) < rect.bottom() and np.mean(people_y) > rect.top():
                        is_new_person = False
                if is_new_person == True:
                    tracker.append(dlib.correlation_tracker())
                    print('is_new_person!')
                    rect_temp = []
                    rect_temp.append((int(min(people_x)), int(min(people_y)), int(max(people_x)), int(max(people_y))))
                    [tracker[i+len(tracker)-1].start_track(image, dlib.rectangle(*rect)) for i, rect in enumerate(rect_temp)]

    ##########

    if i == 0:
        # Initial co-ordinates of the object to be tracked
        # Create the tracker object
        tracker = [dlib.correlation_tracker() for _ in range(len(target_points))]
        # Provide the tracker the initial position of the object
        [tracker[i].start_track(image, dlib.rectangle(*rect)) for i, rect in enumerate(target_points)]

    #####
def run(source=0, dispLoc=False):
    # Create the VideoCapture object
    cam = cv2.VideoCapture(source)

    # If Camera Device is not opened, exit the program
    if not cam.isOpened():
        print("Video device or file couldn't be opened")
        exit()

    print("Press key `p` to pause the video to start tracking")
    while True:
        # Retrieve an image and Display it.
        retval, img = cam.read()
        if not retval:
            print("Cannot capture frame device")
            exit()
        if (cv2.waitKey(10) == ord('p')):
            break
        cv2.namedWindow("Image", cv2.WINDOW_NORMAL)
        cv2.imshow("Image", img)
    cv2.destroyWindow("Image")

    # Co-ordinates of objects to be tracked
    # will be stored in a list named `points`
    points = get_points.run(img)
    print(points)
    if not points:
        print("ERROR: No object to be tracked.")
        exit()

    cv2.namedWindow("Image", cv2.WINDOW_NORMAL)
    cv2.imshow("Image", img)

    # Initial co-ordinates of the object to be tracked
    # Create the tracker object
    tracker = dlib.correlation_tracker()
    # Provide the tracker the initial position of the object
    tracker.start_track(img, dlib.rectangle(*points[0]))

    while True:
        # Read frame from device or file
        retval, img = cam.read()
        if not retval:
            print("Cannot capture frame device | CODE TERMINATING :(")
            exit()
        # Update the tracker
        start = time.time()
        tracker.update(img)
        # Get the position of the object, draw a
        # bounding box around it and display it.
        rect = tracker.get_position()
        pt1 = (int(rect.left()), int(rect.top()))
        pt2 = (int(rect.right()), int(rect.bottom()))
        cv2.rectangle(img, pt1, pt2, (255, 255, 255), 3)
        end = time.time()
        print("Object tracked at [{}, {}] \r".format(pt1, pt2), )
        print("fps: ", (1 / (end - start)))
        if dispLoc:
            loc = (int(rect.left()), int(rect.top() - 20))
            txt = "Object tracked at [{}, {}]".format(pt1, pt2)
            cv2.putText(img, txt, loc, cv2.FONT_HERSHEY_SIMPLEX, .5,
                        (255, 255, 255), 1)
        cv2.namedWindow("Image", cv2.WINDOW_NORMAL)
        cv2.imshow("Image", img)
        # Continue until the user presses ESC key
        if cv2.waitKey(1) == 27:
            break

    # Release the VideoCapture object
    cam.release()
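# Added sketch: get_points.run() above returns the user-selected bounding
# boxes; OpenCV's built-in ROI selector can serve the same purpose when that
# helper module is unavailable (a sketch, not the original helper):
import cv2

def select_points(img):
    # returns [(x1, y1, x2, y2)] in the corner format expected by dlib.rectangle
    x, y, w, h = cv2.selectROI("Select object", img, showCrosshair=False)
    cv2.destroyWindow("Select object")
    if w == 0 or h == 0:
        return []
    return [(int(x), int(y), int(x + w), int(y + h))]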
Example #31
0
def run(source=0, dispLoc=False):
    # Create the VideoCapture object

    # flash_threshold = 482286005 # video 3 mid
    # flash_threshold = 256239190 # video 3 left
    # flash_threshold = 378792000 # video 3 right

    # flash_threshold = 232000000 # video 4 left
    # flash_threshold = 536700000 # video 4 mid
    flash_threshold = 240205771  # video 4 top
    cam = cv2.VideoCapture(source)

    # If Camera Device is not opened, exit the program
    if not cam.isOpened():
        print("Video device or file couldn't be opened")
        exit()

    print("Press key `p` to pause the video to start tracking")
    while True:
        # Retrieve an image and Display it.
        retval, img = cam.read()
        if not retval:
            print("Cannot capture frame device")
            exit()
        if (cv2.waitKey(10) == ord('p')):
            break
        cv2.namedWindow("Image", cv2.WINDOW_NORMAL)
        cv2.imshow("Image", img)

        if img.sum() > flash_threshold:
            print("flash", img.sum())

        # for the unseen video, print the sum of RGB value to set the flash threshold
        # print(img.sum())
    cv2.destroyWindow("Image")

    # Co-ordinates of objects to be tracked
    # will be stored in a list named `points`
    points = get_points.run(img)

    if not points:
        print("ERROR: No object to be tracked.")
        exit()

    cv2.namedWindow("Image", cv2.WINDOW_NORMAL)
    cv2.imshow("Image", img)

    # Initial co-ordinates of the object to be tracked
    # Create the tracker object
    tracker = dlib.correlation_tracker()
    # Provide the tracker the initial position of the object
    tracker.start_track(img, dlib.rectangle(*points[0]))

    position_tuple_list = []
    record = False
    while True:
        # Read frame from device or file
        retval, img = cam.read()
        if not retval:
            print("Cannot capture frame device | CODE TERMINATING :(")
            break
        # Update the tracker
        tracker.update(img)
        # Get the position of the object, draw a
        # bounding box around it and display it.
        rect = tracker.get_position()
        if img.sum() > flash_threshold:
            record = True
            print("flash")
        if record == True:
            x_y_pair = (int((rect.left() + rect.right()) / 2),
                        int((rect.top() + rect.bottom()) / 2))
            print(x_y_pair)
            position_tuple_list.append(x_y_pair)
        pt1 = (int(rect.left()), int(rect.top()))  # left-top point
        pt2 = (int(rect.right()), int(rect.bottom()))  # right-buttom point
        cv2.rectangle(img, pt1, pt2, (255, 255, 255), 3)
        # print ("Object tracked at [{}, {}] \r".format(pt1, pt2),)
        if dispLoc:
            loc = (int(rect.left()), int(rect.top() - 20))
            txt = "Object tracked at [{}, {}]".format(pt1, pt2)
            cv2.putText(img, txt, loc, cv2.FONT_HERSHEY_SIMPLEX, .5,
                        (255, 255, 255), 1)
        cv2.namedWindow("Image", cv2.WINDOW_NORMAL)
        cv2.imshow("Image", img)
        # Continue until the user presses ESC key
        if cv2.waitKey(1) == 27:
            break

    # Release the VideoCapture object
    cam.release()
    position_tuple_list = np.array(position_tuple_list)
    np.save(str(source) + ".npy", position_tuple_list)
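# Added sketch: run() above saves the tracked centre points to "<source>.npy";
# the saved trajectory can be inspected afterwards, for example with
# matplotlib (file name assumed to match the np.save call above):
import numpy as np
import matplotlib.pyplot as plt

def plot_trajectory(npy_path):
    pts = np.load(npy_path)
    plt.plot(pts[:, 0], pts[:, 1], "b.-")
    plt.gca().invert_yaxis()   # image y-axis points downwards
    plt.xlabel("x [px]")
    plt.ylabel("y [px]")
    plt.show()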
    def update(self):
        # cap, tracker, track_pos_prev:
        ret, img = self.cap.read()
        self.tracker.update(img)
        pos = self.tracker.get_position()
        track_pos_current = [(pos.left() + pos.right()) / 2., (pos.top() + pos.bottom()) / 2.]
        vertical_ratio = (track_pos_current[1] - self.track_pos_prev[1]) / img.shape[1]
        # the flap-up velocity is proportional to the vertical hand motion
        self.bird.flap_up_velocity = vertical_ratio / jump_vertical_ratio * 1

        self.track_pos_prev = track_pos_current

        print("Vertical ration is %f" % vertical_ratio)
        # if the ratio is larger than jump_vertical_ratio, then it is a jump
        if vertical_ratio > jump_vertical_ratio:
            if self.phase[0] or self.phase[1]:
                jump.play()
                self.bird.flap()
                if self.phase[0]:
                    self.phase[0] = False
                    self.phase[1] = True
            elif self.phase[3]:
                self.phase[3] = False
                self.start(self.cap, self.tracker, self.track_pos_prev)



        cv2.rectangle(img, (int(pos.right()), int(pos.bottom())), (int(pos.left()), int(pos.top())), (255, 0, 0),0)
        # clamp the crop to the image bounds (shape is (height, width, channels))
        left = max(0, int(pos.left()))
        right = min(img.shape[1], int(pos.right()))
        top = max(0, int(pos.top()))
        bottom = min(img.shape[0], int(pos.bottom()))
        crop_img = img[top:bottom, left:right]
        cv2.imshow('Gesture', img)
        #cv2.imshow('Hand', crop_img)
        #cv2.waitKey(16)

        if self.phase[3]:
            # we restart here
            track_flag = False
            self.tracker = dlib.correlation_tracker()
            while self.cap.isOpened():
                ret, img = self.cap.read()
                if not track_flag:
                    count_defects = detect_hand(img, hand_pos)
                    if count_defects > hand_convex_number:
                        cv2.destroyWindow('Thresholded')
                        self.tracker.start_track(img, dlib.rectangle(hand_pos[0], hand_pos[1], hand_pos[2], hand_pos[3]))
                        track_flag = True
                        pos = self.tracker.get_position()
                        self.track_pos_prev = [(pos.left() + pos.right()) / 2., (pos.top() + pos.bottom()) / 2.]
                        # we start the game if there is a hand detected
                        self.__init__()
                        self.start(self.cap, self.tracker, self.track_pos_prev)
                        frame.start()
                        return
                    else:
                        k = cv2.waitKey(1)
                        if k == 27:
                            break
            return

        if self.bird.out(-float('inf'), height - 42):
            bg_music.pause()
            end.play()
            self.phase[1] = False
            self.phase[2] = False
            self.phase[3] = True
            return

        if self.pipes[0].pos[0] + half_gap[0] < 0:
            self.pipes.pop(0)
            x = self.pipes[-1].pos[0] + 150
            self.pipes.append(Pipe([x, random.randrange(gap_pos_min, gap_pos_max)], random.randrange(-5, 5)))

        if self.phase[0]:
            self.bird.fly()
            self.bird.pos[1] = height / 2 + 4 * math.sin(self.bird.time)
            self.ground.move()

        elif self.phase[1]:
            self.bird.fly()
            self.bird.fall()
            self.ground.move()
            for pipe in self.pipes:
                pipe.move()
                if pipe.pos[0] == self.bird.pos[0]:
                    coin.play()
                    self.score += 1
                if self.bird.between(pipe.pos[0] - half_gap[0], pipe.pos[0] + half_gap[0]):
                    if self.bird.out(pipe.pos[1] - half_gap[1], pipe.pos[1] + half_gap[1]):
                        bump.play()
                        self.bird.vel = 0
                        self.phase[1] = False
                        self.phase[2] = True
                        if self.score > self.best:
                            self.best = self.score
                            self.new = 'new'
                        else:
                            self.new = ''

        elif self.phase[2]:
            self.bird.fall()
Example #33
0
    def get_frame(self):
        if self.vs.isOpened():
            ret, frame = self.vs.read()
            if not ret:
                # no frame was grabbed (end of stream); bail out early
                return (ret, None)
            frame = imutils.resize(frame, width=900)
            rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            self.H, self.W = frame.shape[:2]

            status = "Waiting"
            rects = []
            if self.totalFrames % 5 == 0:
                status = "Detecting"

                self.trackers = []
                blob = cv2.dnn.blobFromImage(frame, 0.007843, (self.W, self.H),
                                             127.5)
                self.net.setInput(blob)
                detections = self.net.forward()
                for i in np.arange(0, detections.shape[2]):
                    confidence = detections[0, 0, i, 2]
                    if confidence > 0.4:
                        idx = int(detections[0, 0, i, 1])
                        if idx == 15:
                            box = detections[0, 0, i, 3:7] * np.array(
                                [self.W, self.H, self.W, self.H])
                            (startX, startY, endX, endY) = box.astype("int")
                            cv2.rectangle(frame, (startX, startY),
                                          (endX, endY), (0, 255, 255), 2)
                            #centroid=(int((startX+endX)/2),int((startY+endY)/2))
                            #person detection
                            #if centroid[1]<= self.H-230 and centroid[1]>= self.H-320:
                            tracker = dlib.correlation_tracker()
                            rect = dlib.rectangle(startX, startY, endX, endY)
                            tracker.start_track(rgb, rect)
                            self.trackers.append(tracker)
            else:
                for tracker in self.trackers:
                    status = "Tracking"
                    tracker.update(rgb)
                    pos = tracker.get_position()
                    startX = int(pos.left())
                    startY = int(pos.top())
                    endX = int(pos.right())
                    endY = int(pos.bottom())
                    rects.append((startX, startY, endX, endY))

            #cv2.line(frame, (0, self.H-320), (self.W, self.H-320), (255, 0, 0), 2)
            cv2.line(frame, (0, self.H // 2), (self.W, self.H // 2),
                     (0, 255, 255), 2)
            #cv2.line(frame, (0, self.H-230), (self.W, self.H-230), (255, 0, 0), 2)
            objects = self.ct.update(rects)
            for (objectID, centroid) in objects.items():
                to = self.trackableObjects.get(objectID, None)
                if to is None:
                    to = TrackableObject(objectID, centroid)

                else:
                    y = [c[1] for c in to.centroids]
                    direction = centroid[1] - np.mean(y)
                    to.centroids.append(centroid)
                    if not to.counted:
                        if direction < 0 and centroid[1] < self.H // 2:
                            self.totalDown += 1
                            to.counted = True
                        if direction > 0 and centroid[1] > self.H // 2:
                            self.totalUp += 1
                            to.counted = True
                self.trackableObjects[objectID] = to
            self.totalFrames += 1
            info = [
                ("IN", self.totalUp),
                ("Status", status),
            ]
            for (i, (k, v)) in enumerate(info):
                text = "{}: {}".format(k, v)
                #cv2.putText(frame, text, (10, self.H - ((i * 20) + 20)),cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
            # resize the output frame once, outside the info loop
            frame = cv2.resize(frame, (500, 500),
                               interpolation=cv2.INTER_AREA)
            global count
            global count1
            count1 = self.totalUp
            count = self.totalDown
            if ret:
                # Return a boolean success flag and the current frame converted to RGB
                return (ret, cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
            else:
                return (ret, None)
        else:
            return (ret, None)
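get_frame() above depends on self.ct, a centroid tracker whose class is not included in this snippet (a later example imports it from pyimagesearch.centroidtracker). Judging only from how it is used here, update() takes a list of (startX, startY, endX, endY) boxes and returns a dict mapping object IDs to centroids. A minimal stand-in sketch under that assumption, for experimenting without the library; it does no max-distance or disappearance handling:

class SimpleCentroidTracker:
    # Assigns an ID to each detection's centroid and, on later updates,
    # greedily matches each existing object to the nearest new centroid.
    def __init__(self):
        self.next_id = 0
        self.objects = {}  # objectID -> (cx, cy)

    def update(self, rects):
        centroids = [((sx + ex) // 2, (sy + ey) // 2) for (sx, sy, ex, ey) in rects]
        updated = {}
        used = set()
        for oid, (ox, oy) in self.objects.items():
            candidates = [i for i in range(len(centroids)) if i not in used]
            if not candidates:
                break
            best = min(candidates,
                       key=lambda i: (centroids[i][0] - ox) ** 2 + (centroids[i][1] - oy) ** 2)
            updated[oid] = centroids[best]
            used.add(best)
        for i, c in enumerate(centroids):
            if i not in used:
                updated[self.next_id] = c
                self.next_id += 1
        self.objects = updated
        return self.objects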
Example #34
0
    for oid in ids_to_delete:
        cur_objects.pop(oid)

    print "detecting faces"
    dets = detector(img, 2)

    print "faces detected: {0}".format(len(dets))
    for i, d in enumerate(dets):
        is_new = check_if_new(d, cur_objects)
        print "detection {0} is new? {1}".format(i, is_new)
        if is_new:
            new_ob = {}
            new_ob_id = str(uuid.uuid4())
            new_ob["id"] = new_ob_id
            print "starting tracking new object {0}".format(new_ob_id)
            new_ob["tracker"] = dlib.correlation_tracker()
            new_ob["tracker"].start_track(img, d)
            new_ob["start_frame"] = frame_name
            new_ob["confirmed"] = True
            #todo: face recognition
            cur_objects[new_ob_id] = new_ob

    for oid in cur_objects:
        pos = cur_objects[oid]["tracker"].get_position()
        obj_pos = {}
        obj_pos["top"] = pos.top()
        obj_pos["left"] = pos.left()
        obj_pos["right"] = pos.right()
        obj_pos["bottom"] = pos.bottom()
        frame_data["objs"][oid] = {}
        frame_data["objs"][oid]["pos"] = obj_pos
def Cov():
    global time
    try:
        from pyimagesearch.centroidtracker import CentroidTracker
        from pyimagesearch.trackableobject import TrackableObject
        from imutils.video import VideoStream
        from imutils.video import FPS
        import numpy as np
        import argparse
        import imutils
        import time
        import dlib
        import cv2
        from datetime import datetime
        import pyodbc
        conn = pyodbc.connect(
            'DRIVER={SQL Server};SERVER=182.156.200.178;DATABASE=python;UID=sa;PWD=elmcindia786@'
        )
        entry = conn.cursor()
        exitt = conn.cursor()
        # construct the argument parse and parse the arguments
        ap = argparse.ArgumentParser()
        """CAMERA IS DEFINED HERE!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"""
        cam = "rtsp://*****:*****@[email protected]/cam/realmonitor?channel=1&subtype=0"

        ap.add_argument("-p",
                        "--prototxt",
                        default="mobilenet_ssd/MobileNetSSD_deploy.prototxt",
                        help="path to Caffe 'deploy' prototxt file")
        ap.add_argument("-m",
                        "--model",
                        default="mobilenet_ssd/MobileNetSSD_deploy.caffemodel",
                        help="path to Caffe pre-trained model")
        ap.add_argument("-i",
                        "--input",
                        type=str,
                        default=cam,
                        help="path to optional input video file")
        ap.add_argument("-o",
                        "--output",
                        type=str,
                        default="output/2.mp4",
                        help="path to optional output video file")
        ap.add_argument("-c",
                        "--confidence",
                        type=float,
                        default=0.4,
                        help="minimum probability to filter weak detections")
        ap.add_argument("-s",
                        "--skip-frames",
                        type=int,
                        default=30,
                        help="# of skip frames between detections")
        args = vars(ap.parse_args())

        # initialize the list of class labels MobileNet SSD was trained to
        # detect
        CLASSES = [
            "background", "aeroplane", "bicycle", "bird", "boat", "bottle",
            "bus", "car", "cat", "chair", "cow", "diningtable", "dog", "horse",
            "motorbike", "person", "pottedplant", "sheep", "sofa", "train",
            "tvmonitor"
        ]

        # load our serialized model from disk
        print("[INFO] loading model...")
        net = cv2.dnn.readNetFromCaffe(args["prototxt"], args["model"])

        # if a video path was not supplied, grab a reference to the webcam
        if not args.get("input", False):
            print("[INFO] starting video stream...")
            vs = VideoStream(src=0).start()
            time.sleep(2.0)

        # otherwise, grab a reference to the video file
        else:
            print("[INFO] opening video file...")
            vs = cv2.VideoCapture(args["input"])

        # initialize the video writer (we'll instantiate later if need be)
        writer = None

        # initialize the frame dimensions (we'll set them as soon as we read
        # the first frame from the video)
        W = None
        H = None

        # instantiate our centroid tracker, then initialize a list to store
        # each of our dlib correlation trackers, followed by a dictionary to
        # map each unique object ID to a TrackableObject
        ct = CentroidTracker(maxDisappeared=40, maxDistance=50)
        trackers = []
        trackableObjects = {}

        # initialize the total number of frames processed thus far, along
        # with the total number of objects that have moved either up or down
        totalFrames = 0
        totalDown = 0
        totalUp = 0
        Up = []
        Down = []

        # start the frames per second throughput estimator
        fps = FPS().start()

        # loop over frames from the video stream
        while True:
            # grab the next frame and handle if we are reading from either
            # VideoCapture or VideoStream
            frame = vs.read()
            frame = frame[1] if args.get("input", False) else frame

            # if we are viewing a video and we did not grab a frame then we
            # have reached the end of the video
            if args["input"] is not None and frame is None:
                break

            # resize the frame to have a maximum width of 500 pixels (the
            # less data we have, the faster we can process it), then convert
            # the frame from BGR to RGB for dlib
            frame = imutils.resize(frame, width=500)
            rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

            # if the frame dimensions are empty, set them
            if W is None or H is None:
                (H, W) = frame.shape[:2]

            # if we are supposed to be writing a video to disk, initialize
            # the writer
            if args["output"] is not None and writer is None:
                fourcc = cv2.VideoWriter_fourcc(*"MJPG")
                writer = cv2.VideoWriter(args["output"], fourcc, 30, (W, H),
                                         True)

            # initialize the current status along with our list of bounding
            # box rectangles returned by either (1) our object detector or
            # (2) the correlation trackers
            status = "Waiting"
            rects = []

            # check to see if we should run a more computationally expensive
            # object detection method to aid our tracker
            if totalFrames % args["skip_frames"] == 0:
                # set the status and initialize our new set of object trackers
                status = "Detecting"
                trackers = []

                # convert the frame to a blob and pass the blob through the
                # network and obtain the detections
                blob = cv2.dnn.blobFromImage(frame, 0.007843, (W, H), 127.5)
                net.setInput(blob)
                detections = net.forward()

                # loop over the detections
                for i in np.arange(0, detections.shape[2]):
                    # extract the confidence (i.e., probability) associated
                    # with the prediction
                    confidence = detections[0, 0, i, 2]

                    # filter out weak detections by requiring a minimum
                    # confidence
                    if confidence > args["confidence"]:
                        # extract the index of the class label from the
                        # detections list
                        idx = int(detections[0, 0, i, 1])

                        # if the class label is not a person, ignore it
                        if CLASSES[idx] != "person":
                            continue

                        # compute the (x, y)-coordinates of the bounding box
                        # for the object
                        box = detections[0, 0, i, 3:7] * np.array([W, H, W, H])
                        (startX, startY, endX, endY) = box.astype("int")

                        # construct a dlib rectangle object from the bounding
                        # box coordinates and then start the dlib correlation
                        # tracker
                        tracker = dlib.correlation_tracker()
                        rect = dlib.rectangle(startX, startY, endX, endY)
                        tracker.start_track(rgb, rect)

                        # add the tracker to our list of trackers so we can
                        # utilize it during skip frames
                        trackers.append(tracker)

            # otherwise, we should utilize our object *trackers* rather than
            # object *detectors* to obtain a higher frame processing throughput
            else:
                # loop over the trackers
                for tracker in trackers:
                    # set the status of our system to be 'tracking' rather
                    # than 'waiting' or 'detecting'
                    status = "Tracking"

                    # update the tracker and grab the updated position
                    tracker.update(rgb)
                    pos = tracker.get_position()

                    # unpack the position object
                    startX = int(pos.left())
                    startY = int(pos.top())
                    endX = int(pos.right())
                    endY = int(pos.bottom())

                    # add the bounding box coordinates to the rectangles list
                    rects.append((startX, startY, endX, endY))

            # draw a horizontal line in the center of the frame -- once an
            # object crosses this line we will determine whether they were
            # moving 'up' or 'down'
            cv2.line(frame, (0, H // 2), (W, H // 2), (0, 255, 255), 2)

            # use the centroid tracker to associate the (1) old object
            # centroids with (2) the newly computed object centroids
            objects = ct.update(rects)

            # loop over the tracked objects
            for (objectID, centroid) in objects.items():
                # check to see if a trackable object exists for the current
                # object ID
                to = trackableObjects.get(objectID, None)

                # if there is no existing trackable object, create one
                if to is None:
                    to = TrackableObject(objectID, centroid)

                # otherwise, there is a trackable object so we can utilize it
                # to determine direction
                else:
                    # the difference between the y-coordinate of the *current*
                    # centroid and the mean of *previous* centroids will tell
                    # us in which direction the object is moving (negative for
                    # 'up' and positive for 'down')
                    y = [c[1] for c in to.centroids]
                    direction = centroid[1] - np.mean(y)
                    to.centroids.append(centroid)

                    # check to see if the object has been counted or not
                    if not to.counted:
                        # if the direction is negative (indicating the object
                        # is moving up) AND the centroid is above the center
                        # line, count the object
                        if direction < 0 and centroid[1] < H // 2:
                            totalUp += 1
                            Up.append(totalUp)
                            to.counted = True

                        # if the direction is positive (indicating the object
                        # is moving down) AND the centroid is below the
                        # center line, count the object
                        elif direction > 0 and centroid[1] > H // 2:
                            totalDown += 1
                            Down.append(totalDown)
                            to.counted = True

                # store the trackable object in our dictionary
                trackableObjects[objectID] = to

                # draw both the ID of the object and the centroid of the
                # object on the output frame
                text = "ID {}".format(objectID)
                cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
                cv2.circle(frame, (centroid[0], centroid[1]), 4, (0, 255, 0),
                           -1)

            # construct a tuple of information we will be displaying on the
            # frame
            info = [
                ("Up", totalUp),
                ("Down", totalDown),
                ("Status", status),
            ]
            today = datetime.today()
            if len(Up) > 0:

                print("up:", str(Up))
                print("Log", (str(today)))
                sql = "INSERT INTO [python].[dbo].[peopleCounter] (status, logDateTime, personcount) VALUES ('0','" + str(
                    today) + "','1')"
                entry.execute(sql)
                conn.commit()
                Up.clear()

            if len(Down) > 0:
                print("Down:", str(Down))
                print("Log", (str(today)))
                sql = "INSERT INTO [python].[dbo].[peopleCounter] (status, logDateTime, personcount) VALUES ('1','" + str(
                    today) + "','1')"
                exitt.execute(sql)
                conn.commit()
                Down.clear()

            # loop over the info tuples and draw them on our frame
            for (i, (k, v)) in enumerate(info):
                text = "{}: {}".format(k, v)
                cv2.putText(frame, text, (10, H - ((i * 20) + 20)),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)

            # check to see if we should write the frame to disk
            # if writer is not None:
            # 	writer.write(frame)

            # show the output frame
            cv2.imshow("Frame", frame)
            key = cv2.waitKey(1) & 0xFF

            # if the `q` key was pressed, break from the loop
            if key == ord("q"):
                break

            # increment the total number of frames processed thus far and
            # then update the FPS counter
            totalFrames += 1
            fps.update()

        # stop the timer and display FPS information
        fps.stop()
        print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
        print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

        # check to see if we need to release the video writer pointer
        if writer is not None:
            writer.release()
            time.sleep(1)
            Recall()

        # if we are not using a video file, stop the camera video stream
        if not args.get("input", False):
            vs.stop()

        # otherwise, release the video file pointer
        else:
            vs.release()

        # close any open windows
        cv2.destroyAllWindows()
    except Exception as e:
        print(e)
        time.sleep(1)
        print("sd")
        Recall()
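Cov() above assembles its INSERT statements by string concatenation. pyodbc also accepts parameterised queries with ? placeholders, which avoids quoting issues; a short sketch using the same (assumed) table, cursor and value layout as the code above:

sql = ("INSERT INTO [python].[dbo].[peopleCounter] "
       "(status, logDateTime, personcount) VALUES (?, ?, ?)")
entry.execute(sql, ('0', str(today), '1'))  # same cursor and values as the Up branch
conn.commit()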
Example #36
0
def trackMultipleObjects():
    rectangleColor = (0, 255, 0)
    frameCounter = 0
    currentCarID = 0
    currentBikeID = 0
    fps = 0

    carTracker = {}
    bikeTracker = {}
    bikeNumbers = {}
    carNumbers = {}
    bikeLocation1 = {}
    carLocation1 = {}
    bikeLocation2 = {}
    carLocation2 = {}
    speed = [None] * 1000
    go = [False for i in range(1000)]
    identity = [0 for i in range(1000)]
    snaps = [False for i in range(1000)]
    types = ["cars" for i in range(1000)]
    Helmets = ["No Helmet Detected" for i in range(1000)]
    # Write output to video file
    out = cv2.VideoWriter('outpy.avi',
                          cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), 10,
                          (WIDTH, HEIGHT))
    while True:
        start_time = time.time()
        rc, image = video.read()
        if type(image) == type(None):
            break

        image = cv2.resize(image, (WIDTH, HEIGHT))
        resultImage = image.copy()

        frameCounter = frameCounter + 1

        carIDtoDelete = []
        for carID in carTracker.keys():
            trackingQuality = carTracker[carID].update(image)

            if trackingQuality < 7:
                carIDtoDelete.append(carID)

        for carID in carIDtoDelete:
            print('Removing carID ' + str(carID) + ' from list of trackers.')
            print('Removing carID ' + str(carID) + ' previous location.')
            print('Removing carID ' + str(carID) + ' current location.')
            carTracker.pop(carID, None)
            carLocation1.pop(carID, None)
            carLocation2.pop(carID, None)

        if not (frameCounter % 10):
            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            cars = carCascade.detectMultiScale(gray, 1.1, 13, 18, (24, 24))
            bikes = bikeCascade.detectMultiScale(gray, 1.1, 13, 18, (24, 24))
            for (_x, _y, _w, _h) in cars:
                x = int(_x)
                y = int(_y)
                w = int(_w)
                h = int(_h)

                x_bar = x + 0.5 * w
                y_bar = y + 0.5 * h

                matchCarID = None

                for carID in carTracker.keys():
                    trackedPosition = carTracker[carID].get_position()

                    t_x = int(trackedPosition.left())
                    t_y = int(trackedPosition.top())
                    t_w = int(trackedPosition.width())
                    t_h = int(trackedPosition.height())

                    t_x_bar = t_x + 0.5 * t_w
                    t_y_bar = t_y + 0.5 * t_h

                    if ((t_x <= x_bar <= (t_x + t_w)) and (t_y <= y_bar <=
                                                           (t_y + t_h))
                            and (x <= t_x_bar <= (x + w)) and (y <= t_y_bar <=
                                                               (y + h))):
                        matchCarID = carID

                if matchCarID is None:
                    print('Creating new tracker ' + str(currentCarID))

                    tracker = dlib.correlation_tracker()
                    tracker.start_track(image,
                                        dlib.rectangle(x, y, x + w, y + h))

                    carTracker[currentCarID] = tracker
                    carLocation1[currentCarID] = [x, y, w, h]

                    currentCarID = currentCarID + 1
            for (_x, _y, _w, _h) in bikes:
                x = int(_x)
                y = int(_y)
                w = int(_w)
                h = int(_h)

                x_bar = x + 0.5 * w
                y_bar = y + 0.5 * h

                matchCarID = None

                for carID in carTracker.keys():
                    trackedPosition = carTracker[carID].get_position()

                    t_x = int(trackedPosition.left())
                    t_y = int(trackedPosition.top())
                    t_w = int(trackedPosition.width())
                    t_h = int(trackedPosition.height())

                    t_x_bar = t_x + 0.5 * t_w
                    t_y_bar = t_y + 0.5 * t_h

                    if ((t_x <= x_bar <= (t_x + t_w)) and (t_y <= y_bar <=
                                                           (t_y + t_h))
                            and (x <= t_x_bar <= (x + w)) and (y <= t_y_bar <=
                                                               (y + h))):
                        matchCarID = carID

                if matchCarID is None:
                    print('Creating new tracker ' + str(currentCarID))

                    tracker = dlib.correlation_tracker()
                    tracker.start_track(image,
                                        dlib.rectangle(x, y, x + w, y + h))

                    carTracker[currentCarID] = tracker
                    carLocation1[currentCarID] = [x, y, w, h]
                    types[currentCarID] = "bikes"
                    currentCarID = currentCarID + 1

        #cv2.line(resultImage,(0,480),(1280,480),(255,0,0),5)

        for carID in carTracker.keys():
            trackedPosition = carTracker[carID].get_position()

            t_x = int(trackedPosition.left())
            t_y = int(trackedPosition.top())
            t_w = int(trackedPosition.width())
            t_h = int(trackedPosition.height())

            #cv2.rectangle(resultImage, (t_x, t_y), (t_x + t_w, t_y + t_h), rectangleColor, 4)

            # speed estimation
            carLocation2[carID] = [t_x, t_y, t_w, t_h]

        end_time = time.time()
        fps = 0.0

        #cv2.putText(resultImage, 'FPS: ' + str(int(fps)), (620, 30),cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)
        fm = 0
        for i in carLocation1.keys():
            if frameCounter % 1 == 0:
                [x1, y1, w1, h1] = carLocation1[i]
                [x2, y2, w2, h2] = carLocation2[i]
                #print 'previous location: ' + str(carLocation1[i]) + ', current location: ' + str(carLocation2[i])
                carLocation1[i] = [x2, y2, w2, h2]

                # print 'new previous location: ' + str(carLocation1[i])
                if [x1, y1, w1, h1] != [x2, y2, w2, h2]:
                    #print("Going")

                    #	if snaps[i] == False:
                    #roi = resultImage[y2:y2+h2,x2:x2+w2]
                    #		if types[i]=="bikes":
                    #			roi = resultImage[y2:y2+h2,x2:x2+w2]
                    #			result = helm.detect(roi)
                    #		snaps[i]=True
                    #		continue
                    result = False
                    roi = resultImage[y1:y1 + h1, x1:x1 + w1]
                    if types[i] == "bikes" and Helmets[
                            i] == "No Helmet Detected" and identity[
                                i] < OPTIMISE:
                        result = helm.detect(roi)
                    if result == True:
                        Helmets[i] = "Helmet Detected"

                #	if y1 >= 275 and y1 <= 285:
                    if True:  # the y-range gate (commented out above) is disabled
                        if not (end_time == start_time):
                            fps = 1.0 / (end_time - start_time)
                        speed[i] = estimateSpeed([x1, y1, w1, h1],
                                                 [x2, y2, w2, h2], fps)
                        #print(str(speed[i]))
                    #if y1 > 275 and y1 < 285:
                    if int(speed[i]) > 40:
                        speed[i] = speed[i] % 40
                    if go[i] == True and int(speed[i]) < 10:
                        speed[i] = speed[i] + 15
                    if int(speed[i]) == 0:
                        continue
                    if identity[i] % LAG == 0:
                        if int(speed[i]) > 30:
                            go[i] = True
                            #if we want to find overspeeding speed just print speed[i]

                            cv2.putText(resultImage, "OverSpeeding ALERT",
                                        (int(x1 + w1 / 2), int(y1 - 5)),
                                        cv2.FONT_HERSHEY_SIMPLEX, 0.75,
                                        (0, 0, 255), 2)
                        elif speed[i] != None and y1 >= 180 and speed[i] != 0:
                            ans = str(int(speed[i])) + " km/hr "
                            if types[i] == "bikes":
                                ans = ans + Helmets[i]
                            cv2.putText(resultImage, ans,
                                        (int(x1 + w1 / 2), int(y1 - 5)),
                                        cv2.FONT_HERSHEY_SIMPLEX, 0.75,
                                        (0, 255, 0), 2)
                    identity[i] += 1
                    #print ('CarID ' + str(i) + ': speed is ' + str("%.2f" % round(speed[i], 0)) + ' km/h.\n')

                    #else:
                    #	cv2.putText(resultImage, "Far Object", (int(x1 + w1/2), int(y1)),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
            fm += 1

            #print ('CarID ' + str(i) + ' Location1: ' + str(carLocation1[i]) + ' Location2: ' + str(carLocation2[i]) + ' speed is ' + str("%.2f" % round(speed[i], 0)) + ' km/h.\n')

        cv2.imshow('result', resultImage)
        # Write the frame into the file 'output.avi'
        #out.write(resultImage)
        #print(fm)

        if cv2.waitKey(33) == 27:
            break

    cv2.destroyAllWindows()
Example #37
0
def trackMultipleObjects():
	rectangleColor = (0, 255, 0)
	frameCounter = 0
	currentCarID = 0
	fps = 0
	
	carTracker = {}
	carNumbers = {}
	carLocation1 = {}
	carLocation2 = {}
	speed = [None] * 1000
	
	# Write output to video file
	out = cv2.VideoWriter('outpy.avi',cv2.VideoWriter_fourcc('M','J','P','G'), 10, (WIDTH,HEIGHT))


	while True:
		start_time = time.time()
		rc, image = video.read()
		if type(image) == type(None):
			break
		
		image = cv2.resize(image, (WIDTH, HEIGHT))
		resultImage = image.copy()
		
		frameCounter = frameCounter + 1
		
		carIDtoDelete = []

		for carID in carTracker.keys():
			trackingQuality = carTracker[carID].update(image)
			
			if trackingQuality < 7:
				carIDtoDelete.append(carID)
				
		for carID in carIDtoDelete:
			print ('Removing carID ' + str(carID) + ' from list of trackers.')
			print ('Removing carID ' + str(carID) + ' previous location.')
			print ('Removing carID ' + str(carID) + ' current location.')
			carTracker.pop(carID, None)
			carLocation1.pop(carID, None)
			carLocation2.pop(carID, None)
		
		if not (frameCounter % 10):
			gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
			cars = carCascade.detectMultiScale(gray, 1.1, 13, 18, (24, 24))
			
			for (_x, _y, _w, _h) in cars:
				x = int(_x)
				y = int(_y)
				w = int(_w)
				h = int(_h)
			
				x_bar = x + 0.5 * w
				y_bar = y + 0.5 * h
				
				matchCarID = None
			
				for carID in carTracker.keys():
					trackedPosition = carTracker[carID].get_position()
					
					t_x = int(trackedPosition.left())
					t_y = int(trackedPosition.top())
					t_w = int(trackedPosition.width())
					t_h = int(trackedPosition.height())
					
					t_x_bar = t_x + 0.5 * t_w
					t_y_bar = t_y + 0.5 * t_h
				
					if ((t_x <= x_bar <= (t_x + t_w)) and (t_y <= y_bar <= (t_y + t_h)) and (x <= t_x_bar <= (x + w)) and (y <= t_y_bar <= (y + h))):
						matchCarID = carID
				
				if matchCarID is None:
					print ('Creating new tracker ' + str(currentCarID))
					
					tracker = dlib.correlation_tracker()
					tracker.start_track(image, dlib.rectangle(x, y, x + w, y + h))
					
					carTracker[currentCarID] = tracker
					carLocation1[currentCarID] = [x, y, w, h]

					currentCarID = currentCarID + 1
		
		#cv2.line(resultImage,(0,480),(1280,480),(255,0,0),5)


		for carID in carTracker.keys():
			trackedPosition = carTracker[carID].get_position()
					
			t_x = int(trackedPosition.left())
			t_y = int(trackedPosition.top())
			t_w = int(trackedPosition.width())
			t_h = int(trackedPosition.height())
			
			cv2.rectangle(resultImage, (t_x, t_y), (t_x + t_w, t_y + t_h), rectangleColor, 4)
			
			# speed estimation
			carLocation2[carID] = [t_x, t_y, t_w, t_h]
		
		end_time = time.time()
		
		if not (end_time == start_time):
			fps = 1.0/(end_time - start_time)
		
		#cv2.putText(resultImage, 'FPS: ' + str(int(fps)), (620, 30),cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)


		for i in carLocation1.keys():	
			if frameCounter % 1 == 0:
				[x1, y1, w1, h1] = carLocation1[i]
				[x2, y2, w2, h2] = carLocation2[i]
		
				# print 'previous location: ' + str(carLocation1[i]) + ', current location: ' + str(carLocation2[i])
				carLocation1[i] = [x2, y2, w2, h2]
		
				# print 'new previous location: ' + str(carLocation1[i])
				if [x1, y1, w1, h1] != [x2, y2, w2, h2]:
					if (speed[i] == None or speed[i] == 0) and y1 >= 275 and y1 <= 285:
						speed[i] = estimateSpeed([x1, y1, w1, h1], [x2, y2, w2, h2])

					#if y1 > 275 and y1 < 285:
					if speed[i] != None and y1 >= 180:
						cv2.putText(resultImage, str(int(speed[i])) + " km/hr", (int(x1 + w1/2), int(y1-5)),cv2.FONT_HERSHEY_SIMPLEX, 0.75, (255, 255, 255), 2)
					
					#print ('CarID ' + str(i) + ': speed is ' + str("%.2f" % round(speed[i], 0)) + ' km/h.\n')

					#else:
					#	cv2.putText(resultImage, "Far Object", (int(x1 + w1/2), int(y1)),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)

						#print ('CarID ' + str(i) + ' Location1: ' + str(carLocation1[i]) + ' Location2: ' + str(carLocation2[i]) + ' speed is ' + str("%.2f" % round(speed[i], 0)) + ' km/h.\n')
		cv2.imshow('result', resultImage)
		# Write the frame into the file 'output.avi'
		#out.write(resultImage)


		if cv2.waitKey(33) == 27:
			break
	
	cv2.destroyAllWindows()
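Both trackMultipleObjects examples above call an estimateSpeed helper that is not defined in these snippets. A minimal sketch of one common form, under the assumption of a fixed pixels-per-metre calibration and an effective frame rate between the two observations; the default values are placeholders, not the original author's constants:

import math

def estimateSpeed(location1, location2, fps=30.0, ppm=8.8):
    # location1/location2 are [x, y, w, h] boxes from consecutive samples;
    # ppm (pixels per metre) is a scene-specific calibration constant.
    d_pixels = math.sqrt((location2[0] - location1[0]) ** 2 +
                         (location2[1] - location1[1]) ** 2)
    d_meters = d_pixels / ppm
    return d_meters * fps * 3.6  # metres per frame * frames per second -> km/h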
predictor_path = sys.argv[1]
faces_folder_path = sys.argv[2]

detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(predictor_path)
win = dlib.image_window()

cap = cv2.VideoCapture(0)

# for f in glob.glob(os.path.join(faces_folder_path, "*.jpg")):
#    print("Processing file: {}".format(f))

# Create the correlation tracker - the object needs to be initialized
# before it can be used
tracker = dlib.correlation_tracker()
trackerShape = dlib.correlation_tracker()

# k = 0
while 1:

    # Take each frame
    _, frame = cap.read()

    img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)  # img was already converted to RGB above
    gray = cv2.equalizeHist(gray)
    #    img = io.imread(frame)

    #   cv2.imshow('frame',frame)
    #   cv2.setWindowTitle('Reconecimento facial')
def gen():
    writer = None

    # initialize the frame dimensions (we'll set them as soon as we read
    # the first frame from the video)
    W = None
    H = None

    # instantiate our centroid tracker, then initialize a list to store
    # each of our dlib correlation trackers, followed by a dictionary to
    # map each unique object ID to a TrackableObject
    ct = CentroidTracker(maxDisappeared=40, maxDistance=50)
    trackers = []
    trackableObjects = {}

    # initialize the total number of frames processed thus far, along
    # with the total number of objects that have moved either up or down
    totalFrames = 0
    totalDown = 0
    totalUp = 0
    skip_frames = 30

    # start the frames per second throughput estimator
    fps = FPS().start()
    while True:
        # grab the next frame and handle if we are reading from either
        # VideoCapture or VideoStream
        success, frame = vs.read()

        # if we are viewing a video and we did not grab a frame then we
        # have reached the end of the video
        if frame is None:
            print("noframe")
            break
        # resize the frame to have a maximum width of 500 pixels (the
        # less data we have, the faster we can process it), then convert
        # the frame from BGR to RGB for dlib
        frame = imutils.resize(frame, width=500)
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        # if the frame dimensions are empty, set them
        if W is None or H is None:
            (H, W) = frame.shape[:2]
            # if we are supposed to be writing a video to disk, initialize
            # the writer
        status = "Waiting"
        rects = []
        # check to see if we should run a more computationally expensive
        # object detection method to aid our tracker
        if totalFrames % skip_frames == 0:
            print("detection")
            # set the status and initialize our new set of object trackers
            status = "Detecting"
            trackers = []
            # convert the frame to a blob and pass the blob through the
            # network and obtain the detections
            blob = cv2.dnn.blobFromImage(frame, 0.007843, (W, H), 127.5)
            net.setInput(blob)
            detections = net.forward()
            # loop over the detections
            for i in np.arange(0, detections.shape[2]):
                print("first forloop")
                # extract the confidence (i.e., probability) associated
                # with the prediction
                confidence = detections[0, 0, i, 2]
                # filter out weak detections by requiring a minimum
                # confidence
                if confidence > 0.4:
                    print("yes greater")
                    # extract the index of the class label from the
                    # detections list
                    idx = int(detections[0, 0, i, 1])
                    # if the class label is not a person, ignore it
                    if CLASSES[idx] != "person":
                        print("yes person")
                        continue
                    # compute the (x, y)-coordinates of the bounding box
                    # for the object
                    box = detections[0, 0, i, 3:7] * np.array([W, H, W, H])
                    (startX, startY, endX, endY) = box.astype("int")
                    # construct a dlib rectangle object from the bounding
                    # box coordinates and then start the dlib correlation
                    # tracker
                    tracker = dlib.correlation_tracker()
                    rect = dlib.rectangle(startX, startY, endX, endY)
                    tracker.start_track(rgb, rect)
                    # add the tracker to our list of trackers so we can
                    # utilize it during skip frames
                    trackers.append(tracker)
            # otherwise, we should utilize our object *trackers* rather than
            # object *detectors* to obtain a higher frame processing throughput
        else:
            # loop over the trackers
            for tracker in trackers:
                # set the status of our system to be 'tracking' rather
                # than 'waiting' or 'detecting'
                status = "Tracking"
                # update the tracker and grab the updated position
                tracker.update(rgb)
                pos = tracker.get_position()
                # unpack the position object
                startX = int(pos.left())
                startY = int(pos.top())
                endX = int(pos.right())
                endY = int(pos.bottom())
                # add the bounding box coordinates to the rectangles list
                rects.append((startX, startY, endX, endY))
                # draw a horizontal line in the center of the frame -- once an
                # object crosses this line we will determine whether they were
                # moving 'up' or 'down'
        cv2.line(frame, (0, H // 2), (W, H // 2), (0, 255, 255), 2)
        # use the centroid tracker to associate the (1) old object
        # centroids with (2) the newly computed object centroids
        objects = ct.update(rects)
        # loop over the tracked objects
        for (objectID, centroid) in objects.items():
            # check to see if a trackable object exists for the current
            # object ID
            to = trackableObjects.get(objectID, None)
            # if there is no existing trackable object, create one
            if to is None:
                to = TrackableObject(objectID, centroid)
                # otherwise, there is a trackable object so we can utilize it
                # to determine direction
            else:
                # the difference between the y-coordinate of the *current*
                # centroid and the mean of *previous* centroids will tell
                # us in which direction the object is moving (negative for
                # 'up' and positive for 'down')
                y = [c[1] for c in to.centroids]
                direction = centroid[1] - np.mean(y)
                to.centroids.append(centroid)
                # check to see if the object has been counted or not
                if not to.counted:
                    # if the direction is negative (indicating the object
                    # is moving up) AND the centroid is above the center
                    # line, count the object
                    if direction < 0 and centroid[1] < H // 2:
                        totalUp += 1
                        to.counted = True
                    # if the direction is positive (indicating the object
                    # is moving down) AND the centroid is below the
                    # center line, count the object
                    elif direction > 0 and centroid[1] > H // 2:
                        totalDown += 1
                        to.counted = True
            # store the trackable object in our dictionary
            trackableObjects[objectID] = to
            # draw both the ID of the object and the centroid of the
            # object on the output frame
            text = "ID {}".format(objectID)
            cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
            cv2.circle(frame, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)
            cv2.imwrite("3.jpg", frame)
        # construct a tuple of information we will be displaying on the
        # frame
        info = [
            ("Up", totalUp),
            ("Down", totalDown),
            ("Status", status),
        ]
        # loop over the info tuples and draw them on our frame
        for (i, (k, v)) in enumerate(info):
            text = "{}: {}".format(k, v)
            cv2.putText(frame, text, (10, H - ((i * 20) + 20)),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
            # check to see if we should write the frame to disk
        (flag, encodedImage) = cv2.imencode(".jpg", frame)
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + bytearray(encodedImage) +
               b'\r\n')
Example #40
0
 def __init__(self):
     self.tracker=dlib.correlation_tracker()
Example #41
0
	def main_process(self):
		Base={
			"max_disappear": 30,

			"max_distance": 200,

			"track_object": 4,

			"confidence": 0.4,

			"frame_height": 400,

			"line_point" : 125,

			"display": "true",

			"model_path": "MobileNetSSD_deploy.caffemodel",

			"prototxt_path": "MobileNetSSD_deploy.prototxt",

			"output_path": "output",

			"csv_name": "log.csv"
		}

		CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat",
			"bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
			"dog", "horse", "motorbike", "person", "pottedplant", "sheep",
			"sofa", "train", "tvmonitor"]

		print("[INFO] loading model...")
		net = cv2.dnn.readNetFromCaffe(Base["prototxt_path"],
			Base["model_path"])


		print("[INFO] warming up camera...")
		vs = cv2.VideoCapture(self.filename)

		H = None
		W = None

		ct = CentroidTracker(maxDisappeared=Base["max_disappear"],
			maxDistance=Base["max_distance"])
		trackers = []
		trackableObjects = {}

		totalFrames = 0

		logFile = None

		points = [("A", "B"), ("B", "C"), ("C", "D")]

		fps = FPS().start()

		while True:
			ret, frame  = vs.read()
			ts = datetime.now()
			newDate = ts.strftime("%m-%d-%y")
			minut=ts.minute

			if frame is None:
				break

			frame = imutils.resize(frame, height=Base["frame_height"])
			rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

			if W is None or H is None:
				(H, W) = frame.shape[:2]

			rects = []

			if totalFrames % Base["track_object"] == 0:
				trackers = []

				blob = cv2.dnn.blobFromImage(frame, size=(300, 300),
					ddepth=cv2.CV_8U)
				net.setInput(blob, scalefactor=1.0/127.5, mean=[127.5,
					127.5, 127.5])
				detections = net.forward()

				# loop over the detections
				for i in np.arange(0, detections.shape[2]):
					confidence = detections[0, 0, i, 2]

					if confidence > Base["confidence"]:
						idx = int(detections[0, 0, i, 1])

						if CLASSES[idx] != "car":
							if CLASSES[idx] != "bus":
								if CLASSES[idx] != "motorbike":
									continue

						box = detections[0, 0, i, 3:7] * np.array([W, H, W, H])
						(startX, startY, endX, endY) = box.astype("int")

						tracker = dlib.correlation_tracker()
						rect = dlib.rectangle(int(startX), int(startY), int(endX), int(endY))
						tracker.start_track(rgb, rect)
						cv2.rectangle(frame, (startX, startY), (endX, endY), (0,225,0), 4)
						trackers.append(tracker)

			else:
				for tracker in trackers:
					tracker.update(rgb)
					pos = tracker.get_position()

					startX = int(pos.left())
					startY = int(pos.top())
					endX = int(pos.right())
					endY = int(pos.bottom())
					cv2.rectangle(frame, (startX, startY), (endX, endY), (0,225,0), 4)
					rects.append((startX, startY, endX, endY))

			objects = ct.update(rects)

			for (objectID, centroid) in objects.items():
				to = trackableObjects.get(objectID, None)

				if to is None:
					to = TrackableObject(objectID, centroid)

				elif not to.estimated:
					 
					y = [c[1] for c in to.centroids]
					direction = centroid[1] - np.mean(y)
					to.direction = direction
					if(to.direction>0):
						tet = "down"
						cv2.putText(frame, tet, (centroid[0] - 10, centroid[1] - 20)
							, cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
						if minut%2==0:
							if not to.belowline:
								if(centroid[1] < self.line_point):
									to.belowline = "F"
								else:
									to.belowline = "T"

							else:
								if(to.belowline == "F" and centroid[1] > self.line_point):
									if not to.savethefile:
										#crop = frame[startX:endX, startY:endY]
										cv2.imwrite('output/violation'+str(self.saveno)+'.jpg', frame)
										to.savethefile = 1
										self.saveno += 1
									cv2.circle(frame, (centroid[0]+10, centroid[1]), 4,
									(0, 0, 255), -1)

						else:
							if to.belowline:
								to.belowline = None
							

					elif(to.direction<0):
						tet = "up"
						cv2.putText(frame, tet, (centroid[0] - 10, centroid[1] - 20)
							, cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
			
					elif(to.direction==0):
						tet = "stationary"
						cv2.putText(frame, tet, (centroid[0] - 10, centroid[1] - 20)
							, cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)

				trackableObjects[objectID] = to

				text = "ID {}".format(objectID)
				cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10)
					, cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
				cv2.circle(frame, (centroid[0], centroid[1]), 4,
					(0, 255, 0), -1)
				if minut%2==0:
					cv2.line(frame, (0, self.line_point), (2000, self.line_point), (0,0,255), 4)
				else:
					cv2.line(frame, (0, self.line_point), (2000, self.line_point), (0,255,0), 4)

			if Base["display"]=="true":
				cv2.imshow("frame", frame)
				key = cv2.waitKey(1) & 0xFF

				if key == ord("q"):
					break

			
			totalFrames += 1
			fps.update()

		fps.stop()
		print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
		print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

		cv2.destroyAllWindows()
		vs.release()
Example #42
0
def run(source=0, dispLoc=False):
    # Create the VideoCapture object
    cam = cv2.VideoCapture()
    cam.open(source)
    # If Camera Device is not opened, exit the program
    if not cam.isOpened():
        print "Video device or file couldn't be opened"
        exit()
    
    print "Press key `p` to pause the video to start tracking"
    while True:
        # Retrieve an image and Display it.
        retval, img = cam.read()
        if not retval:
            print "Cannot capture frame device"
            exit()
        if(cv2.waitKey(10)==ord('p')):
            break
        #cv2.namedWindow("Image", cv2.WINDOW_NORMAL)
        cv2.namedWindow("Image")
        cv2.imshow("Image", img)
    cv2.destroyWindow("Image")

    # Co-ordinates of objects to be tracked 
    # will be stored in a list named `points`
    points = get_points.run(img) 

    if not points:
        print "ERROR: No object to be tracked."
        exit()
    
    cv2.namedWindow("Image")
    cv2.imshow("Image", img)

    # Initial co-ordinates of the object to be tracked 
    # Create the tracker object
    tracker = dlib.correlation_tracker()
    # Provide the tracker the initial position of the object
    tracker.start_track(img, dlib.rectangle(*points[0]))

    while True:
        # Read frame from device or file
        retval, img = cam.read()
        if not retval:
            print "Cannot capture frame device | CODE TERMINATING :("
            exit()
        # Update the tracker  
        tracker.update(img)
        # Get the position of the object, draw a 
        # bounding box around it and display it.
        rect = tracker.get_position()
        pt1 = (int(rect.left()), int(rect.top()))
        pt2 = (int(rect.right()), int(rect.bottom()))
        center = ((pt1[0]+pt2[0])/2 , (pt1[1]+pt2[1])/2)
        print (center)
        cv2.rectangle(img, pt1, pt2, (255, 255, 255), 3)
        #print "Object tracked at [{}, {}] \r".format(pt1, pt2),
        if dispLoc:
            loc = (int(rect.left()), int(rect.top()-20))
            txt = "Object tracked at [{}, {}]".format(pt1, pt2)
            cv2.putText(img, txt, loc , cv2.FONT_HERSHEY_SIMPLEX, .5, (255,255,255), 1)
        cv2.namedWindow("Image")
        cv2.imshow("Image", img)
        # Continue until the user presses ESC key
        if cv2.waitKey(1) == 27:
            break

    # Relase the VideoCapture object
    cam.release()
Example #43
0
 def __init__(self):
     self.name = 'DSST'
     self.type = 'rect'
     self.tracker = dlib.correlation_tracker()
     self.res = []
     self.feature_type = 'DSST_dlib'
Example #44
0
def create_dlib_tracker(frame, roi):
    tracker = dlib.correlation_tracker()
    (roi_x1, roi_y1, roi_x2, roi_y2) = roi
    tracker.start_track(frame,
                        dlib.rectangle(roi_x1, roi_y1, roi_x2, roi_y2))
    return tracker
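A minimal sketch of driving create_dlib_tracker frame by frame with OpenCV; the video path and the initial ROI are placeholders, not values from the original code:

import cv2

cap = cv2.VideoCapture("input.mp4")                          # placeholder source
ok, frame = cap.read()
tracker = create_dlib_tracker(frame, (100, 100, 200, 200))   # placeholder ROI

while ok:
    ok, frame = cap.read()
    if not ok:
        break
    tracker.update(frame)
    pos = tracker.get_position()
    cv2.rectangle(frame,
                  (int(pos.left()), int(pos.top())),
                  (int(pos.right()), int(pos.bottom())),
                  (0, 255, 0), 2)
    cv2.imshow("tracking", frame)
    if cv2.waitKey(1) == 27:                                 # ESC quits
        break

cap.release()
cv2.destroyAllWindows()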
Example #45
0
def real_time_test():
   min_distance = 0.01
   knn = Knn()
   knn.run()
   capture = cv2.VideoCapture(0)
   success,frame = capture.read()
   center_pt = (frame.shape[1]//2,frame.shape[0]//2)
   x_center,y_center = center_pt
   curr_rect = None
   i = 0
   not_found_count = 0
   for curr_rect in middleRects(frame.shape,center_x=x_center,center_y=y_center):
      if i == 2:
        break
      i+=1
   tracker = None   
   while (success):
      frame_cpy = frame.copy() 
      cv2.circle(frame_cpy,(x_center,y_center),10,(0,255,255),1)      
      k = cv2.waitKey(5)
      if k == ord('q'):
          break
      success,frame = capture.read()
      x,y,w,h = curr_rect
      cv2.rectangle(frame_cpy,(x,y),(x+w,y+h),(0,255,0),2)
      roi = frame[y:y+h,x:x+w]
      response = knn.processAndPredict(roi)
      distance = np.sum ( np.squeeze(response[3]) )
      #cv2.putText(frame_cpy,'%.2f %s' % (distance,knn.class_names[int(response[0])]),(x,y),cv2.FONT_HERSHEY_COMPLEX,1,(0,0,255),2) 
      if(distance <= min_distance):
        tracker = dlib.correlation_tracker()
        t_rect = dlib.rectangle(x,y,x+w,y+h)
        tracker.start_track( cv2.cvtColor(frame,cv2.COLOR_BGR2RGB) ,t_rect)
        cv2.rectangle(frame_cpy,(x,y),(x+w,y+h),(0,255,0),2)  
        class_idx = int(response[0])
        class_name = knn.class_names[class_idx]
        txt = '%s(%.2f)' % (class_name,distance) 
        cv2.putText(frame_cpy,txt,(x,y),cv2.FONT_HERSHEY_COMPLEX,1,(0,255,0),2) 
      else: 
        if tracker is not None: 
           tracker.update(cv2.cvtColor(frame,cv2.COLOR_BGR2RGB))  
           pos = tracker.get_position()
           frame_cpy = frame.copy()
           roi = frame[int(pos.top()):int(pos.bottom()),int(pos.left()):int(pos.right())]
           if(roi.shape[0]==0)or(roi.shape[1]==0):
              distance = 99999
           else:
              response = knn.processAndPredict(roi)
              distance = np.sum ( np.squeeze(response[3]) )
           if(distance < min_distance):
               not_found_count = 0
               txt = '%s(%.2f)' % (class_name,distance) 
               cv2.putText(frame_cpy,txt,(int(pos.left()),int(pos.top())),cv2.FONT_HERSHEY_COMPLEX,1,(0,255,0),2)
           else:
             not_found_count += 1
             if not_found_count==150: # stop tracking after 150 frames without the ROI being detected
               not_found_count =0
               tracker = None
           cv2.rectangle(frame_cpy,(int(pos.left()),int(pos.top())),(int(pos.right()),int(pos.bottom())),(0,255,0),2)    
      cv2.imshow('',frame_cpy)  
   capture.release()   
   cv2.destroyAllWindows()    
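real_time_test() above also relies on a middleRects helper that is not shown. Based on how it is consumed (an iterable of (x, y, w, h) boxes centred on the frame centre, of which the third is kept), a hypothetical sketch could look like the following; the step size and growth rule are guesses, not the original implementation:

def middleRects(shape, center_x, center_y, step=40):
    # Yield progressively larger (x, y, w, h) boxes centred on
    # (center_x, center_y), clipped to the frame bounds.
    frame_h, frame_w = shape[:2]
    size = step
    while size < min(frame_h, frame_w):
        x = max(0, center_x - size // 2)
        y = max(0, center_y - size // 2)
        yield (x, y, min(size, frame_w - x), min(size, frame_h - y))
        size += step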
"""
This is the demon program using hand detection/tracking to control a mini game: Parcour Bear
http://www.4399.com/flash/177075_1.htm
author: Di Wu
email: [email protected]
"""
import dlib
import cv2
from classes.parcour_bear_game import ParcourBearGame
from classes.hand_detect import detect_hand
from classes.parcour_bear_game import hand_pos, hand_convex_number


tracker = dlib.correlation_tracker()  # dlib correlation tracker initialisation
cap = cv2.VideoCapture(0)   # capture the video using opencv video capture
ParcourBearGame = ParcourBearGame(cap, tracker)
track_flag = False  # track flag indicate whether we have a hand detected and start tracking

#ParcourBearGame.init_parcour_game()

while cap.isOpened():
    ret, img = cap.read()
    if not track_flag:
        count_defects = detect_hand(img, hand_pos)
        if count_defects > hand_convex_number:
            cv2.destroyWindow('Thresholded')
            tracker.start_track(img, dlib.rectangle(hand_pos[0], hand_pos[1], hand_pos[2], hand_pos[3]))
            track_flag = True
            pos = tracker.get_position()
            track_pos_prev = [(pos.left() + pos.right()) / 2., (pos.top() + pos.bottom()) / 2.]
            # we start the game if there is a hand detected
Example #47
0
    def _track(self, direction=FORWARD):
        """Actual tracking based on existing detections"""

        if direction == FORWARD:
            frame_cache = self._frame_cache
        elif direction == BACKWARD:
            frame_cache = reversed(self._frame_cache)
        else:
            raise NotImplementedError()

        self._trackers = {}
        self._confidences = {}
        self._previous = {}
        new_identifier = 0

        for t, frame in frame_cache:

            # update trackers & end those with low confidence
            for identifier, tracker in list(self._trackers.items()):
                confidence = tracker.update(frame)
                self._confidences[identifier] = confidence
                if confidence < self.track_min_confidence:
                    self._kill_tracker(identifier)

            # match trackers with detections at time t
            detections = [d for _, d, status in self._tracking_graph[t]
                          if status == DETECTION]
            match = self._associate(self._trackers, detections)

            # process all matched trackers
            for d, identifier in match.items():

                # connect the previous position of the tracker
                # to the (current) associated detection
                current = (t, detections[d], DETECTION)
                self._tracking_graph.add_edge(
                    self._previous[identifier], current,
                    confidence=self._confidences[identifier])

                # end the tracker
                self._kill_tracker(identifier)

            # process all unmatched trackers
            for identifier, tracker in self._trackers.items():

                # connect the previous position of the tracker
                # to the current position of the tracker
                position = tracker.get_position()
                position = (
                    position.left(),
                    position.top(),
                    position.right(),
                    position.bottom()
                )
                current = (t, position, direction)
                self._tracking_graph.add_edge(
                    self._previous[identifier], current,
                    confidence=self._confidences[identifier])

                # save current position of the tracker for next iteration
                self._previous[identifier] = current

            # start new trackers for all detections
            for d, detection in enumerate(detections):

                # start new tracker
                new_tracker = dlib.correlation_tracker()
                new_tracker.start_track(frame, dlib.drectangle(*detection))
                self._trackers[new_identifier] = new_tracker

                # save previous (t, position, status) tuple
                current = (t, detection, DETECTION)
                self._previous[new_identifier] = current

                # increment tracker identifier
                new_identifier = new_identifier + 1
示例#48
0
def run(source=0, dispLoc=False):
    # Create the VideoCapture object
    cam = cv2.VideoCapture()
    cam.open(source)
    # If Camera Device is not opened, exit the program
    if not cam.isOpened():
        print("Video device or file couldn't be opened")
        exit()

    print("Press key `p` to pause the video to start tracking")
    while True:
        # Retrieve an image and Display it.
        retval, img = cam.read()
        if not retval:
            print("Cannot capture frame device")
            exit()
        if cv2.waitKey(10) == ord('p'):
            break
        #cv2.namedWindow("Image", cv2.WINDOW_NORMAL)
        cv2.namedWindow("Image")
        cv2.imshow("Image", img)
    cv2.destroyWindow("Image")

    # Co-ordinates of objects to be tracked 
    # will be stored in a list named `points`
    points = get_points.run(img) 

    img_height = numpy.size(img,0)
    img_width = numpy.size(img,1)

    deadzone_height = numpy.size(img, 0)/4
    deadzone_width = numpy.size(img, 1)/4

    print('width = ', deadzone_width, 'height = ', deadzone_height)

    if not points:
        print("ERROR: No object to be tracked.")
        exit()
    
    cv2.namedWindow("Image")
    cv2.imshow("Image", img)

    # Initial co-ordinates of the object to be tracked 
    # Create the tracker object
    tracker = dlib.correlation_tracker()
    # Provide the tracker the initial position of the object
    tracker.start_track(img, dlib.rectangle(*points[0]))

    old_error_x = 0
    old_error_y = 0
    kI = 0
    kP = 0
    while True:
        # Read frame from device or file
        retval, img = cam.read()
        if not retval:
            print("Cannot capture frame device | CODE TERMINATING :(")
            exit()
        # Update the tracker  
        tracker.update(img)
        # Get the position of the object, draw a 
        # bounding box around it and display it.
        rect = tracker.get_position()
        pt1 = (int(rect.left()), int(rect.top()))
        pt2 = (int(rect.right()), int(rect.bottom()))
        center = ((pt1[0] + pt2[0]) // 2, (pt1[1] + pt2[1]) // 2)
示例#49
0
 def __init__(self):
     self.tracker = tracker = dlib.correlation_tracker()
     self.success = False
     self.bbox = [0, 0, 0, 0]
     pass
示例#50
0
 def __init__(self, img, bb, rep):
     self.t = dlib.correlation_tracker()
     self.t.start_track(img, bb)
     self.rep = rep
     self.bb = bb
     self.pings = 0
示例#51
0
def gen():
    import sys
    import time
    import json
    import re
    import math
    import threading

    import cv2
    import numpy as np
    import dlib
    #	from vehicle_counter import VehicleCounter

    road = None
    WIDTH = 1280
    HEIGHT = 720

    def estimateSpeed(location1, location2):
        d_pixels = math.sqrt(
            math.pow(location2[0] - location1[0], 2) +
            math.pow(location2[1] - location1[1], 2))
        # ppm = location2[2] / carWidth
        ppm = 16.8
        d_meters = d_pixels / ppm
        # print("d_pixels=" + str(d_pixels), "d_meters=" + str(d_meters))
        fps = 18
        speed = d_meters * fps * 3.6
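        # Worked example with the assumed constants above: a centroid displacement of
        # 16.8 px between consecutive frames is 16.8 / 16.8 = 1.0 m; at 18 fps that is
        # 18 m/s, i.e. 18 * 3.6 = 64.8 km/h, so estimateSpeed((0, 0), (16.8, 0)) ~ 64.8.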
        return speed

        # Write output to video file

    #	out = cv2.VideoWriter('./outpy.avi', cv2.cv.CV_FOURCC('M', 'J', 'P', 'G'), 10, (WIDTH, HEIGHT))

    #if len(sys.argv) < 2:
    #	raise Exception("No road specified.")

    road_name = "80_donner_lake"

    with open('settings.json') as f:
        data = json.load(f)
        print(data)
        try:
            road = data[road_name]
        except KeyError:
            raise Exception('Road name not recognized.')

    WAIT_TIME = 1

    # Colors for drawing on processed frames
    DIVIDER_COLOR = (255, 255, 0)
    BOUNDING_BOX_COLOR = (255, 0, 0)
    CENTROID_COLOR = (0, 0, 255)

    # For cropped rectangles
    ref_points = []
    ref_rects = []

    def nothing(x):
        pass

    def click_and_crop(event, x, y, flags, param):
        global ref_points

        if event == cv2.EVENT_LBUTTONDOWN:
            ref_points = [(x, y)]

        elif event == cv2.EVENT_LBUTTONUP:
            (x1, y1), x2, y2 = ref_points[0], x, y

            ref_points[0] = (min(x1, x2), min(y1, y2))

            ref_points.append((max(x1, x2), max(y1, y2)))

            ref_rects.append((ref_points[0], ref_points[1]))

    # Write cropped rectangles to file for later use/loading
    def save_cropped():
        global ref_rects

        with open('../Car-Speed-Detection-master/settings.json', 'r+') as f:
            data = json.load(f)
            data[road_name]['cropped_rects'] = ref_rects

            f.seek(0)
            json.dump(data, f, indent=4)
            f.truncate()

        print('Saved ref_rects to settings.json!')

    # Load any saved cropped rectangles
    def load_cropped():
        global ref_rects

        ref_rects = road['cropped_rects']

        print('Loaded ref_rects from settings.json!')

    # Remove cropped regions from frame
    def remove_cropped(gray, color):
        cropped = gray.copy()
        cropped_color = color.copy()

        for rect in ref_rects:
            cropped[rect[0][1]:rect[1][1], rect[0][0]:rect[1][0]] = 0
            cropped_color[rect[0][1]:rect[1][1],
                          rect[0][0]:rect[1][0]] = (0, 0, 0)

        return cropped, cropped_color

    def filter_mask(mask):
        # I want some pretty drastic closing
        kernel_close = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (20, 20))
        kernel_open = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (8, 8))
        kernel_dilate = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))

        # Remove noise
        opening = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel_open)
        # Close holes within contours
        closing = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel_close)
        # Merge adjacent blobs
        dilation = cv2.dilate(closing, kernel_dilate, iterations=2)

        return dilation

    def get_centroid(x, y, w, h):
        x1 = w // 2
        y1 = h // 2

        return (x + x1, y + y1)

    def detect_vehicles(mask):

        MIN_CONTOUR_WIDTH = 10
        MIN_CONTOUR_HEIGHT = 10

        contours, hierarchy = cv2.findContours(mask, cv2.RETR_TREE,
                                               cv2.CHAIN_APPROX_SIMPLE)

        matches = []

        # Hierarchy stuff:
        # https://stackoverflow.com/questions/11782147/python-opencv-contour-tree-hierarchy
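        # hierarchy has shape (1, N, 4) with entries [next, previous, first_child, parent];
        # the parent == -1 check below keeps only top-level (outermost) contours.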
        for (i, contour) in enumerate(contours):
            x, y, w, h = cv2.boundingRect(contour)
            contour_valid = (w >= MIN_CONTOUR_WIDTH) and (h >=
                                                          MIN_CONTOUR_HEIGHT)

            if not contour_valid or not hierarchy[0, i, 3] == -1:
                continue

            centroid = get_centroid(x, y, w, h)

            matches.append(((x, y, w, h), centroid))

        return matches

    def process_frame(frame_number, frame, bg_subtractor):
        processed = frame.copy()

        gray = cv2.cvtColor(processed, cv2.COLOR_BGR2GRAY)

        # remove specified cropped regions
        cropped, processed = remove_cropped(gray, processed)

        #if car_counter.is_horizontal:
        cv2.line(processed, (0, 250), (1200, 250), DIVIDER_COLOR, 1)
        #else:
        #	cv2.line(processed, (car_counter.divider, 0), (car_counter.divider, frame.shape[0]), DIVIDER_COLOR, 1)

        fg_mask = bg_subtractor.apply(cropped)
        fg_mask = filter_mask(fg_mask)

        matches = detect_vehicles(fg_mask)

        for (i, match) in enumerate(matches):
            contour, centroid = match

            x, y, w, h = contour

            #cv2.rectangle(processed, (x,y), (x+w-1, y+h-1), BOUNDING_BOX_COLOR, 1)
            cv2.circle(processed, centroid, 2, CENTROID_COLOR, -1)

    #	#.update_count(matches, frame_number, processed)

        cv2.imshow('Filtered Mask', fg_mask)

        return processed, matches

    # https://medium.com/@galen.ballew/opencv-lanedetection-419361364fc0

    def lane_detection(frame):
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        cropped, _ = remove_cropped(gray, frame)

    # I was going to use a Haar cascade, but I decided against it because I don't want
    # to train one, and even if I did it probably wouldn't work across different
    # traffic cameras.

    # I think KNN works better than MOG2, specifically with trucks/large vehicles

    bg_subtractor = cv2.createBackgroundSubtractorKNN(detectShadows=True)
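    # For comparison, the MOG2 alternative would be (not used here):
    # bg_subtractor = cv2.createBackgroundSubtractorMOG2(detectShadows=True)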
    car_counter = None

    load_cropped()

    cap = cv2.VideoCapture(input)
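    # `input` above is assumed to be defined at module scope (a video path or stream
    # URL); if it is not, fall back to road['stream_url'] as in the commented line below.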
    #cap = cv2.VideoCapture(road['stream_url'])
    cap.set(cv2.CAP_PROP_BUFFERSIZE, 2)

    cv2.namedWindow('Source Image')
    cv2.setMouseCallback('Source Image', click_and_crop)

    frame_width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
    frame_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)

    frame_number = -1

    rectangleColor = (0, 255, 0)
    frameCounter = 0
    currentCarID = 0
    fps = 0

    carTracker = {}
    carNumbers = {}
    carLocation1 = {}
    carLocation2 = {}
    speed = [None] * 1000
    while True:
        frame_number += 1
        ret, frame = cap.read()
        if not ret:
            print('Frame capture failed, stopping...')
            break

        start_time = time.time()
        image = frame
        resultImage = image.copy()
        scale_percent = 60  # percent of original size
        width = int(frame.shape[1] * scale_percent / 100)
        height = int(frame.shape[0] * scale_percent / 100)
        dim = (width, height)
        # resize image
        frame = cv2.resize(frame, dim, interpolation=cv2.INTER_AREA)

        #if car_counter is None:
        #car_counter = VehicleCounter(frame.shape[:2], road, cap.get(cv2.CAP_PROP_FPS), samples=0)

        frame, matches = process_frame(frame_number, frame, bg_subtractor)

        #cv2.imshow('Source Image', frame)
        #cv2.imshow('Processed Image', processed)

        frameCounter = frameCounter + 1

        carIDtoDelete = []

        for carID in carTracker.keys():
            trackingQuality = carTracker[carID].update(frame)

            if trackingQuality < 7:
                carIDtoDelete.append(carID)

        for carID in carIDtoDelete:
            print('Removing carID ' + str(carID) + ' from list of trackers.')
            print('Removing carID ' + str(carID) + ' previous location.')
            print('Removing carID ' + str(carID) + ' current location.')
            carTracker.pop(carID, None)
            carLocation1.pop(carID, None)
            carLocation2.pop(carID, None)

        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        for (i, match) in enumerate(matches):
            contour, centroid = match

            x, y, w, h = contour
            #cv2.rectangle(gray, (x, y), (x + w - 1, y + h - 1), (0, 255, 0), 1)
            x_bar = x + 0.5 * w
            y_bar = y + 0.5 * h

            matchCarID = None

            for carID in carTracker.keys():
                trackedPosition = carTracker[carID].get_position()

                t_x = int(trackedPosition.left())
                t_y = int(trackedPosition.top())
                t_w = int(trackedPosition.width())
                t_h = int(trackedPosition.height())

                t_x_bar = t_x + 0.5 * t_w
                t_y_bar = t_y + 0.5 * t_h
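                # Associate this detection with the tracker only if each box contains
                # the other's centroid (a simple mutual-overlap test).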

                if ((t_x <= x_bar <= (t_x + t_w)) and (t_y <= y_bar <=
                                                       (t_y + t_h))
                        and (x <= t_x_bar <= (x + w)) and (y <= t_y_bar <=
                                                           (y + h))):
                    matchCarID = carID

            if matchCarID is None:
                print('Creating new tracker ' + str(currentCarID))

                tracker = dlib.correlation_tracker()
                tracker.start_track(frame, dlib.rectangle(x, y, x + w, y + h))

                carTracker[currentCarID] = tracker
                carLocation1[currentCarID] = [x, y, w, h]

                currentCarID = currentCarID + 1

        # cv2.line(resultImage,(0,480),(1280,480),(255,0,0),5)

        for carID in carTracker.keys():
            trackedPosition = carTracker[carID].get_position()

            t_x = int(trackedPosition.left())
            t_y = int(trackedPosition.top())
            t_w = int(trackedPosition.width())
            t_h = int(trackedPosition.height())

            cv2.rectangle(frame, (t_x, t_y), (t_x + t_w, t_y + t_h),
                          rectangleColor, 4)

            # speed estimation
            carLocation2[carID] = [t_x, t_y, t_w, t_h]

        end_time = time.time()

        if not (end_time == start_time):
            fps = 1.0 / (end_time - start_time)

        # cv2.putText(resultImage, 'FPS: ' + str(int(fps)), (620, 30),cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)

        for i in carLocation1.keys():
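            # frameCounter % 1 is always 0, so speed is re-evaluated on every frame;
            # increase the modulus to sample less frequently.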
            if frameCounter % 1 == 0:
                [x1, y1, w1, h1] = carLocation1[i]
                [x2, y2, w2, h2] = carLocation2[i]

                # print 'previous location: ' + str(carLocation1[i]) + ', current location: ' + str(carLocation2[i])
                carLocation1[i] = [x2, y2, w2, h2]

                # print 'new previous location: ' + str(carLocation1[i])
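                # Speed is estimated only once per car, while its previous top-left y
                # lies in the 275-285 px band; afterwards the cached value is reused.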
                if [x1, y1, w1, h1] != [x2, y2, w2, h2]:
                    if (speed[i] is None or speed[i] == 0) and 275 <= y1 <= 285:
                        speed[i] = estimateSpeed([x1, y1, w1, h1],
                                                 [x2, y2, w2, h2])

                    # if y1 > 275 and y1 < 285:
                    if speed[i] is not None and y1 >= 180:
                        cv2.putText(frame,
                                    str(int(speed[i])) + " km/hr",
                                    (int(x1 + w1 / 2), int(y1 - 5)),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.75,
                                    (255, 255, 255), 2)

            # print ('CarID ' + str(i) + ': speed is ' + str("%.2f" % round(speed[i], 0)) + ' km/h.\n')

            # else:
            #	cv2.putText(resultImage, "Far Object", (int(x1 + w1/2), int(y1)),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)

            # print ('CarID ' + str(i) + ' Location1: ' + str(carLocation1[i]) + ' Location2: ' + str(carLocation2[i]) + ' speed is ' + str("%.2f" % round(speed[i], 0)) + ' km/h.\n')
        cv2.imshow('result', frame)
        frame = cv2.imencode('.jpg', frame)[1].tobytes()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
        # Write the frame into the file 'output.avi'
        # out.write(resultImage)

        if cv2.waitKey(33) == 27:
            break

    print('Closing video capture...')
    cap.release()
    cv2.destroyAllWindows()
    print('Done.')