def live_lec_view(self):
	fps = FPS().start()
	data = b""  # socket data arrives as bytes in Python 3
	payload_size = struct.calcsize("L")  # native unsigned long; must match the sender's format
	while True:
		# read until the length prefix is complete
		while len(data) < payload_size:
			data += self.s.recv(4096)

		packed_msg_size = data[:payload_size]
		data = data[payload_size:]
		msg_size = struct.unpack("L", packed_msg_size)[0]

		# read until the whole pickled frame has arrived
		while len(data) < msg_size:
			data += self.s.recv(4096)

		frame_data = data[:msg_size]
		data = data[msg_size:]
		frame = pickle.loads(frame_data)

		cv2.imshow('Live Lecture - ({} {})'.format(self.lec_host[0], self.lec_host[1]), frame)
		if cv2.waitKey(1) & 0xFF == ord('q'):
			self.s.close()
			break
		fps.update()
	fps.stop()
	cv2.destroyAllWindows()
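For reference, a minimal sender-side sketch of the length-prefixed pickle protocol that live_lec_view expects (the connection object `conn` and the local cv2.VideoCapture(0) source are assumptions, not part of the original snippet):

import pickle
import struct

import cv2

def send_frames(conn):
    # each frame is pickled and prefixed with its struct-packed length so the
    # receiver can split the byte stream back into individual frames
    capture = cv2.VideoCapture(0)  # assumed local camera source
    try:
        while True:
            ok, frame = capture.read()
            if not ok:
                break
            payload = pickle.dumps(frame)
            # "L" must match struct.calcsize("L") on the receiving side
            conn.sendall(struct.pack("L", len(payload)) + payload)
    finally:
        capture.release()
        conn.close()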
Example No. 2
class PiVideoStream:
   def __init__(self, resolution=(400,200), framerate=60):

      #Start up camera
      self.camera = PiCamera()
      self.camera.resolution = resolution
      self.camera.framerate = framerate
      self.camera.rotation = -90
      self.rawCap = PiRGBArray(self.camera, size=resolution)
      self.stream = self.camera.capture_continuous(self.rawCap,
         format="bgr", use_video_port=True)

      self.frame = None
      self.stopped = False
      self.fps = FPS().start()

   def start(self):
      Thread(target=self.update, args=()).start()
      return self

   def update(self):
      # Continually grab frames from camera
      for f in self.stream:
         self.frame = f.array
         self.rawCap.truncate(0)
         self.fps.update()

         if self.stopped:
            self.stream.close()
            self.rawCap.close()
            self.camera.close()
            return

   def read(self):
      return self.frame

   def stop(self):
      self.fps.stop()
      self.stopped = True

   def getFPS(self):
      return self.fps.fps()
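A minimal usage sketch for the PiVideoStream class above (assumes a Raspberry Pi with the picamera stack installed; the window name is arbitrary):

import cv2

stream = PiVideoStream(resolution=(400, 200), framerate=60).start()
try:
    while True:
        frame = stream.read()
        if frame is None:
            # the first frames may not be ready immediately after start()
            continue
        cv2.imshow("PiVideoStream", frame)
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break
finally:
    stream.stop()
    print("Approx. FPS: {:.2f}".format(stream.getFPS()))
    cv2.destroyAllWindows()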
Example No. 3
	def update(self):
		# keep looping infinitely until the thread is stopped
		print("[INFO] Waiting for Feed")
		fps = FPS().start()
		for f in self.stream:
			#print("[INFO] Reading Feed")
			self.frameCaptured = True
			# grab the frame from the stream and clear the stream in
			# preparation for the next frame
			self.frame = f.array
			fps.update()
			self.rawCapture.truncate(0)
			# if the thread indicator variable is set, stop the thread
			# and release camera resources
			if self.stopped:
				self.stream.close()
				self.rawCapture.close()
				self.camera.close()
				fps.stop()
				#print("Thread FPS: {: .2f}".format(fps.fps()))
				return
Example No. 5
 def __init__(self):
     threading.Thread.__init__(self)
     self.running = True
     self.fps_counter = FPS().start()
     self.camera = PiVideoStream(resolution=(config.RAW_FRAME_WIDTH, config.RAW_FRAME_HEIGHT))
     self.camera.start()
     time.sleep(config.CAMERA_WARMUP_TIME) # wait for camera to initialise
     self.frame = None
     self.frame_chroma = None
     self.centre = config.CENTRE
     self.can_see_lines = False
     self.last_yellow_mean = config.WIDTH * 0.8
     self.last_blue_mean = config.WIDTH * 0.2
     self.desired_steering = config.NEUTRAL_STEERING # 0 = left, 0.5 = center, 1 = right
     self.desired_throttle = config.NEUTRAL_THROTTLE # 0 = stop, 0.5 = medium speed, 1 = fastest
Example No. 6
    def calculate_fps(self, frames_no=100):
        fps = FPS().start()

        # Temporarily disable the debug window while benchmarking
        if self.debug:
            self.debug = not self.debug

        for i in range(0, frames_no):
            self.where_lane_be()
            fps.update()

        fps.stop()

        # Re-enable the debug window after benchmarking
        if not self.debug:
            self.debug = not self.debug

        print('Time taken: {:.2f}'.format(fps.elapsed()))
        print('~ FPS : {:.2f}'.format(fps.fps()))
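The example above is a pure benchmark; all of these snippets follow the same imutils.video.FPS pattern: start(), update() once per processed frame, stop(), then elapsed() and fps() report the totals. A minimal self-contained sketch (the sleep stands in for per-frame work):

import time

from imutils.video import FPS

fps = FPS().start()
for _ in range(10):
    time.sleep(0.01)  # placeholder for per-frame processing
    fps.update()
fps.stop()
print("Elapsed: {:.2f}s, approx. FPS: {:.2f}".format(fps.elapsed(), fps.fps()))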
Example No. 7
import time

import cv2
import numpy as np
from imutils.video import FPS
# PiVideoStream is assumed to come from imutils.video (or a local equivalent class)
from imutils.video import PiVideoStream

# Name of the display window
window_name = 'preview'

# Create the capture thread + setup
vs = PiVideoStream()
vs.camera.video_stabilization = True
# Start the video stream + let the camera warm up
vs.start()
time.sleep(2.0)

# Create the display window
cv2.namedWindow(window_name, cv2.WINDOW_AUTOSIZE)

fps = FPS().start()

while True:

    frame = vs.read()
    fps.update()

    cv2.imshow(window_name, frame)
    key = cv2.waitKey(1) & 0xFF
    if key == ord("q"):
        break

fps.stop()
vs.stop()
cv2.destroyAllWindows()
print("Elapsed time: {:.2f}".format(fps.elapsed()))
print("Approx. FPS : {:.2f}".format(fps.fps()))
Example No. 8
classes = [
    "background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus",
    "car", "cat", "chair", "cow", "diningtable", "dog", "horse", "motorbike",
    "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"
]

colors = np.random.uniform(0, 255, size=(len(classes), 3))

# load the corresponding model
print("[LOG] Loading model...")
net = cv2.dnn.readNetFromCaffe(args["prototxt"], args["model"])

print("[LOG] Starting video stream...")
vs = VideoStream(src=0).start()
# Let's allow the cam to warm up first
time.sleep(2.0)
fps = FPS().start()

while True:
    # grab the frame from the threaded video stream and resize it
    # to have a maximum width of 400 pixels
    frame = vs.read()
    frame = imutils.resize(frame, width=400)

    # grab the frame dimensions and convert it to a blob
    (h, w) = frame.shape[:2]
    blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)), 0.007843,
                                 (300, 300), 127.5)

    # pass the blob through the network and obtain the detections and
    # predictions
    net.setInput(blob)
Example No. 9
def distMap(frame1, frame2):
    # per-pixel Euclidean colour distance between two BGR frames, scaled to 0-255
    diff32 = np.float32(frame1) - np.float32(frame2)
    # 441.6729559300637 = np.sqrt(255**2 + 255**2 + 255**2)
    norm32 = np.sqrt(diff32[:,:,0]**2 + diff32[:,:,1]**2 + diff32[:,:,2]**2)/441.6729559300637
    return np.uint8(norm32*255)

# general videocapture from default camera
cap = cv2.VideoCapture(0)
# capturing first two frames
_, frame1 = cap.read()
_, frame2 = cap.read()

time1 = time.time()
activity_count = 0

fps = FPS().start()
# Main Loop
while(True):
    
    #TODO: Activity Monitoring
    _, frame = cap.read()                   # capture image
    rows, cols, _ = np.shape(frame)         # get length & width of image
    # cv2.imshow('dist', frame)               # display normal frame
    dist = distMap(frame1, frame)           # compute Pythagorean (Euclidean) distance
    frame1 = frame2                         # reassign x[-2] frame
    frame2 = frame                          # reassign x[-1] frame
    mod = cv2.GaussianBlur(dist, (9,9), 0)  # Apply gaussian smoothing
    _, thresh = cv2.threshold(mod, 100, 255, 0)     # Thresholding
    _, stDev = cv2.meanStdDev(mod)          # calculate std deviation test
    # cv2.imshow('dist', mod)                 
    # cv2.putText(frame2, "Standard Deviation - {}".format(round(stDev[0][0],0)), (70, 70), font, 1, (255, 0, 255), 1, cv2.LINE_AA)
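As a quick sanity check, distMap from the snippet above maps identical frames to 0 and maximally different frames to 255 (a sketch using small synthetic uint8 BGR arrays):

import numpy as np

a = np.zeros((2, 2, 3), dtype=np.uint8)      # black frame
b = np.full((2, 2, 3), 255, dtype=np.uint8)  # white frame
print(distMap(a, a).max())  # 0   -> no change between identical frames
print(distMap(a, b).min())  # 255 -> maximal per-pixel change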
Example No. 10
def extractFrame(videoPath):

    videoFrames = []
    playerBoxes = []

    # if we are using OpenCV 3.2 OR BEFORE, we can use a special factory
    # function to create our object tracker
    if args.detector == "tracker":
        if int(major) == 3 and int(minor) < 3:
            if args.singleTracker:
                tracker = cv2.Tracker_create(args.tracker.upper())

        # otherwise, for OpenCV 3.3 OR NEWER, we need to explicity call the
        # appropriate object tracker constructor:
        else:
            # initialize a dictionary that maps strings to their corresponding
            # OpenCV object tracker implementations

            if args.singleTracker:
                # map names to the constructor functions (not instances),
                # since the lookup below calls the chosen constructor
                OPENCV_OBJECT_TRACKERS = {
                    "csrt": cv2.legacy.TrackerCSRT_create,
                    "kcf": cv2.legacy.TrackerKCF_create,
                    "mil": cv2.legacy.TrackerMIL_create
                }

                tracker = OPENCV_OBJECT_TRACKERS[args.tracker]()

    # initialize the bounding box coordinates of the object we are going
    # to track
    initBB = None
    # initialize the FPS throughput estimator
    fps = None

    # Set up Neural Net
    net = cv2.dnn.readNet(args.weights, args.config)

    cap = cv2.VideoCapture(videoPath)

    player_threshold = 99999

    if not args.singleTracker:
        # Read first frame
        success, frame = cap.read()
        # quit if unable to read the video file
        if not success:
            print('Failed to read video')
            sys.exit(1)

        ## Select boxes
        bboxes = []
        colors = []

        # OpenCV's selectROI function doesn't work for selecting multiple objects in Python
        # So we will call this function in a loop till we are done selecting all objects
        while True:
            # draw bounding boxes over objects
            # selectROI's default behaviour is to draw box starting from the center
            # when fromCenter is set to false, you can draw box starting from top left corner
            bbox = cv2.selectROI('MultiTracker', frame)
            bboxes.append(bbox)
            colors.append((randint(0, 255), randint(0, 255), randint(0, 255)))
            print("Press q to quit selecting boxes and start tracking")
            print("Press any other key to select next object")
            k = cv2.waitKey(0) & 0xFF
            print(k)
            if (k == 113):  # q is pressed
                break

        print('Selected bounding boxes {}'.format(bboxes))

        # Create MultiTracker object
        trackers = cv2.legacy.MultiTracker_create()

        # Initialize MultiTracker
        for bbox in bboxes:
            trackers.add(createTrackerByName(args.tracker), frame, bbox)

    frameCount = 0
    while (cap.isOpened()):
        print(frameCount)
        # Take each frame
        _, frame = cap.read()

        if not _:
            print("Video Ended")
            break

        Width = frame.shape[1]
        Height = frame.shape[0]

        # Convert BGR to HSV
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

        # Hard-Coded Color
        # court_color = np.uint8([[[188, 218, 236]]])
        court_color = np.uint8([[[189, 204, 233]]])

        hsv_court_color = cv2.cvtColor(court_color, cv2.COLOR_BGR2HSV)
        hue = hsv_court_color[0][0][0]

        # define range of blue color in HSV - Again HARD CODED! :(
        lower_color = np.array([hue - 5, 10, 10])
        upper_color = np.array([hue + 5, 225, 225])

        # Threshold the HSV image
        mask = cv2.inRange(hsv, lower_color, upper_color)

        # Opening
        kernel = np.ones((2, 2), np.uint8)
        opening = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)

        # Bitwise-AND mask and original image
        res = cv2.bitwise_and(frame, frame, mask=opening)
        cv2.imshow('res', res)

        if args.draw_line:
            # Canny Edge Detector
            gray = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)

            high_thresh, thresh_im = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
            low_thresh = 0.5 * high_thresh
            edges = cv2.Canny(gray, low_thresh, high_thresh, apertureSize=3)
            cv2.imshow('Canny Edge Detector', edges)

            # # Hough Lines
            minLineLength = 200
            maxLineGap = 500
            lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 100, minLineLength=minLineLength, maxLineGap=maxLineGap)

            # Blue color in BGR
            LINE_COLOR = (255, 0, 0)

            if lines is None:
                continue
            else:
                a, b, c = lines.shape
                # only use up to the first two detected lines
                for i in range(min(2, a)):
                    for x1, y1, x2, y2 in lines[i]:
                        # cv2.line(image, start_point, end_point, color, thickness)
                        if args.draw_line:
                            cv2.line(frame, (x1, y1), (x2, y2), LINE_COLOR, 3)
                        # only compare the lower corner of y value
                        player_threshold = min(player_threshold, y1, y2)

        # Detect People
        if args.detector == "HOG":
            # initialize the HOG descriptor/person detector
            hog = cv2.HOGDescriptor()
            hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())

            orig = frame.copy()

            # detect people in the image
            (rects, weights) = hog.detectMultiScale(frame, winStride=(4, 4),
                                                    padding=(8, 8), scale=1.05)
            # draw the original bounding boxes
            for (x, y, w, h) in rects:
                cv2.rectangle(orig, (x, y), (x + w, y + h), (0, 0, 255), 2)
            # apply non-maxima suppression to the bounding boxes using a
            # fairly large overlap threshold to try to maintain overlapping
            # boxes that are still people
            rects = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rects])
            pick = non_max_suppression(rects, probs=None, overlapThresh=0.1)
            # draw the final bounding boxes
            for (xA, yA, xB, yB) in pick:
                cv2.rectangle(frame, (xA, yA), (xB, yB), (0, 255, 0), 2)

        elif args.detector == "yolov3":
            scale = 0.00392
            blob = cv2.dnn.blobFromImage(frame, scale, (416, 416), (0, 0, 0), True, crop=False)
            net.setInput(blob)
            outs = net.forward(get_output_layers(net))

            class_ids = []
            confidences = []
            boxes = []
            conf_threshold = 0.5
            nms_threshold = 0.4

            for out in outs:
                for detection in out:
                    scores = detection[5:]
                    class_id = np.argmax(scores)
                    confidence = scores[class_id]
                    if confidence > 0.5:
                        center_x = int(detection[0] * Width)
                        center_y = int(detection[1] * Height)
                        w = int(detection[2] * Width)
                        h = int(detection[3] * Height)
                        x = center_x - w / 2
                        y = center_y - h / 2
                        class_ids.append(class_id)
                        confidences.append(float(confidence))
                        boxes.append([x, y, w, h])

            indices = cv2.dnn.NMSBoxes(boxes, confidences, conf_threshold, nms_threshold)

            k = 0
            # NMSBoxes returns a flat array in newer OpenCV versions and a
            # column vector in older ones; flatten to handle both
            for i in np.array(indices).flatten():
                box = boxes[i]
                x = box[0]
                y = box[1]
                w = box[2]
                h = box[3]
                pad = 5
                # print(player_threshold)
                if (round(y + h) < player_threshold):
                    k += 1
                    continue
                else:
                    draw_prediction(frame, class_ids[i], round(x - pad), round(y - pad), round(x + w + pad),
                                    round(y + h + pad))

        elif args.detector == "tracker":

            # check to see if we are currently tracking an object
            if args.singleTracker:
                if initBB is not None:
                    # grab the new bounding box coordinates of the object
                    (success, box) = tracker.update(frame)
                    # check to see if the tracking was a success
                    if success:
                        (x, y, w, h) = [int(v) for v in box]
                        cv2.rectangle(frame, (x, y), (x + w, y + h),
                                      (0, 255, 0), 2)
                    # update the FPS counter
                    fps.update()
                    fps.stop()
                    # initialize the set of information we'll be displaying on
                    # the frame
                    info = [
                        ("Tracker", tracker),
                        ("Success", "Yes" if success else "No"),
                        ("FPS", "{:.2f}".format(fps.fps())),
                    ]
                    # loop over the info tuples and draw them on our frame
                    for (i, (k, v)) in enumerate(info):
                        text = "{}: {}".format(k, v)
                        cv2.putText(frame, text, (10, Height - ((i * 20) + 20)),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
            else:
                videoFrames.append(frame)
                # get updated location of objects in subsequent frames
                success, boxes = trackers.update(frame)
                playerBoxes.append(boxes)

                # draw tracked objects
                for i, newbox in enumerate(boxes):
                    p1 = (int(newbox[0]), int(newbox[1]))
                    p2 = (int(newbox[0] + newbox[2]), int(newbox[1] + newbox[3]))
                    cv2.rectangle(frame, p1, p2, colors[i], 2, 1)

        else:
            continue

        # show the output frame
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF
        # if the 's' key is selected, we are going to "select" a bounding
        # box to track
        if key == ord("s"):
            if args.singleTracker:
                # select the bounding box of the object we want to track (make
                # sure you press ENTER or SPACE after selecting the ROI)
                initBB = cv2.selectROI("Frame", frame, fromCenter=False,
                                       showCrosshair=True)
                # start OpenCV object tracker using the supplied bounding box
                # coordinates, then start the FPS throughput estimator as well
                tracker.init(frame, initBB)
                fps = FPS().start()
        # if the `q` key was pressed, break from the loop
        elif key == ord("q"):
            break

        frameCount += 1

    cap.release()
    cv2.destroyAllWindows()

    return videoFrames, playerBoxes, Width, Height, colors
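extractFrame above relies on a createTrackerByName helper that is not shown in the snippet; a plausible minimal sketch, assuming the cv2.legacy tracker API used elsewhere in the function, might look like this:

import cv2

def createTrackerByName(name):
    # map a tracker name to a freshly constructed cv2.legacy tracker instance
    constructors = {
        "csrt": cv2.legacy.TrackerCSRT_create,
        "kcf": cv2.legacy.TrackerKCF_create,
        "mil": cv2.legacy.TrackerMIL_create,
    }
    if name not in constructors:
        raise ValueError("Unsupported tracker: {}".format(name))
    return constructors[name]()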
Example No. 11
    def algorithm_processing(self):
        if self.isAlgorithm:
            return
        if self.isReSet:
            self.bbox_list = self.bbox_list_predict[self.INIT]
            self.isStatus = self.isReSet
            self.isReSet = False
        if len(self.bbox_list) == 0:
            print('[INFO] Error in b-box choosing.')
            QMessageBox.information(
                self, 'Warning',
                'You Must Confirm B-box First. (Shortcut Key: Alt + B)',
                QMessageBox.Ok)
            return
        self.isPainting = False
        self.pushButton_bboxSetting.setText("&B-box Setting")
        if self.isStatus == 1:
            self.pushButton_locationLoading.setText('&Stream Suspending')
        elif self.isStatus == 2:
            self.pushButton_videoLoading.setText('&Stream Suspending')
        elif self.isStatus == 3:
            self.first_frame = True
            return
        self.label_image.setCursor(QCursor(Qt.ArrowCursor))
        self.isAlgorithm = True
        trackers = []
        self.progressBar.setFormat('INITIALIZE')
        for b in range(len(self.bbox_list)):
            trackers.append(deepcopy(self.tracker))
        print("[INFO] Starting pictures stream.")
        fps_cal = None
        self.first_frame = True
        save_loc_d = './ans/' + self.video_name.split('/')[-1]
        save_loc_t = save_loc_d + '/' + str(self.tracker_name)
        self.save_location = save_loc_t
        self.N = self.INIT
        for frame in self.get_frames(self.video_name):
            if self.first_frame:
                for b in range(len(self.bbox_list)):
                    trackers[b].init(frame, self.bbox_list[b])
                while (len(self.bbox_list_predict) < self.INIT):
                    self.bbox_list_predict.append([])
                if len(self.bbox_list_predict) > self.N:
                    self.bbox_list_predict[self.N] = self.bbox_list
                else:
                    self.bbox_list_predict.append(self.bbox_list)
                self.first_frame = False
                if self.checkBox.checkState():
                    system('mkdir ' + save_loc_t.replace('/', '\\'))
                    system('mkdir ' +
                           (save_loc_t + '/mask').replace('/', '\\'))
                fps_cal = FPS().start()
                self.analysis_init()
                self.pushButton_bboxSetting.setText("&B-box Pausing")
                self.textBrowser.append('————————————\nTotal Frames: ' +
                                        str(self.F))
            else:
                if self.isStatus == 0:
                    break
                masks = None
                self.N += 1
                if len(self.bbox_list_predict) > self.N:
                    self.bbox_list_predict[self.N] = []
                else:
                    self.bbox_list_predict.append([])
                for b in range(len(self.bbox_list)):
                    outputs = trackers[b].track(frame)
                    assert 'polygon' in outputs
                    polygon = np.array(outputs['polygon']).astype(np.int32)
                    cv2.polylines(frame, [polygon.reshape((-1, 1, 2))], True,
                                  (0, 255, 0), 2)
                    # mask label should be (255 - object selecting number)
                    mask = ((outputs['mask'] > cfg.TRACK.MASK_THERSHOLD) *
                            (255 - b))
                    mask = mask.astype(np.uint8)
                    if masks is None:
                        masks = mask
                    else:
                        masks += mask
                    polygon_xmean = (polygon[0] + polygon[2] + polygon[4] +
                                     polygon[6]) / 4
                    polygon_ymean = (polygon[1] + polygon[3] + polygon[5] +
                                     polygon[7]) / 4
                    cv2.rectangle(
                        frame,
                        (int(polygon_xmean) - 1, int(polygon_ymean) - 1),
                        (int(polygon_xmean) + 1, int(polygon_ymean) + 1),
                        (0, 255, 0), 2)
                    self.behavior_analysis(frame, b,
                                           (polygon_xmean, polygon_ymean),
                                           (mask > 0))

                    bbox = tuple(list(map(int, outputs['bbox'])))
                    self.bbox_list_predict[self.N].append(bbox)
                frame[:, :, 2] = (masks > 0) * 255 * 0.75 + (masks == 0) * frame[:, :, 2]
                fps_cal.update()
                fps_cal.stop()
                text = "FPS: {:.2f}".format(fps_cal.fps())
                cv2.putText(frame, text, (60, 30), cv2.FONT_HERSHEY_SIMPLEX,
                            0.7, (0, 0, 255), 2)

                if self.checkBox.checkState():
                    save_loc_i = save_loc_t + "/" + str(
                        self.N).zfill(4) + ".jpg"
                    cv2.imwrite(save_loc_i, frame)
                    save_loc_m = save_loc_t + "/mask/" + str(
                        self.N).zfill(4) + ".jpg"
                    cv2.imwrite(save_loc_m, masks)
                self.label_image.setPixmap(QPixmap(self.cv2_to_qimge(frame)))
                QApplication.processEvents()
            self.progressBar.setProperty('value', (self.N - self.INIT) * 100 /
                                         (self.F - 1 - self.INIT))
            self.progressBar.setFormat('HANDLE: %p% [{:d}/{:d}]'.format(
                self.N, self.F - 1))
            self.scrollBar.setProperty('value', self.N)
            QApplication.processEvents()

        self.bbox_list = []
        self.rectList = []
        if not self.isReSet:
            self.paint_frame = None
        del trackers, fps_cal
        print("[INFO] Ending pictures stream.")
        if self.checkBox.checkState():
            # save trackless data
            # jpg_list = glob(save_loc_t + '/*.jpg')
            jpg_num_list = []
            for j in range(len(self.bbox_list_predict)):
                if len(self.bbox_list_predict[j]):
                    jpg_num_list.append(j)
            jpg_num_list.sort()
            last_j = 0
            for i, now_j in enumerate(jpg_num_list):
                self.progressBar.setProperty('value',
                                             i * 100 / (len(jpg_num_list) - 1))
                self.progressBar.setFormat('Pausing: %p%')
                QApplication.processEvents()
                for j in range(last_j, now_j):
                    frame = self.get_frame(self.video_name, j)
                    save_loc_i = save_loc_t + "/" + str(j).zfill(4) + ".jpg"
                    cv2.imwrite(save_loc_i, frame)
                    save_loc_m = save_loc_t + "/mask/" + str(j).zfill(
                        4) + ".jpg"
                    cv2.imwrite(save_loc_m,
                                np.zeros((frame.shape[0], frame.shape[1])))
                last_j = now_j + 1
            # save bbox data
            with open(save_loc_t + '/bbox.txt', 'w') as f:
                for item in self.bbox_list_predict:
                    f.write(str(item) + '\n')
            np.save(save_loc_t + '/bbox.npy', np.array(self.bbox_list_predict))
            # save to a video
            if last_j == self.F:
                fourcc = cv2.VideoWriter_fourcc('M', 'P', '4', '2')
                if self.isStatus == 1:
                    frame_0 = self.get_frame(self.video_name, 0)
                    wri = cv2.VideoWriter(save_loc_t + '/video.avi', fourcc,
                                          30,
                                          (frame_0.shape[1], frame_0.shape[0]))
                elif self.isStatus == 2:
                    cap = cv2.VideoCapture(self.video_name)
                    wri = cv2.VideoWriter(save_loc_t + '/video.avi', fourcc,
                                          int(cap.get(5)),
                                          (int(cap.get(3)), int(cap.get(4))))
                for j in range(last_j):
                    frame = cv2.imread(save_loc_t + '/' + str(j).zfill(4) +
                                       ".jpg")
                    wri.write(frame)
                    self.progressBar.setProperty('value', j * 100 / (last_j))
                    self.progressBar.setFormat('SAVE: %p% [{:d}/{:d}]'.format(
                        j, last_j))
                    QApplication.processEvents()
                pickle.dump(self.analysis_box,
                            open(save_loc_t + '/analysis.pkl', 'wb'),
                            protocol=4)
                self.progressBar.setProperty('value', last_j * 100 / (last_j))
                self.progressBar.setFormat('SAVE: %p% [{:d}/{:d}]'.format(
                    last_j, last_j))
                QApplication.processEvents()
                wri.release()
                self.pushButton_bboxSetting.setText('&B-box Setting')
                QApplication.processEvents()

        self.isStatus = 0
        self.pushButton_locationLoading.setText('&Location Loading')
        self.pushButton_videoLoading.setText('&Video Loading')
        self.progressBar.setFormat('STAND BY')
        self.progressBar.setProperty('value', 0)
        # self.F = f + 1 + self.INIT
        # self.scrollBar.setMaximum(f + self.INIT)
        self.isAlgorithm = False
Example No. 12
import time

import cv2
import imutils
import numpy as np
# FPS and FileVideoStream are assumed to come from imutils.video
from imutils.video import FPS, FileVideoStream

from FaceClassifier import FaceClassifier

if __name__ == '__main__':
    # BD cricket team
    #classifier = FaceClassifier('/Users/FaceRecognition/embeddings/bdCricClassifier.pkl')
    #fileVideoStream = FileVideoStream('/Users/test_video/bd_cric_team_singing.mp4').start()
    # CSE faculty
    classifier = FaceClassifier(
        '/Users/FaceRecognition/embeddings/classifier.pkl')
    #fileVideoStream = FileVideoStream('/Users/test_video/mzi_fb_page_opening.mp4').start()
    fileVideoStream = FileVideoStream(
        '/Users/test_video/video_saiful_saif.mp4').start()

    time.sleep(1.0)
    fps = FPS().start()
    counter = 0
    temp_bb = None
    temp_name = None

    while fileVideoStream.more():

        # grab the frame from the threaded video file stream, resize
        # it, and convert it to grayscale (while still retaining 3
        # channels)
        frame = fileVideoStream.read()
        frame = imutils.resize(frame, width=450)
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        frame = np.dstack([frame, frame, frame])

        if counter >= 15:
Example No. 13
    def read_from_url(self, url, path_to_out, obj):
        self.url = url
        self.path_to_out = path_to_out
        self.obj = obj

        sk_fr = 30
        conf = 0.4

        CLASSES = [
            "background", "aeroplane", "bicycle", "bird", "boat", "bottle",
            "bus", "car", "cat", "chair", "cow", "diningtable", "dog", "horse",
            "motorbike", "person", "pottedplant", "sheep", "sofa", "train",
            "tvmonitor"
        ]

        print("[INFO] loading model...")
        net = cv2.dnn.readNetFromCaffe('MobileNetSSD_deploy.prototxt',
                                       'MobileNetSSD_deploy.caffemodel')

        print("[INFO] starting video stream...")
        vs = VideoStream(src=url).start()
        time.sleep(2.0)

        writer = None

        W = None
        H = None

        ct = CentroidTracker(maxDisappeared=40, maxDistance=50)
        trackers = []
        trackableObjects = {}

        totalFrames = 0
        total = 0
        fps = FPS().start()

        while True:

            frame = vs.read()
            frame = frame[1] if args.get("input", False) else frame

            frame = imutils.resize(frame, width=500)
            rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

            if W is None or H is None:
                (H, W) = frame.shape[:2]

            # create the output writer once, not on every frame
            if writer is None:
                fourcc = cv2.VideoWriter_fourcc(*"MJPG")
                writer = cv2.VideoWriter(path_to_out, fourcc, 30, (W, H), True)

            status = "Waiting"
            rects = []

            if totalFrames % sk_fr == 0:

                status = "Detecting"
                trackers = []

                blob = cv2.dnn.blobFromImage(frame, 0.007843, (W, H), 127.5)
                net.setInput(blob)
                detections = net.forward()

                for i in np.arange(0, detections.shape[2]):

                    confidence = detections[0, 0, i, 2]

                    if confidence > conf:

                        idx = int(detections[0, 0, i, 1])

                        if CLASSES[idx] != obj:
                            continue

                        box = detections[0, 0, i, 3:7] * np.array([W, H, W, H])
                        (startX, startY, endX, endY) = box.astype("int")

                        tracker = dlib.correlation_tracker()
                        rect = dlib.rectangle(startX, startY, endX, endY)
                        tracker.start_track(rgb, rect)

                        trackers.append(tracker)

            else:
                for tracker in trackers:

                    status = "Tracking"

                    tracker.update(rgb)
                    pos = tracker.get_position()

                    startX = int(pos.left())
                    startY = int(pos.top())
                    endX = int(pos.right())
                    endY = int(pos.bottom())

                    rects.append((startX, startY, endX, endY))

            objects = ct.update(rects)

            for (objectID, centroid) in objects.items():

                to = trackableObjects.get(objectID, None)

                if to is None:
                    to = TrackableObject(objectID, centroid)

                else:

                    y = [c[1] for c in to.centroids]
                    direction = centroid[1] - np.mean(y)
                    to.centroids.append(centroid)

                    if not to.counted:

                        if direction < 0 and centroid[1] < H // 2:
                            total += 1
                            to.counted = True

                        elif direction > 0 and centroid[1] > H // 2:
                            total += 1
                            to.counted = True

                trackableObjects[objectID] = to

                text = "ID {}".format(objectID)
                cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
                cv2.circle(frame, (centroid[0], centroid[1]), 4,
                           (255, 255, 255), -1)

            info = [
                ('Total', total),
                ("Status", status),
            ]

            for (i, (k, v)) in enumerate(info):
                text = "{}: {}".format(k, v)
                cv2.putText(frame, text, (10, H - ((i * 20) + 20)),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)

            if writer is not None:
                writer.write(frame)

            cv2.imshow("Frame", frame)
            key = cv2.waitKey(1) & 0xFF

            if key == ord("q"):
                break

            totalFrames += 1
            fps.update()

        fps.stop()
        print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
        print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

        if writer is not None:
            writer.release()

        if not args.get("input", False):
            vs.stop()

        else:
            vs.release()

        cv2.destroyAllWindows()
        print(objectID + 1)
Example No. 14
def traking_object():

    global video_path
    global coor_Warehouse
    global count_frame
    global next_box
    global initBB
    count_frame = 1

    (major, minor) = cv2.__version__.split(".")[:2]
    OPENCV_OBJECT_TRACKERS = {
        "csrt": cv2.TrackerCSRT_create,
        "kcf": cv2.TrackerKCF_create,
        "boosting": cv2.TrackerBoosting_create,
        "mil": cv2.TrackerMIL_create,
        "tld": cv2.TrackerTLD_create,
        "medianflow": cv2.TrackerMedianFlow_create,
        "mosse": cv2.TrackerMOSSE_create
    }
    tracker = OPENCV_OBJECT_TRACKERS["csrt"]()

    if video_path == "":
        print("[INFO] starting video stream...")
        vs = VideoStream(src=0).start()
    else:
        vs = cv2.VideoCapture(video_path)

    fps = None
    temp = 0
    while True:

        frame = vs.read()
        # VideoStream.read() returns the frame directly, while
        # VideoCapture.read() returns a (grabbed, frame) tuple
        if video_path != "":
            frame = frame[1]

        if frame is None:
            break

        frame = imutils.resize(frame, 800)
        (H, W) = frame.shape[:2]

        draw_Coor(frame)

        if (initBB is None and temp == 20) or next_box:
            cv2.imwrite('1.jpg', frame)
            temp_mas = get_pair_labels('1.jpg', frame)

            if temp_mas is not None:
                tracker = OPENCV_OBJECT_TRACKERS["csrt"]()
                initBB = (temp_mas[1], temp_mas[3],
                          temp_mas[0] - temp_mas[1] + 20,
                          temp_mas[2] - temp_mas[3] + 20)
                tracker.init(frame, initBB)
                fps = FPS().start()
                next_box = False
            temp = 0

        if initBB is not None:
            (success, box) = tracker.update(frame)

            if success:
                (x, y, w, h) = [int(v) for v in box]
                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

            fps.update()
            fps.stop()
            info = [
                ("Tracker", "csrt"),
                ("Success", "Yes" if success else "No"),
                ("FPS", "{:.2f}".format(fps.fps())),
            ]

            for (i, (k, v)) in enumerate(info):
                text = "{}: {}".format(k, v)
                cv2.putText(frame, text, (10, H - ((i * 20) + 20)),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
            detection_Box(x, y, w, h, frame)

        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF

        count_frame += 1
        temp += 1

        if key == ord("s"):
            initBB = cv2.selectROI("Frame",
                                   frame,
                                   fromCenter=False,
                                   showCrosshair=True)
            print(initBB)
            tracker = OPENCV_OBJECT_TRACKERS["csrt"]()
            tracker.init(frame, initBB)
            fps = FPS().start()

        elif key == ord("q"):
            break
    if video_path == "":
        vs.stop()
    else:
        vs.release()

    cv2.destroyAllWindows()
Example No. 15
def main():
    global fps, vs
    tv = 0
    person = 0
    CanSwitch = False
    OPEN = False
    file_path, model, conf = init_params()
    # load our serialized model from disk
    print("[INFO] loading model...")
    net = cv2.dnn.readNetFromCaffe(file_path, model)

    # initialize the video stream, allow the cammera sensor to warmup,
    # and initialize the FPS counter
    print("[INFO] starting video stream...")
    vs = VideoStream(src=0).start()
    time.sleep(2.0)
    fps = FPS().start()

    # loop over the frames from the video stream
    while True:
        # grab the frame from the threaded video stream and resize it
        # to have a maximum width of 400 pixels
        frame = vs.read()
        frame = imutils.resize(frame, 400)

        # grab the frame dimensions and convert it to a blob
        (h, w) = frame.shape[:2]

        detections = detect_and_predict(frame, net)

        # loop over the detections
        for i in np.arange(0, detections.shape[2]):
            # extract the confidence (i.e., probability) associated with
            # the prediction
            confidence = detections[0, 0, i, 2]

            # filter out weak detections by ensuring the `confidence` is
            # greater than the minimum confidence
            if confidence > conf:
                # extract the index of the class label from the
                # `detections`, then compute the (x, y)-coordinates of
                # the bounding box for the object
                idx = int(detections[0, 0, i, 1])
                box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
                (startX, startY, endX, endY) = box.astype("int")

                # draw the prediction on the frame
                label = "{}: {:.2f}%".format(CLASSES[idx], confidence * 100)
                cv2.rectangle(frame, (startX, startY), (endX, endY),
                              COLORS[idx], 2)
                y = startY - 15 if startY - 15 > 15 else startY + 15
                cv2.putText(frame, label, (startX, y),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[idx], 2)

                if CLASSES[idx] == "person":
                    personMiddlePoint = (((startX + endX) / 2),
                                         ((startY + endY) / 2))
                    personXrightlimit = endX
                    personXleftlimit = startX
                    personXhalfSide = abs(personMiddlePoint[0] -
                                          personXrightlimit)
                    tperson = time.perf_counter()
                    person = 1
                    if tv == 1:
                        if abs(ttv - tperson) < 0.5:
                            if abs(personMiddlePoint[0] - tvMiddlePoint[0]) < (
                                    personXhalfSide + tvXhalfSide):
                                print(
                                    abs(personMiddlePoint[0] -
                                        tvMiddlePoint[0]))
                                print((personXhalfSide + tvXhalfSide))
                                OPEN = True
                        else:
                            OPEN = False
                            tv = 0

                if CLASSES[idx] == "bottle":
                    tvMiddlePoint = (((startX + endX) / 2),
                                     ((startY + endY) / 2))
                    tvXrightlimit = endX
                    tvXleftlimit = startX
                    tvXhalfSide = abs(tvMiddlePoint[0] - tvXrightlimit)
                    ttv = time.perf_counter()
                    tv = 1
                    if person == 1:
                        if abs(ttv - tperson) < 0.5:
                            if abs(personMiddlePoint[0] - tvMiddlePoint[0]) < (
                                    personXhalfSide + tvXhalfSide):
                                print(
                                    abs(personMiddlePoint[0] -
                                        tvMiddlePoint[0]))
                                print((personXhalfSide + tvXhalfSide))
                                OPEN = True
                        else:
                            OPEN = False
                            person = 0

                if OPEN == True and CanSwitch == False:
                    print("Opening the door")
                    publish.single("ledStatus", "1", hostname="127.0.0.1")
                    CanSwitch = True

                if OPEN == False and CanSwitch == True:
                    print("Closing the door")
                    publish.single("ledStatus", "0", hostname="127.0.0.1")
                    CanSwitch = False

        # show the output frame and update the FPS counter
        cv2.imshow("Frame", frame)
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break
        fps.update()
Example No. 16
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--picamera", type=int, default=-1, help="whether or not")
args = vars(ap.parse_args())


# HSV bounds for white
whiteLower = (0, 0, 240)
whiteUpper = (180, 25, 255)

# blueLower = (105, 100, 50)
# blueUpper = (120, 255, 255)

camera = VideoStream(usePiCamera=args["picamera"] > 0).start()


time.sleep(1.0)
fps = FPS().start()

while True:

    frame = camera.read()

    frame = imutils.resize(frame, width=400)
    blurred = cv2.GaussianBlur(frame, (11, 11), 0)
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    # bmask = cv2.inRange(hsv, blueLower, blueUpper)
    # bmask = cv2.erode(bmask, None, iterations=2)
    # bmask = cv2.dilate(bmask, None, iterations=2)

    mask = cv2.inRange(hsv, whiteLower, whiteUpper)
Example No. 17
# Now, let's get hold of the op that we can be processed to get the output.
# In the original network y_pred is the tensor that is the prediction of the network
y_pred = graph.get_tensor_by_name("y_pred:0")

## Let's feed the images to the input placeholders
x= graph.get_tensor_by_name("x:0") 
y_true = graph.get_tensor_by_name("y_true:0") 
y_test_images = np.zeros((1, (len(CLASSES)))) 

# initialize the video stream, allow the camera sensor to warm up,
# and initialize the FPS counter
print("[INFO] starting video stream...")
vs = VideoStream(src=0).start()
time.sleep(2.0)
fps = FPS().start()

# loop over the frames from the video stream
while True:

    # grab the frame from the threaded video stream and resize it
    # to have a maximum width of 400 pixels
    frame = vs.read()
    frame = imutils.resize(frame, width=400)
    
    
    # grab the frame dimensions and convert it to a blob
    (h, w) = frame.shape[:2]
    blob = cv2.dnn.blobFromImage(cv2.resize(frame, (128, 128)), 0.007843, (128, 128), 127.5)

    image_size=128
Example No. 18
class SpecificWorker(GenericWorker):
    def __init__(self, proxy_map):
        super(SpecificWorker, self).__init__(proxy_map)
        # self.Period = 2000
        # self.timer.start(self.Period)

        self.available_trackers = [
            "dlib", "mosse", "csrt", "kcf", "medianflow"
        ]

        self.classes = [
            "background", "aeroplane", "bicycle", "bird", "boat", "bottle",
            "bus", "car", "cat", "chair", "cow", "diningtable", "dog", "horse",
            "motorbike", "person", "pottedplant", "sheep", "sofa", "train",
            "tvmonitor"
        ]

        self.opencv_trackers = {
            "csrt": cv2.TrackerCSRT_create,
            "kcf": cv2.TrackerKCF_create,
            "medianflow": cv2.TrackerMedianFlow_create,
            "mosse": cv2.TrackerMOSSE_create
        }

        self.input = None
        self.save_results = None
        self.selected_tracker = None
        self.model = None
        self.prototxt = None

        self.net = None
        self.fps = None
        self.stream = None
        self.frame = None
        self.rgb = None
        self.rects = []
        self.trackers = None

        self.confidence = 0.5
        self.skip_frames = 30
        self.width = None
        self.height = None
        self.read_ipstream = False
        self.writer = None
        self.ct = CentroidTracker(maxDisappeared=50, maxDistance=40)
        self.trackableObjects = {}
        self.totalFrames = 0
        self.peopleInside = 0
        self.Up = 0
        self.Down = 0
        self.dict_id_position = {}

        if self.selected_tracker != "dlib":
            self.trackers = cv2.MultiTracker_create()
        else:
            self.trackers = []

        self.peopleCounterMachine.start()

    def __del__(self):
        print('SpecificWorker destructor')

    def setParams(self, params):
        self.input = params["video_input"]
        self.save_results = params["save_results"] == "True"
        self.selected_tracker = params["tracker"]
        self.model = params["model"]
        self.prototxt = params["prototxt"]

        if self.selected_tracker not in self.available_trackers:
            print("[ERROR] tracker not found")
            exit(-1)

        if self.input.startswith('http'):
            self.read_ipstream = True
            print("readIP")
        else:
            self.read_ipstream = False
            print("videoCapture")
        return True

    # =============== Slots methods for State Machine ===================
    # ===================================================================
    #
    # sm_initialize_video
    #
    @QtCore.Slot()
    def sm_initialize_video(self):
        print("Entered state initialize_video")

        print("[INFO] loading model...")
        self.net = cv2.dnn.readNetFromCaffe(self.prototxt, self.model)

        if self.read_ipstream:
            print("[INFO] starting video stream...")
            try:
                self.stream = ReadIPStream(self.input)
            except:
                print("Error trying readIPStream")
                self.initialize_videotofinalize_video.emit()
        else:
            print("[INFO] opening video file...")
            try:
                self.stream = cv2.VideoCapture(self.input)
            except:
                print("Error trying videoCapture")
                self.initialize_videotofinalize_video.emit()

        self.fps = FPS().start()

        self.initialize_videotoprocessing_video.emit()

    #
    # sm_processing_video
    #
    @QtCore.Slot()
    def sm_processing_video(self):
        print("Entered state processing_video")
        pass

    #
    # sm_finalize_video
    #
    @QtCore.Slot()
    def sm_finalize_video(self):
        print("Entered state finalize_video")

        cv2.destroyAllWindows()

        if self.writer is not None:
            self.writer.release()

        self.fps.stop()
        print("[INFO] total frames", self.totalFrames)
        print("[INFO] elapsed time: {:.2f}".format(self.fps.elapsed()))
        print("[INFO] approx. FPS: {:.2f}".format(self.fps.fps()))

        exit(-1)

    ## Processing video sub states
    #
    # sm_reading_frames
    #
    @QtCore.Slot()
    def sm_reading_frames(self):
        print("Entered state reading_frames")

        if self.read_ipstream:
            process_image, self.frame = self.stream.read_stream()
        else:
            process_image, self.frame = self.stream.read()

        if self.read_ipstream == False and self.frame is None:
            self.processing_videotofinalize_video.emit()

        if process_image:
            self.frame = imutils.resize(self.frame, width=500)

            self.rgb = cv2.cvtColor(self.frame, cv2.COLOR_BGR2RGB)

            if self.width is None or self.height is None:
                (self.height, self.width) = self.frame.shape[:2]

            if self.save_results == True and self.writer is None:
                try:
                    current_date = datetime.now()
                    date = datetime.strftime(current_date, "%m%d%H%M")
                    video_dir = os.path.join(
                        "results",
                        self.selected_tracker + "_result_" + date + ".mp4")
                    print(video_dir)
                    self.writer = cv2.VideoWriter(
                        video_dir, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'),
                        10, (self.width, self.height))

                except:
                    print("Error trying to create video writer")

            self.rects = []

            if self.totalFrames % self.skip_frames == 0:
                self.reading_framestodetecting.emit()
            else:
                self.reading_framestotracking.emit()

    #
    # sm_detecting
    #
    @QtCore.Slot()
    def sm_detecting(self):
        print("Entered state detecting")

        if self.selected_tracker == "dlib":
            self.trackers = []
        else:
            self.trackers = cv2.MultiTracker_create()

        ### Detection with the neural network
        # blob = cv2.dnn.blobFromImage(self.frame, 0.007843, (self.width, self.height), 127.5)
        # self.net.setInput(blob)
        # detections = self.net.forward()

        # for i in np.arange(0, detections.shape[2]):

        #     conf = detections[0, 0, i, 2]

        #     if conf > self.confidence:

        #         idx = int(detections[0, 0, i, 1])
        #         if self.classes[idx] != "person":
        #             continue

        #         box = detections[0, 0, i, 3:7] * np.array([self.width, self.height, self.width, self.height])
        #         (startX, startY, endX, endY) = box.astype("int")

        #         if self.selected_tracker == "dlib":

        #             tracker = dlib.correlation_tracker()
        #             rect = dlib.rectangle(startX, startY, endX, endY)
        #             tracker.start_track(self.rgb, rect)
        #             self.trackers.append(tracker)

        #         else:
        #             (x, y, w, h) = (startX, startY, endX - startX, endY - startY)
        #             tracker = self.opencv_trackers[self.selected_tracker]()
        #             self.trackers.add(tracker, self.frame, (x, y, w, h))

        #         self.rects.append((startX, startY, endX, endY))

        ## Detection with OpenPifPaf
        im = TImage()
        im.image = self.frame.data
        im.height, im.width, im.depth = self.frame.shape
        people = self.peopleserver_proxy.processImage(im, 1)

        for p in people:
            joints = p.joints
            points = []  # avoid shadowing the built-in `list`
            for jointname, pose in joints.items():
                if pose.score > 0:
                    points.append((int(pose.x), int(pose.y)))
                    cv2.circle(self.frame, (int(pose.x), int(pose.y)), 2,
                               (0, 0, 255), -1)

            x, y, w, h = cv2.boundingRect(cv2.UMat(np.asarray(points)))
            if self.selected_tracker == "dlib":
                tracker = dlib.correlation_tracker()
                rect = dlib.rectangle(x, y, x + w, y + h)
                tracker.start_track(self.rgb, rect)
                self.trackers.append(tracker)

            else:
                tracker = self.opencv_trackers[self.selected_tracker]()
                self.trackers.add(tracker, self.frame, (x, y, w, h))

            self.rects.append((x, y, x + w, y + h))

        # iterate over the people returned; for each one keep the joints with score > 0
        # for each person, compute the bounding rect that contains their skeleton points
        # (a double conversion was needed to compute the bounding rect)
        # depending on the tracker in use, add the rectangles detected for each person

        self.detectingtoupdate.emit()

    #
    # sm_tracking
    #
    @QtCore.Slot()
    def sm_tracking(self):
        print("Entered state tracking")

        if self.selected_tracker == "dlib":
            for tracker in self.trackers:
                tracker.update(self.rgb)
                pos = tracker.get_position()

                startX = int(pos.left())
                startY = int(pos.top())
                endX = int(pos.right())
                endY = int(pos.bottom())

                self.rects.append((startX, startY, endX, endY))

        else:
            (success, boxes) = self.trackers.update(self.frame)
            if success:
                for box in boxes:
                    (x, y, w, h) = [int(v) for v in box]
                    self.rects.append((x, y, x + w, y + h))

        self.trackingtoupdate.emit()

    #
    # sm_update
    #
    @QtCore.Slot()
    def sm_update(self):
        print("Entered state update")

        cv2.line(self.frame, (0, self.height // 2),
                 (self.width, self.height // 2), (0, 255, 255), 2)

        objects = self.ct.update(self.rects)

        # loop over the tracked objects
        for (objectID, centroid) in objects.items():

            to = self.trackableObjects.get(objectID, None)

            # if there is no existing trackable object, create one
            if to is None:
                to = TrackableObject(objectID, centroid)

            else:
                # the difference between the y-coordinate of the *current*
                # centroid and the mean of *previous* centroids gives the
                # direction in which the object is moving
                y = [c[1] for c in to.centroids]
                direction = centroid[1] - np.mean(y)
                to.centroids.append(centroid)

                # check to see if the object has already been counted
                if not to.counted:

                    # direction is negative (moving up)
                    # AND the centroid is above the centre line, count the object
                    if direction < 0 and centroid[1] < self.height // 2:
                        self.Up += 1
                        to.counted = True

                    # direction is positive (moving down)
                    # AND the centroid is below the centre line, count the object
                    elif direction > 0 and centroid[1] > self.height // 2:
                        self.Down += 1
                        to.counted = True
            # store the trackable object
            # trackableObjects[objectID] = to

            # get count of people inside
            self.peopleInside = self.Down - self.Up

            self.trackableObjects[objectID] = to

            text = "ID {}".format(objectID)
            cv2.putText(self.frame, text, (centroid[0] - 10, centroid[1] - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
            cv2.circle(self.frame, (centroid[0], centroid[1]), 4, (0, 255, 0),
                       -1)

        # for rect in self.rects:
        #     (startX, startY, endX, endY) = rect
        #     cv2.rectangle(self.frame, (startX, startY), (endX, endY),
        #                   (0, 255, 0), 2)

        info = [
            ("People inside", self.peopleInside),
            ("Tracker", self.selected_tracker),
        ]

        for (i, (k, v)) in enumerate(info):
            text = "{}: {}".format(k, v)
            cv2.putText(self.frame, text, (10, self.height - ((i * 20) + 20)),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)

        if self.writer is not None:
            self.writer.write(self.frame)

        cv2.imshow("Frame", self.frame)
        key = cv2.waitKey(1) & 0xFF

        if key == 27:  # Escape
            self.processing_videotofinalize_video.emit()

        if key == ord('s'):
            currentDate = datetime.now()
            date = datetime.strftime(currentDate, "%m%d%H%M")
            photoname = os.path.join("results", "captura_" + date + ".jpg")
            print("[SAVING]", photoname)
            cv2.imwrite(photoname, self.frame)

        self.totalFrames += 1
        self.fps.update()

        self.updatetoreading_frames.emit()
Example No. 19
	(H, W) = frame.shape[:2]

	if frame_number % wait_time == 0:  # and tracker_lost == 1:
		wait_time = 50
		print("detecting")

		(rects, weights) = hog.detectMultiScale(frame, winStride=(4, 4),padding=(8, 8), scale=1.05)
		rects = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rects])
		pick = non_max_suppression(rects, probs=None, overlapThresh=0.65)		
		if len(pick) > 0:
			print(pick[0])
			xa, ya, xb, yb = pick[0]
			initBB = (xa, ya, xb-xa, yb-ya)
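			# OpenCV trackers expect (x, y, width, height), so convert the
			# corner-format box from NMS before calling tracker.init()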
			tracker = OPENCV_OBJECT_TRACKERS[args["tracker"]]()
			tracker.init(frame, initBB)	
			fps = FPS().start()
		
		

	# check to see if we are currently tracking an object
	if initBB is not None:
		# grab the new bounding box coordinates of the object
		(success, box) = tracker.update(frame)

		# check to see if the tracking was a success
		if success:
			tracker_lost = 0
			(x, y, w, h) = [int(v) for v in box]
			cv2.rectangle(frame, (x, y), (x + w, y + h),
				(0, 255, 0), 2)
Exemplo n.º 20
0
class Main:
    def __init__(self, file=None, camera=False):
        print("Loading pipeline...")
        self.file = file
        self.camera = camera
        self.create_pipeline()
        self.start_pipeline()
        self.COUNTER = 0
        self.mCOUNTER = 0
        self.hCOUNTER = 0
        self.TOTAL = 0
        self.mTOTAL = 0
        self.hTOTAL = 0
        self.fps = FPS()
        self.fps.start()

    def create_pipeline(self):
        print("Creating pipeline...")
        self.pipeline = depthai.Pipeline()
        if self.camera:
            print("Creating Color Camera...")
            cam = self.pipeline.createColorCamera()
            cam.setPreviewSize(300, 300)
            cam.setResolution(
                depthai.ColorCameraProperties.SensorResolution.THE_1080_P)
            cam.setInterleaved(False)
            cam.setCamId(0)
            cam_xout = self.pipeline.createXLinkOut()
            cam_xout.setStreamName("cam_out")
            cam.preview.link(cam_xout.input)

        self.models("models/face-detection-retail-0004.blob", "face")
        self.models("models/face_landmark_160x160.blob", "land68")

    def models(self, model_path, name):
        print(f"开始创建{model_path}神经网络")
        model_in = self.pipeline.createXLinkIn()
        model_in.setStreamName(f"{name}_in")
        model_nn = self.pipeline.createNeuralNetwork()
        model_nn.setBlobPath(str(Path(model_path).resolve().absolute()))
        model_nn_xout = self.pipeline.createXLinkOut()
        model_nn_xout.setStreamName(f"{name}_nn")
        model_in.out.link(model_nn.input)
        model_nn.out.link(model_nn_xout.input)

    def start_pipeline(self):
        self.device = depthai.Device(self.pipeline, usb2Mode=True)
        print("Starting pipeline...")
        self.device.startPipeline()
        self.face_in = self.device.getInputQueue("face_in")
        self.face_nn = self.device.getOutputQueue("face_nn")
        self.land68_in = self.device.getInputQueue("land68_in")
        self.land68_nn = self.device.getOutputQueue("land68_nn")
        if self.camera:
            self.cam_out = self.device.getOutputQueue("cam_out", 1, True)

    def full_frame_cords(self, cords):
        original_cords = self.face_coords[0]
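        # the landmark coords are relative to the face crop; adding the crop's
        # top-left corner maps them back to full-frame coords, e.g. (hypothetical)
        # crop origin (120, 80) + landmark (15, 22) -> (135, 102)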
        return [
            original_cords[0 if i % 2 == 0 else 1] + val
            for i, val in enumerate(cords)
        ]

    def full_frame_bbox(self, bbox):
        relative_cords = self.full_frame_cords(bbox)
        height, width = self.frame.shape[:2]
        y_min = max(0, relative_cords[1])
        y_max = min(height, relative_cords[3])
        x_min = max(0, relative_cords[0])
        x_max = min(width, relative_cords[2])
        result_frame = self.frame[y_min:y_max, x_min:x_max]
        return result_frame, relative_cords

    def draw_bbox(self, bbox, color):
        cv2.rectangle(self.debug_frame, (bbox[0], bbox[1]), (bbox[2], bbox[3]),
                      color, 2)

    def run_face(self):
        nn_data = run_nn(self.face_in, self.face_nn,
                         {"data": to_planar(self.frame, (300, 300))})
        results = to_bbox_result(nn_data)
        self.face_coords = [
            frame_norm(self.frame, *obj[3:7]) for obj in results
            if obj[2] > 0.4
        ]
        if len(self.face_coords) == 0:
            return False
        if len(self.face_coords) > 0:
            for face_coord in self.face_coords:
                face_coord[0] -= 15
                face_coord[1] -= 15
                face_coord[2] += 15
                face_coord[3] += 15
            self.face_frame = [
                self.frame[face_coord[1]:face_coord[3],
                           face_coord[0]:face_coord[2]]
                for face_coord in self.face_coords
            ]
        if debug:
            for bbox in self.face_coords:
                self.draw_bbox(bbox, (10, 245, 10))
        return True

    def run_land68(self, face_frame, count):
        try:
            nn_data = run_nn(self.land68_in, self.land68_nn,
                             {"data": to_planar(face_frame, (160, 160))})
            out = to_nn_result(nn_data)
            result = frame_norm(face_frame, *out)
            eye_left = []
            eye_right = []
            mouth = []
            hand_points = []
            for i in range(72, 84, 2):
                # cv2.circle(self.debug_frame,(result[i]+self.face_coords[count][0],result[i+1]+self.face_coords[count][1]),2,(255,0,0),thickness=1,lineType=8,shift=0)
                eye_left.append((out[i], out[i + 1]))
            for i in range(84, 96, 2):
                # cv2.circle(self.debug_frame,(result[i]+self.face_coords[count][0],result[i+1]+self.face_coords[count][1]),2,(255,0,0),thickness=1,lineType=8,shift=0)
                eye_right.append((out[i], out[i + 1]))
            for i in range(96, len(result), 2):
                # cv2.circle(self.debug_frame,(result[i]+self.face_coords[count][0],result[i+1]+self.face_coords[count][1]),2,(255,0,0),thickness=1,lineType=8,shift=0)
                if i == 100 or i == 116 or i == 104 or i == 112 or i == 96 or i == 108:
                    # cv2.circle(self.debug_frame,(result[i]+self.face_coords[count][0],result[i+1]+self.face_coords[count][1]),2,(255,0,0),thickness=1,lineType=8,shift=0)
                    mouth.append(np.array([out[i], out[i + 1]]))

            for i in range(16, 110, 2):
                if i == 16 or i == 60 or i == 72 or i == 90 or i == 96 or i == 108:
                    cv2.circle(self.debug_frame,
                               (result[i] + self.face_coords[count][0],
                                result[i + 1] + self.face_coords[count][1]),
                               2, (255, 0, 0),
                               thickness=1,
                               lineType=8,
                               shift=0)
                    hand_points.append(
                        (result[i] + self.face_coords[count][0],
                         result[i + 1] + self.face_coords[count][1]))

            ret, rotation_vector, translation_vector, camera_matrix, dist_coeffs = get_pose_estimation(
                self.frame.shape, np.array(hand_points, dtype='double'))
            ret, pitch, yaw, roll = get_euler_angle(rotation_vector)
            (nose_end_point2D,
             jacobian) = cv2.projectPoints(np.array([(0.0, 0.0, 1000.0)]),
                                           rotation_vector, translation_vector,
                                           camera_matrix, dist_coeffs)
            p1 = (int(hand_points[1][0]), int(hand_points[1][1]))
            p2 = (int(nose_end_point2D[0][0][0]),
                  int(nose_end_point2D[0][0][1]))
            cv2.line(self.debug_frame, p1, p2, (255, 0, 0), 2)
            euler_angle_str = 'Y:{}, X:{}, Z:{}'.format(pitch, yaw, roll)
            cv2.putText(self.debug_frame, euler_angle_str, (10, 20),
                        cv2.FONT_HERSHEY_COMPLEX_SMALL, 0.5, (0, 0, 255))
            # print(euler_angle_str)
            if pitch < 0:
                self.hCOUNTER += 1
                if self.hCOUNTER >= 20:
                    cv2.putText(self.debug_frame, "SLEEP!!!",
                                (self.face_coords[count][0],
                                 self.face_coords[count][1] - 10),
                                cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 3)
            else:
                if self.hCOUNTER >= 3:
                    self.hTOTAL += 1
                self.hCOUNTER = 0

            left_ear = self.eye_aspect_ratio(eye_left)
            right_ear = self.eye_aspect_ratio(eye_right)
            ear = (left_ear + right_ear) / 2.0
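            # EAR = (||p2-p6|| + ||p3-p5||) / (2 * ||p1-p4||); it stays roughly
            # constant (~0.2-0.3) while the eye is open and drops towards 0 as
            # the eye closes (cf. Soukupová & Čech, 2016), hence the 0.15 cutoff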
            if ear < 0.15:  # eye aspect ratio threshold: 0.15
                self.COUNTER += 1
                if self.COUNTER >= 20:
                    cv2.putText(self.debug_frame, "SLEEP!!!",
                                (self.face_coords[count][0],
                                 self.face_coords[count][1] - 10),
                                cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 3)
            else:
                # if the EAR stayed below the threshold for at least 3
                # consecutive frames, count it as one completed blink
                if self.COUNTER >= 3:  # threshold: 3 frames
                    self.TOTAL += 1
                # reset the eye-frame counter
                self.COUNTER = 0

            mouth_ratio = self.mouth_aspect_ratio(mouth)
            if mouth_ratio > 0.5:
                self.mCOUNTER += 1
            else:
                if self.mCOUNTER >= 3:
                    self.mTOTAL += 1
                self.mCOUNTER = 0

            # print(self.TOTAL,self.mTOTAL,self.hTOTAL)
            cv2.putText(
                self.debug_frame,
                "eye:{:d},mouth:{:d},head:{:d}".format(self.TOTAL, self.mTOTAL,
                                                       self.hTOTAL), (10, 40),
                cv2.FONT_HERSHEY_COMPLEX_SMALL, 0.5, (
                    255,
                    0,
                    0,
                ))
            if self.TOTAL >= 50 or self.mTOTAL >= 15 or self.hTOTAL >= 10:
                cv2.putText(self.debug_frame, "Danger!!!", (100, 200),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 3)
        except Exception:
            # skip frames where landmark extraction or pose estimation fails
            pass

    def mouth_aspect_ratio(self, mouth):
        A = np.linalg.norm(mouth[1] - mouth[5])  # 51, 59
        B = np.linalg.norm(mouth[2] - mouth[4])  # 53, 57
        C = np.linalg.norm(mouth[0] - mouth[3])  # 49, 55
        mar = (A + B) / (2.0 * C)
        return mar

    def eye_aspect_ratio(self, eye):
        A = euclidean(eye[1], eye[5])
        B = euclidean(eye[2], eye[4])
        C = euclidean(eye[0], eye[3])
        return (A + B) / (2.0 * C)

    def parse(self):
        if debug:
            self.debug_frame = self.frame.copy()

        face_success = self.run_face()
        if face_success:
            for i in range(len(self.face_frame)):
                self.run_land68(self.face_frame[i], i)
        if debug:
            aspect_ratio = self.frame.shape[1] / self.frame.shape[0]
            cv2.imshow(
                "Camera_view",
                cv2.resize(self.debug_frame,
                           (int(900), int(900 / aspect_ratio))))
            if cv2.waitKey(1) == ord('q'):
                cv2.destroyAllWindows()
                raise StopIteration()

    def run_video(self):
        cap = cv2.VideoCapture(str(Path(self.file).resolve().absolute()))
        while cap.isOpened():
            read_correctly, self.frame = cap.read()
            if not read_correctly:
                break

            try:
                self.parse()
            except StopIteration:
                break
        cap.release()

    def run_camera(self):
        while True:
            self.frame = np.array(self.cam_out.get().getData()).reshape(
                (3, 300, 300)).transpose(1, 2, 0).astype(np.uint8)
            self.fps.update()
            try:
                self.parse()
            except StopIteration:
                break

    def run(self):
        if self.file is not None:
            self.run_video()
        else:
            self.run_camera()
        self.fps.stop()
        print("FPS:{:.2f}".format(self.fps.fps()))
        del self.device
Exemplo n.º 21
0
    def count_single_object_from_video(self, path_to_video, path_to_out, obj):
        self.path_to_video = path_to_video
        self.path_to_out = path_to_out
        conf = 0.4
        sk_fr = 30
        net = cv2.dnn.readNetFromCaffe('MobileNetSSD_deploy.prototxt',
                                       'MobileNetSSD_deploy.caffemodel')
        vs = cv2.VideoCapture(self.path_to_video)
        writer = None
        W = None
        H = None
        ct = CentroidTracker(maxDisappeared=40, maxDistance=50)
        trackers = []
        trackableObjects = {}
        totalFrames = 0
        #totalDown = 0
        #totalUp = 0
        total = 0
        fps = FPS().start()
        while True:
            frame = vs.read()
            frame = frame[1]
            # stop when the video file has no more frames
            if frame is None:
                break
            frame = imutils.resize(frame, width=500)
            rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            if W is None or H is None:
                (H, W) = frame.shape[:2]
            # lazily create the video writer once the frame size is known
            if writer is None:
                fourcc = cv2.VideoWriter_fourcc(*"MJPG")
                writer = cv2.VideoWriter(self.path_to_out, fourcc, 30, (W, H),
                                         True)
            status = "Waiting"
            rects = []
            if totalFrames % sk_fr == 0:

                status = "Detecting"
                trackers = []

                blob = cv2.dnn.blobFromImage(frame, 0.007843, (W, H), 127.5)
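                # scalefactor 0.007843 ≈ 1/127.5 with mean 127.5, so pixel values
                # are mapped to roughly [-1, 1], the preprocessing the Caffe
                # MobileNet-SSD model was trained with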
                net.setInput(blob)
                detections = net.forward()

                for i in np.arange(0, detections.shape[2]):

                    confidence = detections[0, 0, i, 2]
                    if confidence > conf:

                        idx = int(detections[0, 0, i, 1])

                        if CLASSES[idx] != obj:
                            continue

                        box = detections[0, 0, i, 3:7] * np.array([W, H, W, H])
                        (startX, startY, endX, endY) = box.astype("int")

                        tracker = dlib.correlation_tracker()
                        rect = dlib.rectangle(startX, startY, endX, endY)
                        tracker.start_track(rgb, rect)

                        trackers.append(tracker)

            else:

                for tracker in trackers:

                    status = "Tracking"

                    tracker.update(rgb)
                    pos = tracker.get_position()

                    startX = int(pos.left())
                    startY = int(pos.top())
                    endX = int(pos.right())
                    endY = int(pos.bottom())

                    rects.append((startX, startY, endX, endY))
            #cv2.line(frame, (0, H // 2), (W, H // 2), (0, 255, 255), 2)
            objects = ct.update(rects)

            for (objectID, centroid) in objects.items():

                to = trackableObjects.get(objectID, None)

                if to is None:
                    to = TrackableObject(objectID, centroid)

                else:

                    y = [c[1] for c in to.centroids]
                    direction = centroid[1] - np.mean(y)
                    to.centroids.append(centroid)

                    if not to.counted:

                        if direction < 0 and centroid[1] < H // 2:
                            #totalUp += 1
                            total += 1
                            to.counted = True

                        elif direction > 0 and centroid[1] > H // 2:
                            #totalDown += 1
                            total += 1
                            to.counted = True

                trackableObjects[objectID] = to

                text = "ID {}".format(objectID)
                cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
                cv2.circle(frame, (centroid[0], centroid[1]), 4,
                           (255, 255, 255), -1)

            info = [
                #("Up", totalUp),
                #("Down", totalDown),
                ('Total', total),
                ("Status", status),
            ]

            for (i, (k, v)) in enumerate(info):
                text = "{}: {}".format(k, v)
                #cv2.putText(frame, text, (10, H - ((i * 20) + 20)),
                #cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)

            if writer is not None:
                writer.write(frame)

            cv2.imshow("Frame", frame)
            key = cv2.waitKey(1) & 0xFF

            # if the `q` key was pressed, break from the loop
            if key == ord("q"):
                break

            totalFrames += 1
            fps.update()

        fps.stop()
        print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
        print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

        if writer is not None:
            writer.release()

        vs.release()

        cv2.destroyAllWindows()
    def grab(self, cam, queue):
        try:
            cap = VideoStream(src=0).start()
            input_shape = self.emotion_classifier.layers[0].input_shape[1:3]

            self.tracker_initiated = False

            fontpath = self.cfg.FONT_PATH
            font = ImageFont.truetype(fontpath, 24)

            fps = FPS().start()
            frame_ind = 0
            CURRENT_FPS = 0
            emotions = [0] * 7

            self.camLabel.setText(
                "<html><head/><body><p align=\"center\"><span style=\" font-size:24pt;\">Камера</span></p></body></html>"
            )

            while (self.running):
                OUT_WIDTH = self.camForm.frameGeometry().width()
                OUT_HEIGHT = self.camForm.frameGeometry().height()

                scale_x = self.cfg.IN_WIDTH / OUT_WIDTH
                scale_y = self.cfg.IN_HEIGHT / OUT_HEIGHT

                frame = cap.read()

                gray = cv2.resize(frame,
                                  (self.cfg.IN_WIDTH, self.cfg.IN_HEIGHT))
                gray = cv2.cvtColor(gray, cv2.COLOR_BGR2GRAY)

                out_frame = cv2.resize(frame, (OUT_WIDTH, OUT_HEIGHT))

                self.gray = gray

                if self.tracker_initiated:
                    success, box = self.tracker.update(gray)

                    if success:
                        x, y, w, h = [int(i) for i in box]

                        x = 0 if x < 0 else (
                            self.cfg.IN_WIDTH -
                            1 if x > self.cfg.IN_WIDTH - 1 else x)
                        y = 0 if y < 0 else (
                            self.cfg.IN_HEIGHT -
                            1 if y > self.cfg.IN_HEIGHT - 1 else y)

                        roi = gray[y:y + h, x:x + w]
                        roi = cv2.resize(roi,
                                         input_shape,
                                         interpolation=cv2.INTER_AREA)

                        roi = roi.astype("float") / 255.0
                        roi = img_to_array(roi)
                        roi = np.expand_dims(roi, axis=0)
                        preds = self.emotion_classifier.predict(roi)[0]
                        emotions = [int(emotion * 100) for emotion in preds]
                        emotion = np.argmax(preds)

                        x, y, w, h = (x // scale_x, y // scale_y, w // scale_x,
                                      h // scale_y)
                        x, y, w, h = (int(x), int(y), int(w), int(h))

                        draw_border(out_frame, (x, y), (x + w, y + h),
                                    self.cfg.SELECTED_COLOR, 2, 5, 15)

                        out_frame = Image.fromarray(out_frame)
                        draw = ImageDraw.Draw(out_frame)

                        tW, tH = font.getsize(self.cfg.EMOTIONS_RUS[emotion])
                        draw.rectangle(
                            ((x - 2, y + h + 5), (x + tW + 2, y + h + tH + 2)),
                            fill=self.cfg.SELECTED_COLOR)
                        draw.text((x, y + h),
                                  self.cfg.EMOTIONS_RUS[emotion],
                                  font=font,
                                  fill=(255, 255, 255, 255))

                        out_frame = np.array(out_frame)

                    else:
                        self.resetFace()

                else:
                    self.faces = self.face_detector.detectMultiScale(gray)

                    for x, y, w, h in self.faces:
                        x, y, w, h = (x // scale_x, y // scale_y, w // scale_x,
                                      h // scale_y)
                        x, y, w, h = (int(x), int(y), int(w), int(h))

                        draw_border(out_frame, (x, y), (x + w, y + h),
                                    self.cfg.NOT_SELECTED_COLOR, 2, 5, 15)

                #Draw fps
                draw_text_w_background(out_frame, 'FPS: %.2f' % CURRENT_FPS,
                                       (20, 20), self.cfg.font,
                                       self.cfg.fontScale, self.cfg.fontColor,
                                       self.cfg.bgColor, 1)

                if queue.qsize() < 10:
                    #Calculate fps
                    fps.update()
                    frame_ind += 1
                    #Refresh CURRENT_FPS every 10 frames
                    if (frame_ind == 10):
                        fps.stop()
                        CURRENT_FPS = fps.fps()
                        fps = FPS().start()
                        frame_ind = 0

                    result = {
                        'img': out_frame,
                        'emotions':
                        emotions if self.tracker_initiated else [0] * 7
                    }

                    queue.put(result)
        except Exception as e:
            print(e)

        finally:
            cap.stop()
def tracker_speed():
    # construct the argument parser and parse the arguments
    #ap = argparse.ArgumentParser()
    #ap.add_argument("-c", "--conf", required=True,
    #	help="Path to the input configuration file")
    #ap.add_argument("-i", "--input", required=True,
    #	help="Path to the input video file")
    #args = vars(ap.parse_args())

    # load the configuration file
    #conf = Conf(args["conf"])
    conteudo = open(conf_path).read()

    conf = json.loads(conteudo)

    # initialize the list of class labels MobileNet SSD was trained to
    # detect
    CLASSES = [
        "background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus",
        "car", "cat", "chair", "cow", "diningtable", "dog", "horse",
        "motorbike", "person", "pottedplant", "sheep", "sofa", "train",
        "tvmonitor"
    ]

    # check to see if the Dropbox should be used
    if conf["use_dropbox"]:
        # connect to dropbox and start the session authorization process
        client = dropbox.Dropbox(conf["dropbox_access_token"])
        print("[SUCCESS] dropbox account linked")

    # load our serialized model from disk
    print("[INFO] loading model...")
    net = cv2.dnn.readNetFromCaffe(conf["prototxt_path"], conf["model_path"])
    #net.setPreferableTarget(cv2.dnn.DNN_TARGET_MYRIAD)

    # initialize the video stream and allow the camera sensor to warmup
    print("[INFO] warming up camera...")
    #vs = VideoStream(src=0).start()
    vs = cv2.VideoCapture(0)
    tm.sleep(2.0)

    # initialize the frame dimensions (we'll set them as soon as we read
    # the first frame from the video)
    H = None
    W = None

    # instantiate our centroid tracker, then initialize a list to store
    # each of our dlib correlation trackers, followed by a dictionary to
    # map each unique object ID to a TrackableObject
    ct = CentroidTracker(maxDisappeared=conf["max_disappear"],
                         maxDistance=conf["max_distance"])
    trackers = []
    trackableObjects = {}

    # keep the count of total number of frames
    totalFrames = 0

    # initialize the log file
    logFile = None

    # initialize the list of various points used to calculate the avg of
    # the vehicle speed
    points = [("A", "B"), ("B", "C"), ("C", "D")]

    # start the frames per second throughput estimator
    fps = FPS().start()

    # loop over the frames of the stream
    while True:
        # grab the next frame from the stream, store the current
        # timestamp, and store the new date
        ret, frame = vs.read()
        ts = datetime.now()
        newDate = ts.strftime("%m-%d-%y")

        # check if the frame is None, if so, break out of the loop
        if frame is None:
            break

        # if the log file has not been created or opened
        if logFile is None:
            # build the log file path and create/open the log file
            logPath = os.path.join(conf["output_path"], conf["csv_name"])
            logFile = open(logPath, mode="a")

            # set the file pointer to end of the file
            pos = logFile.seek(0, os.SEEK_END)

            # if we are using dropbox and this is an empty log file, then
            # write the column headings
            if conf["use_dropbox"] and pos == 0:
                logFile.write("Year,Month,Day,Time,Speed (in KMPH),ImageID\n")

            # otherwise, we are not using dropbox; if this is an empty log
            # file, then write the column headings
            elif pos == 0:
                logFile.write("Year,Month,Day,Time,Speed (in KMPH)\n")

        # resize the frame
        frame = imutils.resize(frame, width=conf["frame_width"])
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

        # if the frame dimensions are empty, set them
        if W is None or H is None:
            (H, W) = frame.shape[:2]
            meterPerPixel = conf["distance"] / W

        # initialize our list of bounding box rectangles returned by
        # either (1) our object detector or (2) the correlation trackers
        rects = []

        # check to see if we should run a more computationally expensive
        # object detection method to aid our tracker
        if totalFrames % conf["track_object"] == 0:
            # initialize our new set of object trackers
            trackers = []

            # convert the frame to a blob and pass the blob through the
            # network and obtain the detections
            blob = cv2.dnn.blobFromImage(frame,
                                         size=(300, 300),
                                         ddepth=cv2.CV_8U)
            net.setInput(blob,
                         scalefactor=1.0 / 127.5,
                         mean=[127.5, 127.5, 127.5])
            detections = net.forward()

            # loop over the detections
            for i in np.arange(0, detections.shape[2]):
                # extract the confidence (i.e., probability) associated
                # with the prediction
                confidence = detections[0, 0, i, 2]

                # filter out weak detections by ensuring the `confidence`
                # is greater than the minimum confidence
                if confidence > conf["confidence"]:
                    # extract the index of the class label from the
                    # detections list
                    idx = int(detections[0, 0, i, 1])

                    # if the class label is not a car, ignore it
                    if CLASSES[idx] != "car":
                        continue

                    # if the class label is not a motorbike, ignore it
                    #if CLASSES[idx] != "motorbike":
                    #	continue

                    # compute the (x, y)-coordinates of the bounding box
                    # for the object
                    box = detections[0, 0, i, 3:7] * np.array([W, H, W, H])
                    (startX, startY, endX, endY) = box.astype("int")

                    # construct a dlib rectangle object from the bounding
                    # box coordinates and then start the dlib correlation
                    # tracker
                    tracker = dlib.correlation_tracker()
                    rect = dlib.rectangle(startX, startY, endX, endY)
                    tracker.start_track(rgb, rect)

                    # add the tracker to our list of trackers so we can
                    # utilize it during skip frames
                    trackers.append(tracker)

        # otherwise, we should utilize our object *trackers* rather than
        # object *detectors* to obtain a higher frame processing
        # throughput
        else:
            # loop over the trackers
            for tracker in trackers:
                # update the tracker and grab the updated position
                tracker.update(rgb)
                pos = tracker.get_position()

                # unpack the position object
                startX = int(pos.left())
                startY = int(pos.top())
                endX = int(pos.right())
                endY = int(pos.bottom())

                # add the bounding box coordinates to the rectangles list
                rects.append((startX, startY, endX, endY))

        # use the centroid tracker to associate the (1) old object
        # centroids with (2) the newly computed object centroids
        objects = ct.update(rects)

        # loop over the tracked objects
        for (objectID, centroid) in objects.items():
            # check to see if a trackable object exists for the current
            # object ID
            to = trackableObjects.get(objectID, None)

            # if there is no existing trackable object, create one
            if to is None:
                to = TrackableObject(objectID, centroid)

            # otherwise, if there is a trackable object and its speed has
            # not yet been estimated then estimate it
            elif not to.estimated:
                # check if the direction of the object has been set, if
                # not, calculate it, and set it
                if to.direction is None:
                    y = [c[0] for c in to.centroids]
                    direction = centroid[0] - np.mean(y)
                    to.direction = direction

                # if the direction is positive (indicating the object
                # is moving from left to right)
                if to.direction > 0:
                    # check to see if timestamp has been noted for
                    # point A
                    if to.timestamp["A"] == 0:
                        # if the centroid's x-coordinate is greater than
                        # the corresponding point then set the timestamp
                        # as current timestamp and set the position as the
                        # centroid's x-coordinate
                        if centroid[0] > conf["speed_estimation_zone"]["A"]:
                            to.timestamp["A"] = ts
                            to.position["A"] = centroid[0]

                    # check to see if timestamp has been noted for
                    # point B
                    elif to.timestamp["B"] == 0:
                        # if the centroid's x-coordinate is greater than
                        # the corresponding point then set the timestamp
                        # as current timestamp and set the position as the
                        # centroid's x-coordinate
                        if centroid[0] > conf["speed_estimation_zone"]["B"]:
                            to.timestamp["B"] = ts
                            to.position["B"] = centroid[0]

                    # check to see if timestamp has been noted for
                    # point C
                    elif to.timestamp["C"] == 0:
                        # if the centroid's x-coordinate is greater than
                        # the corresponding point then set the timestamp
                        # as current timestamp and set the position as the
                        # centroid's x-coordinate
                        if centroid[0] > conf["speed_estimation_zone"]["C"]:
                            to.timestamp["C"] = ts
                            to.position["C"] = centroid[0]

                    # check to see if timestamp has been noted for
                    # point D
                    elif to.timestamp["D"] == 0:
                        # if the centroid's x-coordinate is greater than
                        # the corresponding point then set the timestamp
                        # as current timestamp, set the position as the
                        # centroid's x-coordinate, and set the last point
                        # flag as True
                        if centroid[0] > conf["speed_estimation_zone"]["D"]:
                            to.timestamp["D"] = ts
                            to.position["D"] = centroid[0]
                            to.lastPoint = True

                # if the direction is negative (indicating the object
                # is moving from right to left)
                elif to.direction < 0:
                    # check to see if timestamp has been noted for
                    # point D
                    if to.timestamp["D"] == 0:
                        # if the centroid's x-coordinate is lesser than
                        # the corresponding point then set the timestamp
                        # as current timestamp and set the position as the
                        # centroid's x-coordinate
                        if centroid[0] < conf["speed_estimation_zone"]["D"]:
                            to.timestamp["D"] = ts
                            to.position["D"] = centroid[0]

                    # check to see if timestamp has been noted for
                    # point C
                    elif to.timestamp["C"] == 0:
                        # if the centroid's x-coordinate is lesser than
                        # the corresponding point then set the timestamp
                        # as current timestamp and set the position as the
                        # centroid's x-coordinate
                        if centroid[0] < conf["speed_estimation_zone"]["C"]:
                            to.timestamp["C"] = ts
                            to.position["C"] = centroid[0]

                    # check to see if timestamp has been noted for
                    # point B
                    elif to.timestamp["B"] == 0:
                        # if the centroid's x-coordinate is lesser than
                        # the corresponding point then set the timestamp
                        # as current timestamp and set the position as the
                        # centroid's x-coordinate
                        if centroid[0] < conf["speed_estimation_zone"]["B"]:
                            to.timestamp["B"] = ts
                            to.position["B"] = centroid[0]

                    # check to see if timestamp has been noted for
                    # point A
                    elif to.timestamp["A"] == 0:
                        # if the centroid's x-coordinate is lesser than
                        # the corresponding point then set the timestamp
                        # as current timestamp, set the position as the
                        # centroid's x-coordinate, and set the last point
                        # flag as True
                        if centroid[0] < conf["speed_estimation_zone"]["A"]:
                            to.timestamp["A"] = ts
                            to.position["A"] = centroid[0]
                            to.lastPoint = True

                # check to see if the vehicle is past the last point and
                # the vehicle's speed has not yet been estimated, if yes,
                # then calculate the vehicle speed and log it if it's
                # over the limit
                if to.lastPoint and not to.estimated:
                    # initialize the list of estimated speeds
                    estimatedSpeeds = []

                    # loop over all the pairs of points and estimate the
                    # vehicle speed
                    for (i, j) in points:
                        # calculate the distance in pixels
                        d = to.position[j] - to.position[i]
                        distanceInPixels = abs(d)

                        # check if the distance in pixels is zero, if so,
                        # skip this iteration
                        if distanceInPixels == 0:
                            continue

                        # calculate the time in hours
                        t = to.timestamp[j] - to.timestamp[i]
                        timeInSeconds = abs(t.total_seconds())
                        timeInHours = timeInSeconds / (60 * 60)

                        # calculate distance in kilometers and append the
                        # calculated speed to the list
                        distanceInMeters = distanceInPixels * meterPerPixel
                        distanceInKM = distanceInMeters / 1000
                        estimatedSpeeds.append(distanceInKM / timeInHours)
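                        # worked example (hypothetical numbers): 120 px between
                        # two points with meterPerPixel = 0.05 -> 6 m = 0.006 km,
                        # crossed in 0.5 s = 0.000139 h -> ~43.2 KMPH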

                    # calculate the average speed
                    to.calculate_speed(estimatedSpeeds)

                    # set the object as estimated
                    to.estimated = True
                    print("[INFO] Speed of the vehicle that just passed"\
                     " is: {:.2f} KMPH".format(to.speedKMPH))
                    odin.set_velocidade(to.speedKMPH)

            # store the trackable object in our dictionary
            trackableObjects[objectID] = to

            # draw both the ID of the object and the centroid of the
            # object on the output frame
            text = "ID {}".format(objectID)
            cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
            cv2.circle(frame, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)

            # check if the object has not been logged
            if not to.logged:
                # check if the object's speed has been estimated and it
                # is higher than the speed limit
                if to.estimated and to.speedKMPH > conf["speed_limit"]:
                    # set the current year, month, day, and time
                    year = ts.strftime("%Y")
                    month = ts.strftime("%m")
                    day = ts.strftime("%d")
                    time = ts.strftime("%H:%M:%S")

                    # check if dropbox is to be used to store the vehicle
                    # image
                    if conf["use_dropbox"]:
                        # initialize the image id, and the temporary file
                        imageID = ts.strftime("%H%M%S%f")
                        tempFile = TempFile()
                        cv2.imwrite(tempFile.path, frame)

                        # create a thread to upload the file to dropbox
                        # and start it
                        t = Thread(target=upload_file,
                                   args=(
                                       tempFile,
                                       client,
                                       imageID,
                                   ))
                        t.start()

                        # log the event in the log file
                        info = "{},{},{},{},{},{}\n".format(
                            year, month, day, time, to.speedKMPH, imageID)
                        logFile.write(info)

                    # otherwise, we are not uploading vehicle images to
                    # dropbox
                    else:
                        # log the event in the log file
                        info = "{},{},{},{},{}\n".format(
                            year, month, day, time, to.speedKMPH)
                        logFile.write(info)

                    # mark the object as logged
                    to.logged = True

        # if the *display* flag is set, then display the current frame
        # to the screen and record if a user presses a key
        if conf["display"]:
            cv2.imshow("frame", frame)
            key = cv2.waitKey(1) & 0xFF

            # if the `q` key is pressed, break from the loop
            if key == ord("q"):
                break

        # increment the total number of frames processed thus far and
        # then update the FPS counter
        totalFrames += 1
        fps.update()

    # stop the timer and display FPS information
    fps.stop()
    print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

    # check if the log file object exists, if it does, then close it
    if logFile is not None:
        logFile.close()

    # close any open windows
    cv2.destroyAllWindows()

    # clean up
    print("[INFO] cleaning up...")
    vs.release()
Exemplo n.º 24
0
 def camera_loading(self):
     if self.isStatus == 1 or self.isStatus == 2:
         print('[INFO] Cannot interrupt: an algorithm is still running.')
         QMessageBox.information(
             self, 'Warning', 'You must stop the algorithm process first. ' +
             '(Key: ... Suspending)', QMessageBox.Ok)
         return
     if self.isStatus == 3:
         self.isStatus = 0
         print("[INFO] Exporting webcam stream.")
         self.pushButton_cameraLoading.setText('&Camera Loading')
         self.bbox_list = []
         self.rectList = []
         self.paint_frame = None
         if self.vs is not None:
             self.vs.release()
         self.isPainting = False
         self.afterCamera = True
         self.F = len(listdir(self.save_location))
         self.scrollBar.setMaximum(self.F)
         self.scrollBar.setProperty('value', self.F)
     else:
         self.isStatus = 3
         self.pushButton_cameraLoading.setText('&Camera Suspending')
         self.afterCamera = False
     if self.isStatus == 3:
         trackers = []
         mirror = True
         print("[INFO] Importing webcam stream.")
         self.vs = cv2.VideoCapture(0)
         label_width = self.label_image.geometry().width()
         label_height = self.label_image.geometry().height()
         self.vs.set(3, label_width)
         self.vs.set(4, label_height)
         self.W = self.vs.get(3)
         self.H = self.vs.get(4)
         fps_cal = None
         self.N = 0
         self.first_frame = True
         save_loc_d = './ans/' + '__webcam__'
         save_loc_t = save_loc_d + '/' + str(self.tracker_name)
         self.save_location = save_loc_t
         system('mkdir ' + save_loc_t.replace('/', '\\'))
         system('del /q ' + save_loc_t.replace('/', '\\'))
         while True:
             _, frame = self.vs.read()
             if mirror:
                 frame = cv2.flip(frame, 1)
             self.paint_frame = frame
             if (not self.isPainting) and len(self.bbox_list):
                 self.progressBar.setFormat('HANDLE')
                 if self.first_frame:
                     print('[INFO] Initializing webcam processing.')
                     for b in range(len(self.bbox_list)):
                         trackers.append(deepcopy(self.tracker))
                     for b in range(len(self.bbox_list)):
                         trackers[b].init(frame, self.bbox_list[b])
                     fps_cal = FPS().start()
                     self.analysis_init()
                     self.first_frame = False
                 else:
                     masks = None
                     for b in range(len(self.bbox_list)):
                         outputs = trackers[b].track(frame)
                         if 'polygon' in outputs:
                             polygon = np.array(outputs['polygon']).astype(
                                 np.int32)
                             cv2.polylines(frame,
                                           [polygon.reshape((-1, 1, 2))],
                                           True, (0, 255, 0), 3)
                             mask = ((outputs['mask'] >
                                      cfg.TRACK.MASK_THERSHOLD) * 255)
                             mask = mask.astype(np.uint8)
                             if masks is None:
                                 masks = mask
                             else:
                                 masks += mask
                             polygon_xmean = (polygon[0] + polygon[2] +
                                              polygon[4] + polygon[6]) / 4
                             polygon_ymean = (polygon[1] + polygon[3] +
                                              polygon[5] + polygon[7]) / 4
                             cv2.rectangle(frame, (int(polygon_xmean) - 1,
                                                   int(polygon_ymean) - 1),
                                           (int(polygon_xmean) + 1,
                                            int(polygon_ymean) + 1),
                                           (0, 255, 0), 3)
                             self.behavior_analysis(
                                 frame, b, (polygon_xmean, polygon_ymean),
                                 (mask > 0))
                         else:
                             bbox = list(map(int, outputs['bbox']))
                             cv2.rectangle(
                                 frame, (bbox[0], bbox[1]),
                                 (bbox[0] + bbox[2], bbox[1] + bbox[3]),
                                 (0, 255, 0), 3)
                     frame[:, :, 2] = (masks > 0) * 255 * 0.75 + (
                         masks == 0) * frame[:, :, 2]
                     fps_cal.update()
                     fps_cal.stop()
                     text = "FPS: {:.2f}".format(fps_cal.fps())
                     cv2.putText(frame, text, (60, 30),
                                 cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255),
                                 2)
             else:
                 # delay for 0.05s
                 if (self.isPainting):
                     self.progressBar.setFormat('DRAW')
                 else:
                     self.progressBar.setFormat('PHOTOGRAPHY')
                 cv2.waitKey(50)
             self.N += 1
             if self.checkBox.checkState():
                 save_loc_i = save_loc_t + "/" + str(
                     self.N).zfill(4) + ".jpg"
                 cv2.imwrite(save_loc_i, frame)
             self.label_image.setPixmap(QPixmap(self.cv2_to_qimge(frame)))
             QApplication.processEvents()
             if self.isStatus != 3:
                 self.vs.release()
                 self.progressBar.setFormat('STAND BY')
                 QApplication.processEvents()
                 break
Exemplo n.º 25
0
 def start_fps(self) -> None:
     """Start FPS timer"""
     self._fps = FPS()
     self._fps.start()
Exemplo n.º 26
0
def main():
    

    # set the filter of the video -- VSCO! still not working, maybe later

    # here we try moving the blocking I/O operations to a separate thread and
    # maintaining a queue of decoded frames in an effort to improve FPS
    # (the .read() method is a blocking I/O operation)

    camera = PiCamera()
    camera.resolution = (352, 240)
    camera.framerate = 32
    rawCapture = PiRGBArray(camera, size=(352, 240))
    stream = camera.capture_continuous(rawCapture, format="bgr", use_video_port=True)
    camera.close()
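    # NOTE: the PiCamera/stream objects created above are closed immediately and
    # never used; all capture below goes through the threaded PiVideoStream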


    vs = PiVideoStream().start()
    time.sleep(2.0)
    fps = FPS().start()
    


    minsize = 20

    caffe_model_path = "./model"

    threshold = [0.6, 0.7, 0.7]
    #initial threshold: 0.6 0.7 0.7
    factor = 0.709
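    # threshold[i] is the confidence cutoff for MTCNN stage i (PNet, RNet, ONet);
    # factor is the scale step of the image pyramid used by the detector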
    
    caffe.set_mode_cpu()
    PNet = caffe.Net(caffe_model_path+"/det1.prototxt", caffe_model_path+"/det1.caffemodel", caffe.TEST)
    RNet = caffe.Net(caffe_model_path+"/det2.prototxt", caffe_model_path+"/det2.caffemodel", caffe.TEST)
    ONet = caffe.Net(caffe_model_path+"/det3.prototxt", caffe_model_path+"/det3.caffemodel", caffe.TEST)

    while True: 
        start = timer()
        print("---------------------------------------------")
        frame = vs.read()
                            
        # convert the frame to gray scale and restore the BGR info

        grayFrame = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
        restore = cv2.cvtColor(grayFrame,cv2.COLOR_GRAY2BGR)
        

        img = restore
        #img = frame
        img_matlab = img.copy()
        tmp = img_matlab[:,:,2].copy()
        img_matlab[:,:,2] = img_matlab[:,:,0]
        img_matlab[:,:,0] = tmp
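        # swapping channels 0 and 2 converts OpenCV's BGR frame to the RGB order
        # the Caffe MTCNN models expect, while `frame` stays BGR for drawing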

        # check rgb position
        #tic()
        boundingboxes, points = detect_face(img_matlab, minsize, PNet, RNet, ONet, threshold, False, factor)
        #toc()

        
        img = drawBoxes(frame, boundingboxes)
        for i in points:
            for j in range(5):
                cv2.circle(img, (i[j], i[j+5]), 1, (0, 255, 0),-1)

        cv2.namedWindow('cam', cv2.WINDOW_NORMAL)
        cv2.resizeWindow('cam',960,720)
        cv2.imshow('cam', img)
        
        if cv2.waitKey(1) &0xFF == ord('q'):
            break
            
        
        end = timer()
        print ("Total time:",end-start)

        fps.update()

    # when everything is done, stop the FPS counter and release the stream
    #cap.release()
    fps.stop()
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
    cv2.destroyAllWindows()
    vs.stop()
Exemplo n.º 27
0
class VideoPlayer(QtCore.QObject):
    def __init__(self, ball_tracker: BallTracker) -> None:
        """Creates an instance of this class that contains properties used by the
        video player to display frames processed by the ball tracker"""
        super().__init__()
        self.ball_tracker = ball_tracker or BallTracker()
        self.video_processor_lock = threading.Lock()
        self.video_processor_stop_event = threading.Event()
        self.video_processor: VideoProcessor | None = None
        self.video_file_stream: VideoFileStream | None = None
        self.video_file: str | None = None

        self._width = 1100
        self._height = 600
        self._play = False
        self._crop_frames = False
        self._show_threshold = False
        self._perform_morph = False
        self._detect_table = False
        self._queue_size = 0
        self._fps = FPS()
        self._output_frame = np.array([])
        self._hsv_frame = np.array([])

    widthChanged = QtCore.pyqtSignal(int)

    @property
    def width(self) -> int:
        """Width property

        :return: player width
        :rtype: int
        """
        return self._width

    @width.setter
    def width(self, value: int) -> None:
        """Width setter

        :param value: value to set
        :type value: int
        """
        self._width = value
        self.widthChanged.emit(self._width)

    heightChanged = QtCore.pyqtSignal(int)

    @property
    def height(self) -> int:
        """Height property

        :return: player height
        :rtype: int
        """
        return self._height

    @height.setter
    def height(self, value: int) -> None:
        """Height setter

        :param value: value to set
        :type value: int
        """
        self._height = value
        self.heightChanged.emit(self._height)

    playChanged = QtCore.pyqtSignal(bool)

    @property
    def play(self) -> bool:
        """Play property

        :return: play video
        :rtype: bool
        """
        return self._play

    @play.setter
    def play(self, value: bool) -> None:
        """Play setter

        :param value: value to set
        :type value: bool
        """
        self._play = value
        self.playChanged.emit(self._play)

    crop_framesChanged = QtCore.pyqtSignal(bool)

    @property
    def crop_frames(self) -> bool:
        """Crop frames property

        :return: crop frames
        :rtype: bool
        """
        return self._crop_frames

    @crop_frames.setter
    def crop_frames(self, value: bool) -> None:
        """Crop frames setter

        :param value: value to set
        :type value: bool
        """
        self._crop_frames = value
        self.crop_framesChanged.emit(self._crop_frames)

    show_thresholdChanged = QtCore.pyqtSignal(bool)

    @property
    def show_threshold(self) -> bool:
        """Show threshold property

        :return: show threshold
        :rtype: bool
        """
        return self._show_threshold

    @show_threshold.setter
    def show_threshold(self, value: bool) -> None:
        """Show threshold setter

        :param value: value to set
        :type value: bool
        """
        self._show_threshold = value
        self.show_thresholdChanged.emit(self._show_threshold)

    perform_morphChanged = QtCore.pyqtSignal(bool)

    @property
    def perform_morph(self) -> bool:
        """Perform morph property

        :return: perform morph
        :rtype: bool
        """
        return self._perform_morph

    @perform_morph.setter
    def perform_morph(self, value: bool) -> None:
        """Perform morph setter

        :param value: value to set
        :type value: bool
        """
        self._perform_morph = value
        self.perform_morphChanged.emit(self._perform_morph)

    detect_tableChanged = QtCore.pyqtSignal(bool)

    @property
    def detect_table(self) -> bool:
        """Detect table property

        :return: detect table
        :rtype: bool
        """
        return self._detect_table

    @detect_table.setter
    def detect_table(self, value: bool) -> None:
        """Detect table setter

        :param value: value to set
        :type value: bool
        """
        self._detect_table = value
        self.detect_tableChanged.emit(self._detect_table)

    queue_sizeChanged = QtCore.pyqtSignal(int)

    @property
    def queue_size(self) -> int:
        """Queue size property

        :return: queue size
        :rtype: int
        """
        return self._queue_size

    @queue_size.setter
    def queue_size(self, value: int) -> None:
        """Queue size setter

        :param value: value to set
        :type value: int
        """
        self._queue_size = value
        self.queue_sizeChanged.emit(self._queue_size)

    fpsChanged = QtCore.pyqtSignal(int)

    def start_fps(self) -> None:
        """Start FPS timer"""
        self._fps = FPS()
        self._fps.start()

    def update_fps(self) -> None:
        """Update FPS timer"""
        self._fps.update()

    def stop_fps(self) -> None:
        """Stop FPS timer"""
        self._fps.stop()
        self.fpsChanged.emit(self._fps.fps())

    output_frameChanged = QtCore.pyqtSignal(np.ndarray)

    @property
    def output_frame(self) -> Frame:
        """Frame property

        :return: output frame
        :rtype: np.ndarray
        """
        return self._output_frame

    @output_frame.setter
    def output_frame(self, value: Frame) -> None:
        """Output frame setter

        :param value: value to set
        :type value: np.ndarray
        """
        self._output_frame = value
        self.output_frameChanged.emit(self._output_frame)

    hsv_frameChanged = QtCore.pyqtSignal(np.ndarray)

    @property
    def hsv_frame(self) -> Frame:
        """HSV frame property

        :return: hsv frame
        :rtype: np.ndarray
        """
        return self._hsv_frame

    @hsv_frame.setter
    def hsv_frame(self, value: Frame) -> None:
        """HSV frame setter

        :param value: value to set
        :type value: np.ndarray
        """
        self._hsv_frame = value
        self.hsv_frameChanged.emit(self._hsv_frame)

    def start(self, video_file: str | None = None) -> None:
        """Creates VideoProcessor and VideoFileStream instances to handle
        the selected video file.

        The VideoFileStream is the producer thread and the VideoProcessor
        is the consumer thread, where the VideoFileStream instance reads
        frames from the video file and puts them into a queue for the
        VideoProcessor to obtain frames to process from.

        The VideoProcessor then passes processed frames to the VideoPlayer
        to display to the user.

        :param video_file: video file to read from, defaults to None
        :type video_file: str, optional
        :raises TypeError: if `video_file` isn't an actual video file
        """
        if video_file and "video" not in magic.from_file(
                video_file, mime=True):  # type: ignore[no-untyped-call]
            raise TypeError(f"{video_file} is not a video file")

        self.play = False
        self.video_file = video_file or self.video_file

        if self.video_file is None:
            raise ValueError("video_file is not set")

        self.destroy_video_threads()
        self.video_processor_stop_event.clear()

        self.video_file_stream = VideoFileStream(
            self.video_file,
            video_player=self,
            colour_settings=self.ball_tracker.colour_settings,
            queue_size=1,
        )

        self.video_processor = VideoProcessor(
            video_stream=self.video_file_stream,
            video_player=self,
            ball_tracker=self.ball_tracker,
            lock=self.video_processor_lock,
            stop_event=self.video_processor_stop_event,
        )

        self.video_processor.start()

    def restart(self) -> None:
        """Restart the video player by destroying the VideoProcessor
        and VideoFileStream instances and creating new ones before
        starting the video player again."""
        self.start()

    def destroy_video_threads(self) -> None:
        """Destroy the VideoProcessor and VideoFileStream thread instances"""
        if self.video_processor is not None:
            if self.video_file_stream is not None:
                with self.video_processor_lock:
                    self.video_file_stream.stop()
            self.video_processor_stop_event.set()
            self.video_processor.join()
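
The producer/consumer hand-off described in the docstring of `start()` boils down to a bounded queue shared by two threads. The following is a minimal, illustrative sketch of that shape using only the standard library; the names `produce_frames` and `consume_frames` are made up for the example, and the real VideoFileStream/VideoProcessor classes add colour detection, Qt signals and locking on top of it.

import queue
import threading

frame_queue = queue.Queue(maxsize=1)    # queue_size=1, as in start() above
stop_event = threading.Event()

def produce_frames():
    # stands in for VideoFileStream: read frames and push them into the queue
    for frame_number in range(100):
        if stop_event.is_set():
            break
        frame_queue.put(frame_number)   # blocks while the consumer is busy
    stop_event.set()

def consume_frames():
    # stands in for VideoProcessor: pull frames off the queue and "process" them
    while not stop_event.is_set() or not frame_queue.empty():
        try:
            frame = frame_queue.get(timeout=0.1)
        except queue.Empty:
            continue
        print(f"processed frame {frame}")

producer = threading.Thread(target=produce_frames)
consumer = threading.Thread(target=consume_frames)
producer.start()
consumer.start()
producer.join()
consumer.join()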
Exemplo n.º 28
0
# generate a set of bounding box colors for each class
CLASSES = ['motorbike', 'person']
# Two classes are detected in each frame:
# 1. the motorbike and 2. the person riding it.
# COLORS = np.random.uniform(0, 255, size=(len(CLASSES), 3))


## initialise the models
motor_bike1.initialize()

cap = cv2.VideoCapture('ssvid.mp4')
## cap is the cv2.VideoCapture object for the input video


# Starting the FPS calculation
fps = FPS().start()

# loop over the frames from the video stream
while True:
    try:
        ret, frame = cap.read()
        if not ret:
            # end of the video (or a read error); stop the loop
            break
        frame = imutils.resize(frame, width=600, height=600)

        ## get the dimensions of the frame
        (h, w) = frame.shape[:2]
        blob = motor_bike1.process_image(frame)

        '''
        'persons' is a list containing the coordinates of each person in the image
        'motorbi' is a list containing the coordinates of each motor bike in the image
        '''
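
The example is truncated here, so how `persons` and `motorbi` are actually used is not shown. Purely as a sketch (none of the code below is from the original), two such lists of (startX, startY, endX, endY) boxes could be paired with a simple overlap test to decide which person is riding which motorbike:

def boxes_overlap(box_a, box_b):
    # boxes are (startX, startY, endX, endY); True if the rectangles intersect
    ax1, ay1, ax2, ay2 = box_a
    bx1, by1, bx2, by2 = box_b
    return ax1 < bx2 and bx1 < ax2 and ay1 < by2 and by1 < ay2

def pair_riders(persons, motorbi):
    # return (person_box, bike_box) pairs whose boxes intersect
    return [(p, m) for p in persons for m in motorbi if boxes_overlap(p, m)]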
Exemplo n.º 29
0
    def recognise(self, cascade='haarcascade_frontalface_default.xml'):
        print("[INFO] loading encodings + face detector...")
        data = pickle.loads(open(self.encoding, "rb").read())
        detector = cv2.CascadeClassifier(cascade)  # use the cascade argument rather than a hard-coded path

        print("[INFO] starting video stream...")
        vs = VideoStream(src=0).start()
        time.sleep(2.0)

        fps = FPS().start()

        while True:

            frame = vs.read()
            frame = imutils.resize(frame, width=500)

            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

            rects = detector.detectMultiScale(gray,
                                              scaleFactor=1.1,
                                              minNeighbors=5,
                                              minSize=(30, 30),
                                              flags=cv2.CASCADE_SCALE_IMAGE)

            boxes = [(y, x + w, y + h, x) for (x, y, w, h) in rects]

            encodings = face_recognition.face_encodings(rgb, boxes)
            names = []

            for encoding in encodings:

                matches = face_recognition.compare_faces(
                    data["encodings"], encoding)
                name = "Unknown"

                # check to see if we have found a match
                if True in matches:

                    matchedIdxs = [i for (i, b) in enumerate(matches) if b]
                    counts = {}

                    for i in matchedIdxs:
                        name = data["names"][i]
                        counts[name] = counts.get(name, 0) + 1
                    name = max(counts, key=counts.get)

                names.append(name)
                rfid = RFID()
                rfid.RFID_db(operation='match', name=name.lower())

            for ((top, right, bottom, left), name) in zip(boxes, names):
                cv2.rectangle(frame, (left, top), (right, bottom), (0, 255, 0),
                              2)
                y = top - 15 if top - 15 > 15 else top + 15
                cv2.putText(frame, name, (left, y), cv2.FONT_HERSHEY_SIMPLEX,
                            0.75, (0, 255, 0), 2)

            cv2.imshow("Frame", frame)
            key = cv2.waitKey(1) & 0xFF

            if key == ord("q"):
                break

            fps.update()

        fps.stop()
        print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
        print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

        cv2.destroyAllWindows()
        vs.stop()


# run = python_face_recognition()
# if str(sys.argv[1]).lower() == 'encode':
#     run.encode()

# elif str(sys.argv[1]).lower() == 'recognise':
#     run.recognise()

# else:
#     print('false')
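
The commented-out driver above refers to an `encode()` step that is not included in this example. As a rough sketch of what it might look like, assuming a `dataset/` directory with one sub-folder of images per person (the directory layout and output file name are assumptions), the pickle consumed by `recognise()` could be built with the same `face_recognition` API:

import os
import pickle

import cv2
import face_recognition

def encode_dataset(dataset_dir="dataset", output="encodings.pickle"):
    # dataset_dir is assumed to contain one sub-folder of images per person
    known_encodings, known_names = [], []
    for name in os.listdir(dataset_dir):
        person_dir = os.path.join(dataset_dir, name)
        if not os.path.isdir(person_dir):
            continue
        for filename in os.listdir(person_dir):
            image = cv2.imread(os.path.join(person_dir, filename))
            if image is None:
                continue
            rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            boxes = face_recognition.face_locations(rgb, model="hog")
            for encoding in face_recognition.face_encodings(rgb, boxes):
                known_encodings.append(encoding)
                known_names.append(name)
    with open(output, "wb") as f:
        pickle.dump({"encodings": known_encodings, "names": known_names}, f)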
Exemplo n.º 30
0
def main():
    classes = [
        "background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus",
        "car", "cat", "chair", "cow", "diningtable", "dog", "horse",
        "motorbike", "person", "pottedplant", "sheep", "sofa", "train",
        "tvmonitor"
    ]

    # MobileNet Variables
    proto_file = "mobilenet_ssd/MobileNetSSD_deploy.prototxt"
    model_file = "mobilenet_ssd/MobileNetSSD_deploy.caffemodel"

    # load the model
    print("Loading the Caffe model")
    net = cv2.dnn.readNetFromCaffe(proto_file, model_file)

    # connect to firestore database
    print("Connecting to firestore database")
    database = firebaseaccess.connDb()

    # connect to arduino
    print("Connecting to Arduino")
    arduino = serial.Serial("COM3", baudrate=9600)
    arduino.flushInput()

    # load video file
    print("Opening video")
    vid = cv2.VideoCapture("videos/aaron_vid.mp4")

    #if arduino was successfully connected to, send 'Accept' code
    if arduino:
        arduino.write("A".encode())

    # initialise variables
    threshold = firebaseaccess.getThreshold(
        database)  # get latest threshold val from database
    time_now = datetime.now()  # get current time to enter into database
    device_name = firebaseaccess.getDeviceName(database)  # get device name
    width = None
    height = None
    total_frames = 0
    count = 0

    #different colours to use
    YELLOW = (0, 255, 255)
    CYAN = (255, 255, 0)
    RED = (0, 0, 255)
    BLUE = (255, 0, 0)
    GREEN = (0, 255, 0)

    #Initialise centroidtracker and a list of trackers
    cenTrack = CentroidTracker(maxDisappeared=40, maxDistance=50)
    trackers = []
    trackObjs = {}

    #start fps to keep track of fps
    fps = FPS().start()

    while True:
        #read the video and save a frame as 'img'
        img = vid.read()[1]

        #if img is None we have most likely reached the end of the video, or there was a read error
        if img is None:
            print("End of video, loop ending")
            break

        #resize the image to fit the blob function
        img = imutils.resize(img, width=500)
        img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

        #set the height and width of the image
        height = img.shape[0]
        width = img.shape[1]

        #create list of bounding boxes
        b_boxes = []

        #every 30 frames perform object detection
        if total_frames % 30 == 0:
            trackers = []
            blob = cv2.dnn.blobFromImage(img, 0.007843, (width, height), 127.5)
            net.setInput(blob)
            detects = net.forward()

            for i in np.arange(0, detects.shape[2]):
                conf = detects[0, 0, i, 2]
                if conf > 0.3:
                    idx = int(detects[0, 0, i, 1])

                    if classes[idx] != "person":
                        continue

                    box = detects[0, 0, i, 3:7] * np.array(
                        [width, height, width, height])
                    (x1, y1, x2, y2) = box.astype("int")

                    tracker = dlib.correlation_tracker()
                    # start the tracker on the same RGB frame that update() receives later
                    tracker.start_track(img_rgb, dlib.rectangle(x1, y1, x2, y2))

                    trackers.append(tracker)
        else:
            for tracker in trackers:
                tracker.update(img_rgb)

                position = tracker.get_position()

                x1 = int(position.left())
                y1 = int(position.top())
                x2 = int(position.right())
                y2 = int(position.bottom())

                b_boxes.append((x1, y1, x2, y2))

        #detection line coordinates (from x1,y1 to x2,y2)
        line_x1 = 0
        line_y1 = (height // 2)
        line_x2 = width
        line_y2 = (height // 2)

        #draw line across the screen at halfway
        #cv2.line(image, start_point, end_point, colour, thickness)
        cv2.line(img, (line_x1, line_y1), (line_x2, line_y2), RED, 1)

        objects = cenTrack.update(b_boxes)

        #iterate through the objects that are being tracked
        for (objectID, centroid) in objects.items():
            track_obj = trackObjs.get(objectID, None)

            #obtain objs current x,y coord from centroid array
            curr_x_pos = centroid[0]
            curr_y_pos = centroid[1]

            #if there is no current object being tracked then create a new one
            if track_obj is None:
                track_obj = TrackableObject(objectID, centroid)

            #iterate through the tracked objects
            else:
                y = [c[1] for c in track_obj.centroids]

                #direction is calculated by subtracting current centroid position from previous positions
                direction = curr_y_pos - np.mean(y)
                track_obj.centroids.append(centroid)

                #if the object hasn't already been counted
                if not track_obj.counted:
                    #if direction is moving up, and the object is above the line, then decrement
                    if direction < 0 and curr_y_pos < line_y1:
                        new_count = -1
                        track_obj.counted = True

                        #confirm that person was detected and update count accordingly
                        count = updateArduino(arduino, new_count, count,
                                              threshold, time_now, device_name,
                                              database)

                    ##if direction is moving down, and the object is below the line, then increment
                    elif direction > 0 and curr_y_pos > line_y1:
                        new_count = 1
                        track_obj.counted = True

                        #confirm that person was detected and update count
                        count = updateArduino(arduino, new_count, count,
                                              threshold, time_now, device_name,
                                              database)

            trackObjs[objectID] = track_obj

            #draw circle at centroid
            cv2.circle(img, (curr_x_pos, curr_y_pos), 4, YELLOW, -1)

        text = "Total Count: " + str(count)

        #cv2.putText(img, text, (X,Y), font, fontScale, colour, thickness)
        cv2.putText(img, text, (10, height - 20), cv2.FONT_HERSHEY_TRIPLEX,
                    0.6, YELLOW, 2)

        cv2.imshow("People Counting Device", img)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

        total_frames = total_frames + 1
        fps.update()

    fps.stop()

    vid.release()

    cv2.destroyAllWindows()
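
TrackableObject is imported from elsewhere in the project. Judging only from how it is used above (an ID, a history of centroids and a `counted` flag), a minimal stand-in might look like the sketch below; the real class may carry additional state.

class TrackableObject:
    # minimal stand-in consistent with the usage above
    def __init__(self, objectID, centroid):
        self.objectID = objectID        # ID assigned by the centroid tracker
        self.centroids = [centroid]     # history of (x, y) centre positions
        self.counted = False            # has this object crossed the line yet?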
Exemplo n.º 31
0
import traceback
from collections import defaultdict

import cv2
import numpy as np
from imutils.video import FPS
import imagezmq

# instantiate image_hub
image_hub = imagezmq.ImageHub()

image_count = 0
sender_image_counts = defaultdict(int)  # dict for counts by sender
first_image = True

try:
    while True:  # receive images until Ctrl-C is pressed
        sent_from, jpg_buffer = image_hub.recv_jpg()
        if first_image:
            fps = FPS().start(
            )  # start FPS timer after first image is received
            first_image = False
        image = cv2.imdecode(np.frombuffer(jpg_buffer, dtype='uint8'), -1)
        # see opencv docs for info on -1 parameter
        fps.update()
        image_count += 1  # global count of all images received
        sender_image_counts[sent_from] += 1  # count images for each RPi name
        cv2.imshow(sent_from, image)  # display images 1 window per sent_from
        cv2.waitKey(1)
        image_hub.send_reply(b'OK')  # REP reply
except (KeyboardInterrupt, SystemExit):
    pass  # Ctrl-C was pressed to end program; FPS stats computed below
except Exception as ex:
    print('Python error with no Exception handler:')
    print('Traceback error:', ex)
    traceback.print_exc()
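
For context, the sending side of this hub (not part of the example) typically JPEG-encodes each frame and ships it with `imagezmq.ImageSender`. The sketch below is an assumption about that sender: the hub address, port, camera source and JPEG quality are placeholders.

import socket
import time

import cv2
import imagezmq
from imutils.video import VideoStream

# connect to the hub; replace the address with the machine running ImageHub()
sender = imagezmq.ImageSender(connect_to="tcp://192.168.1.100:5555")
sender_name = socket.gethostname()       # used by the hub to key its counters

vs = VideoStream(src=0).start()          # or usePiCamera=True on a Raspberry Pi
time.sleep(2.0)                          # camera warm-up

jpeg_quality = 95
while True:
    frame = vs.read()
    ret_code, jpg_buffer = cv2.imencode(
        ".jpg", frame, [int(cv2.IMWRITE_JPEG_QUALITY), jpeg_quality])
    sender.send_jpg(sender_name, jpg_buffer)   # blocks until the hub replies 'OK'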
Exemplo n.º 32
0
if _platform == "linux2":
    video = imv.VideoStream(usePiCamera=True, resolution=(screenwidth,screenheight), framerate=60)
else:
    video = imv.VideoStream(resolution=(640,480))
video.start()
# Camera warmup
time.sleep(1)

#Fullscreen
cv2.namedWindow("Frame", cv2.WND_PROP_FULLSCREEN)
cv2.setWindowProperty("Frame", cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
cv2.setMouseCallback("Frame", click_and_close)

ser = connect_to_arduino()
ip = getIP()
fps = FPS().start()

# keep looping
while looping:
    # grab the current frame and initialize the status text
    frame = video.read()
    status = "No Targets"
    status2 = "-:-"

    # convert the frame to grayscale, blur it, and detect edges
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(gray, (7, 7), 0)
    edged = cv2.Canny(blurred, 30, 230)

    # find contours in the edge map
    (_, cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
Exemplo n.º 33
0
def plant_growth_health_tracking():
    # define the paths to the Keras deep learning model
    MODEL_PATH = "plantgrowthv2.model"

    #INPUT_SIZE: the width and height to which each frame is resized prior to being fed into the CNN
    INPUT_SIZE = (400, 400)
    #PYR_SCALE: the scale factor of our image pyramid
    PYR_SCALE = 1.5
    #WIN_STEP: the step (in pixels) of our sliding window
    WIN_STEP = 8
    #ROI_SIZE: input ROI size to our CNN, as if we were performing classification
    #(a sketch of the image_pyramid and sliding_window helpers appears at the end of this example)
    ROI_SIZE = (96, 96)
    #BATCH_SIZE: size of each batch to be passed through the CNN
    BATCH_SIZE = 32

    # load the model
    print("loading model...")
    model = load_model(MODEL_PATH)

    #initialize the object detection dictionary which maps class labels to their predicted bounding boxes and associated probability
    labels = {
        "Futterkohl_Gruner_Ring": [],
        "Lollo_Rossa": [],
        "Mangold_Lucullus": [],
        "Pak_Choi": [],
        "Tatsoi": []
    }

    # initialize the video stream and allow the camera sensor to warm up
    print("starting video stream...")
    #vs = VideoStream(src=0).start()
    vs = VideoStream(usePiCamera=True).start()
    time.sleep(2.0)
    fps = FPS().start()

    # loop over the frames from the video stream
    while fps._numFrames < 100:
        # grab the frame from the threaded video stream and resize it
        # to be a square
        frame = vs.read()
        (h, w) = frame.shape[:2]
        frame = cv2.resize(frame, INPUT_SIZE, interpolation=cv2.INTER_CUBIC)

        #initialize the batch ROIs and (x,y)-coordinates
        batchROIs = None
        batchLocs = []

        #gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

        #a typical green will have still have some red and blue
        #we will get the lower-light mixes of all colors still
        lower_green = np.array([30, 100, 100])
        upper_green = np.array([90, 255, 255])

        #create a mask for a specific range
        #pixels in the range will be converted to pure white
        #pixels outside the range will be converted to black
        mask = cv2.inRange(hsv, lower_green, upper_green)

        #restore 'green-ness' by running a bitwise operation
        #show color when there is the frame AND the mask
        res = cv2.bitwise_and(frame, frame, mask=mask)

        date_yyyymmdd = time.strftime("%Y-%m-%d")
        #save five copies of the colour-filtered image as JPGs in the working directory for post-processing
        for index in range(1, 6):
            cv2.imwrite(
                str(date_yyyymmdd) + "_pic_" + str(index) + ".jpg", res)

        #start the timer
        print("detecting plants...")

        # loop over the image pyramid
        for image in image_pyramid(frame, scale=PYR_SCALE, minSize=ROI_SIZE):
            # loop over the sliding window locations
            for (x, y, roi) in sliding_window(image, WIN_STEP, ROI_SIZE):
                #take the ROI and preprocess it so we can classify the region with Keras
                roi = img_to_array(roi)
                roi = np.expand_dims(roi, axis=0)

                #if batchROIs is None, initialize it
                if batchROIs is None:
                    batchROIs = roi

                #otherwise add the ROI to the bottom of the batch
                else:
                    batchROIs = np.vstack([batchROIs, roi])

                # append the (x,y)-coordinates of the sliding window to the batch
                batchLocs.append((x, y))

        #check to see if there are any ROIs that still need to be classified
        if batchROIs is not None and len(batchROIs) > 0:
            #classify the batch of ROIs collected from the pyramid/sliding window
            labels = classify_batch(model,
                                    batchROIs,
                                    batchLocs,
                                    labels,
                                    minProb=0.5)

        #reset the batch ROIs and (x,y)-coordinates for the next frame
        batchROIs = None
        batchLocs = []

        #loop over all the class labels for each detected object in the image
        for classLabel in labels.keys():
            #grab the bounding boxes and associated probabilities for each detection and apply non-maxima suppression
            #to suppress weaker overlapping detections
            #np.array needs a list, not a generator, to build a proper array
            boxes = np.array([p[0] for p in labels[classLabel]])
            proba = np.array([p[1] for p in labels[classLabel]])
            boxes = non_max_suppression(boxes, proba)

            #loop over the bounding boxes again, this time only drawing the ones that were not suppressed
            for (xA, yA, xB, yB) in boxes:
                title = "{}: {:.2f}%".format(classLabel, proba * 100)

                #if str(classLabel) == "Lollo_Rossa":
                cv2.rectangle(frame, (xA, yA), (xB, yB), (255, 0, 255), 2)
                frame = cv2.putText(frame, title, (xA, yB),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.7,
                                    (255, 0, 255), 2)

                #elif str(classLabel) == "Tatsoi":
                #    cv2.rectangle(frame, (xA, yA), (xB, yB), (255,0,0), 2)
                #    frame = cv2.putText(frame, title, (xA, yB), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255,0,0), 2)

            if len(labels[classLabel]) > 5:
                # send a text notification to firebase for app to receive
                triggered("Young seedling has matured. Time to transplant" +
                          str(classLabel))

        # show the output frame
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF

        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break

        fps.update()

    # do a bit of cleanup
    print("cleaning up...")
    fps.stop()
    cv2.destroyAllWindows()
    vs.stop()
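
`image_pyramid` and `sliding_window` are helper generators defined elsewhere in the project. The sketch below shows one common way to write them, consistent with how they are called above; it is an assumption, not the project's own code.

import imutils

def sliding_window(image, step, window_size):
    # yield (x, y, window) tuples by sliding a window_size box across the image
    (win_w, win_h) = window_size
    for y in range(0, image.shape[0] - win_h + 1, step):
        for x in range(0, image.shape[1] - win_w + 1, step):
            yield (x, y, image[y:y + win_h, x:x + win_w])

def image_pyramid(image, scale=1.5, minSize=(96, 96)):
    # yield the image, then progressively smaller copies until minSize is reached
    yield image
    while True:
        w = int(image.shape[1] / scale)
        image = imutils.resize(image, width=w)
        if image.shape[0] < minSize[1] or image.shape[1] < minSize[0]:
            break
        yield image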
Exemplo n.º 34
0
def start_camera():
    # construct the argument parser and parse the arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("-c",
                    "--cascade",
                    required=True,
                    help="path to where the face cascade resides")
    ap.add_argument("-e",
                    "--encodings",
                    required=True,
                    help="path to serialized db of facial encodings")
    ap.add_argument("-o",
                    "--output",
                    required=True,
                    help="path to output directory")
    args = vars(ap.parse_args())

    # load the known faces and embeddings along with OpenCV's Haar
    # cascade for face detection
    print("[INFO] loading encodings + face detector...")
    data = pickle.loads(open(args["encodings"], "rb").read())
    detector = cv2.CascadeClassifier(args["cascade"])

    # initialize the video stream and allow the camera sensor to warm up
    print("[INFO] starting video stream...")
    #vs = VideoStream(src=0).start()
    vs = VideoStream(usePiCamera=True).start()
    time.sleep(2.0)
    total = 0

    # start the FPS counter
    fps = FPS().start()

    flag = True
    # loop over frames from the video file stream
    while True:
        # grab the frame from the threaded video stream and resize it
        # to 500px (to speedup processing)
        frame = vs.read()
        orig = frame.copy()
        frame = imutils.resize(frame, width=500)

        # convert the input frame from (1) BGR to grayscale (for face
        # detection) and (2) from BGR to RGB (for face recognition)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

        # detect faces in the grayscale frame
        rects = detector.detectMultiScale(gray,
                                          scaleFactor=1.1,
                                          minNeighbors=5,
                                          minSize=(30, 30),
                                          flags=cv2.CASCADE_SCALE_IMAGE)

        # OpenCV returns bounding box coordinates in (x, y, w, h) order
        # but we need them in (top, right, bottom, left) order, so we
        # need to do a bit of reordering
        boxes = [(y, x + w, y + h, x) for (x, y, w, h) in rects]

        # compute the facial embeddings for each face bounding box
        encodings = face_recognition.face_encodings(rgb, boxes)
        names = []

        # loop over the facial embeddings
        for encoding in encodings:
            # attempt to match each face in the input image to our known
            # encodings
            matches = face_recognition.compare_faces(data["encodings"],
                                                     encoding)
            name = "Unknown"

            # check to see if we have found a match
            if True in matches:
                # find the indexes of all matched faces then initialize a
                # dictionary to count the total number of times each face
                # was matched
                matchedIdxs = [i for (i, b) in enumerate(matches) if b]
                counts = {}

                # loop over the matched indexes and maintain a count for
                # each recognized face face
                for i in matchedIdxs:
                    name = data["names"][i]
                    counts[name] = counts.get(name, 0) + 1

                # determine the recognized face with the largest number
                # of votes (note: in the event of an unlikely tie Python
                # will select first entry in the dictionary)
                name = max(counts, key=counts.get)

            # update the list of names
            names.append(name)

        # loop over the recognized faces
        for ((top, right, bottom, left), name) in zip(boxes, names):
            # draw the predicted face name on the image
            cv2.rectangle(frame, (left, top), (right, bottom), (0, 255, 0), 2)
            y = top - 15 if top - 15 > 15 else top + 15
            cv2.putText(frame, name, (left, y), cv2.FONT_HERSHEY_SIMPLEX, 0.75,
                        (0, 255, 0), 2)
            # LED turnon
            if flag and name == 'rayhu':
                GPIO.output(LED1_PIN, GPIO.HIGH)
                GPIO.output(LED2_PIN, GPIO.HIGH)
                GPIO.output(LED3_PIN, GPIO.HIGH)
                flag = False
                # take picture
                p = os.path.sep.join(
                    [args["output"], "{}.png".format(str(total).zfill(5))])
                cv2.imwrite(p, orig)
                total += 1
                print("picture has been taken!")

        # display the image to our screen
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF

        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break

        # update the FPS counter
        fps.update()

    # stop the timer and display FPS information
    fps.stop()
    print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

    # do a bit of cleanup
    cv2.destroyAllWindows()
    vs.stop()

    # LED turnoff
    GPIO.output(LED1_PIN, GPIO.LOW)
    GPIO.output(LED2_PIN, GPIO.LOW)
    GPIO.output(LED3_PIN, GPIO.LOW)
    GPIO.cleanup()  # ensure all GPIO pins used are restored to their default state when the program ends
args = vars(ap.parse_args())

CLASSES = [
    "background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus",
    "car", "cat", "chair", "cow", "diningtable", "dog", "horse", "motorbike",
    "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"
]
COLORS = np.random.uniform(0, 255, size=(len(CLASSES), 3))
print("[INFO] loading model...")
net = cv2.dnn.readNetFromCaffe(args["prototxt"], args["model"])

print("[INFO] starting video stream...")
vs = VideoStream(src=0).start()
# vs = VideoStream(usePiCamera=True).start()
time.sleep(2.0)
fps = FPS().start()

while True:
    frame = vs.read()
    frame = imutils.resize(frame, width=400)

    (h, w) = frame.shape[:2]
    blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)), 0.007843,
                                 (300, 300), 127.5)

    net.setInput(blob)
    detections = net.forward()

    for i in np.arange(0, detections.shape[2]):
        confidence = detections[0, 0, i, 2]
Exemplo n.º 36
0
class DroidVisionThread(threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self)
        self.running = True
        self.fps_counter = FPS().start()
        self.camera = PiVideoStream(resolution=(config.RAW_FRAME_WIDTH, config.RAW_FRAME_HEIGHT))
        self.camera.start()
        time.sleep(config.CAMERA_WARMUP_TIME) # wait for camera to initialise
        self.frame = None
        self.frame_chroma = None
        self.centre = config.CENTRE
        self.can_see_lines = False
        self.last_yellow_mean = config.WIDTH * 0.8
        self.last_blue_mean = config.WIDTH * 0.2
        self.desired_steering = config.NEUTRAL_STEERING # 0 = left, 0.5 = center, 1 = right
        self.desired_throttle = config.NEUTRAL_THROTTLE # 0 = stop, 0.5 = medium speed, 1 = fastest

    def run(self):
        debug("DroidVisionThread: Thread started")
        self.vision_processing()
        self.camera.stop()
        cv2.destroyAllWindows()
        debug("DroidVisionThread: Thread stopped")

    def stop(self):
        self.running = False
        debug("DroidVisionThread: Stopping thread")

    def vision_processing(self):
        while self.running:
            self.grab_frame()

            # colour mask
            blue_mask = cv2.inRange(self.frame_chroma, config.BLUE_CHROMA_LOW, config.BLUE_CHROMA_HIGH)
            yellow_mask = cv2.inRange(self.frame_chroma, config.YELLOW_CHROMA_LOW, config.YELLOW_CHROMA_HIGH)
            colour_mask = cv2.bitwise_or(blue_mask, yellow_mask)
            colour_mask = cv2.erode(colour_mask, config.ERODE_KERNEL)
            colour_mask = cv2.dilate(colour_mask, config.DILATE_KERNEL)
            
            # lines
            lines = cv2.HoughLinesP(colour_mask, config.HOUGH_LIN_RES, config.HOUGH_ROT_RES, config.HOUGH_VOTES, config.HOUGH_MIN_LEN, config.HOUGH_MAX_GAP)
            blue_lines = np.array([])
            yellow_lines = np.array([])
            if lines is not None:
                for line in lines:
                    x1,y1,x2,y2 = line[0]
                    angle = np.rad2deg(np.arctan2(y2-y1, x2-x1))
                    if config.MIN_LINE_ANGLE < abs(angle) < config.MAX_LINE_ANGLE:
                        if config.IMSHOW:
                            cv2.line(self.frame, (x1,y1), (x2,y2), (0,0,255), 1)
                        if angle > 0:
                            yellow_lines = np.append(yellow_lines, [x1, x2])
                        elif angle < 0:
                            blue_lines = np.append(blue_lines, [x1, x2])

            # find centre
            blue_mean = self.last_blue_mean
            yellow_mean = self.last_yellow_mean
            if len(blue_lines):
                blue_mean = int(np.mean(blue_lines))
            if len(yellow_lines):
                yellow_mean = int(np.mean(yellow_lines))
            self.centre = (blue_mean + yellow_mean) / 2.0
            self.last_blue_mean = blue_mean
            self.last_yellow_mean = yellow_mean

            self.can_see_lines = (len(blue_lines) or len(yellow_lines))

            # set steering and throttle
            self.desired_steering = (1.0 - (self.centre / config.WIDTH))
            if len(blue_lines) or len(yellow_lines):
                self.desired_throttle = 0.22
            else:
                self.desired_throttle = 0

            if config.IMSHOW:
                cv2.circle(self.frame, (int(self.centre), config.HEIGHT - 20), 10, (0,0,255), -1)
                cv2.imshow("colour_mask without noise", colour_mask)
                cv2.imshow("raw frame", self.frame)
                cv2.waitKey(1)

    def grab_frame(self):
        self.fps_counter.update()
        # grab latest frame and index the ROI
        self.frame = self.camera.read()
        self.frame = self.frame[config.ROI_YMIN:config.ROI_YMAX, config.ROI_XMIN:config.ROI_XMAX]
        # convert to chromaticity colourspace
        B = self.frame[:, :, 0].astype(np.uint16)
        G = self.frame[:, :, 1].astype(np.uint16)
        R = self.frame[:, :, 2].astype(np.uint16)
        Y = 255.0 / (B + G + R + 1)  # +1 guards against division by zero on pure black pixels
        b = (B * Y).astype(np.uint8)
        g = (G * Y).astype(np.uint8)
        r = (R * Y).astype(np.uint8)
        self.frame_chroma = cv2.merge((b,g,r))

    def get_fps(self):
        self.fps_counter.stop()
        return self.fps_counter.fps()

    def get_error(self):
        return (config.CENTRE - self.centre)

    def get_steering_throttle(self):
        return self.desired_steering, self.desired_throttle
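
As a usage sketch only (assuming the `config` module supplies the constants referenced above), the thread is meant to be polled by a driving loop along these lines:

import time

# illustrative only: poll the vision thread for steering/throttle commands
vision = DroidVisionThread()
vision.start()
try:
    while True:
        steering, throttle = vision.get_steering_throttle()
        # steering: 0.0 = full left, 0.5 = straight ahead, 1.0 = full right
        print("steering=%.2f throttle=%.2f error=%.1f" %
              (steering, throttle, vision.get_error()))
        time.sleep(0.05)
except KeyboardInterrupt:
    vision.stop()
    print("average FPS: %.2f" % vision.get_fps())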
Exemplo n.º 37
0
def detect_motion(frameCount):
    # grab global references to the video stream, output frame, and
    # lock variables
    global vs, outputFrame, lock

    cctvpath = "rtsp://10.1.1.10:554/rtsp_live/profile_token_0"
    cap = cv2.VideoCapture(cctvpath)
    #cap = cv2.VideoCapture('test_videos/v2.mp4')

    W = None
    H = None

    # instantiate our centroid tracker, then initialize a list to store
    # each of our dlib correlation trackers, followed by a dictionary to
    # map each unique object ID to a TrackableObject
    ct = CentroidTracker(maxDisappeared=40, maxDistance=50)
    trackers = []
    trackableObjects = {}

    totalExit = 0
    totalEnterance = 0

    fps = FPS().start()

    # loop over frames from the video stream
    while True:

        rects = []
        trackers = []

        r, img = cap.read()

        if img is None:
            break

        #img = cv2.resize(img, (1280, 720))
        img = imutils.resize(img, width=1024)
        rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

        if W is None or H is None:
            (H, W) = img.shape[:2]

        borderPositionX = W - (W // 4)
        borderWidth = 40

        cv2.rectangle(img, (borderPositionX, 0),
                      (borderPositionX + borderWidth, H), (0, 255, 255), 2)

        boxes, scores, classes, num = odapi.processFrame(img)

        for i in range(len(boxes)):
            # Class 1 represents human
            if classes[i] == 1 and scores[i] > threshold:
                box = boxes[i]
                cv2.rectangle(img, (box[1], box[0]), (box[3], box[2]),
                              (255, 0, 0), 2)
                cv2.putText(img, 'Person : ' + str(round(scores[i], 2)),
                            (box[1], box[0] - 10), cv2.FONT_HERSHEY_SIMPLEX,
                            0.5, (36, 255, 12), 2)

                # construct a dlib rectangle object from the bounding
                # box coordinates and then start the dlib correlation
                # tracker
                tracker = dlib.correlation_tracker()
                rect = dlib.rectangle(box[1], box[0], box[3], box[2])
                tracker.start_track(rgb, rect)

                # add the tracker to our list of trackers so we can
                # utilize it during skip frames
                trackers.append(tracker)

        for tracker in trackers:
            # update the tracker and grab the updated position
            tracker.update(rgb)
            pos = tracker.get_position()

            # unpack the position object
            startX = int(pos.left())
            startY = int(pos.top())
            endX = int(pos.right())
            endY = int(pos.bottom())

            start = (startX, startY)
            end = (endX, endY)

            color = list(np.random.random(size=3) * 256)

            cv2.rectangle(img, start, end, color, 4)

            rects.append((startX, startY, endX, endY))

        # use the centroid tracker to associate the (1) old object
        # centroids with (2) the newly computed object centroids
        objects = ct.update(rects)

        # loop over the tracked objects
        for (objectID, centroid) in objects.items():
            # check to see if a trackable object exists for the current
            # object ID

            to = trackableObjects.get(objectID, None)

            # if there is no existing trackable object, create one
            if to is None:
                to = TrackableObject(objectID, centroid)
            else:
                #y directions of the centroids
                y = [c[1] for c in to.centroids]

                #x directions of the centroids
                x = [c[0] for c in to.centroids]

                avgPosX = np.mean(x)
                startPositionX = centroid[0]

                direction = centroid[0] - avgPosX
                to.centroids.append(centroid)

                if not to.counted:
                    #if the object has not been counted but its start and average
                    #positions are both on the same side of the border, it has not
                    #crossed, so leave the counters unchanged
                    #(a worked numeric example with concrete values follows this function)
                    if (borderPositionX > startPositionX and borderPositionX >
                            avgPosX) or (borderPositionX < startPositionX
                                         and borderPositionX < avgPosX):
                        pass
                    else:
                        # if the direction is negative (indicating the object
                        # is moving up) AND the centroid is above the center
                        # line, count the object
                        if direction < 0 and centroid[0] < borderPositionX:
                            totalEnterance += 1
                            to.counted = True

                        # if the direction is positive (indicating the object
                        # is moving down) AND the centroid is below the
                        # center line, count the object
                        elif direction > 0 and centroid[0] > borderPositionX:
                            totalExit += 1
                            to.counted = True

            text = "ID {}".format(objectID)
            cv2.putText(img, text, (centroid[0] - 10, centroid[1] - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
            cv2.circle(img, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)

            trackableObjects[objectID] = to

        # construct a tuple of information we will be displaying on the
        # frame
        info = [("Enter", totalEnterance), ("Exit", totalExit),
                ("Person Inside", str(totalEnterance - totalExit))]

        # loop over the info tuples and draw them on our frame
        for (i, (k, v)) in enumerate(info):
            text = "{}: {}".format(k, v)
            cv2.putText(img, text, (10, H - ((i * 20) + 20)),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)

        fps.update()

        # acquire the lock, set the output frame, and release the
        # lock
        with lock:
            outputFrame = img.copy()

    fps.stop()
    print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
Exemplo n.º 38
0
#camera.resolution = (320, 240)
#camera.framerate = 32
#rawCapture = PiRGBArray(camera, size=(320, 240))
#stream = camera.capture_continuous(rawCapture, format="bgr", use_video_port=True)

# Construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", help="path to the video file")
ap.add_argument("-a", "--min-area", type=int, default=500, help="minimum area size")
args = vars(ap.parse_args())

# created a "threaded" video stream, allow the camera sensor to warmup, and start the FPS counter
print("[INFO] sampling THREAD frames from picamera module");
vs = PiVideoStream().start()
time.sleep(2.0)
fps = FPS().start()

# Initialize the first frame in the video stream
firstFrame = None

# Loop over the frames of the video
while True :
	# grab the current frame and initialize the occupied/unoccupied text
	frame = vs.read()
	text = "Unoccupied"
	
	# resize the frame, convert it to greyscale, and blur it
	frame = imutils.resize(frame, width=500)
	gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
	gray = cv2.GaussianBlur(gray, (21,21), 0)
	
Exemplo n.º 39
0
def run_program():
    # load our serialized model from disk
    print("[INFO] loading model...")
    network = cv2.dnn.readNetFromCaffe(conf["prototxt_path"],
                                       conf["model_path"])
    # network.setPreferableTarget(cv2.dnn.DNN_TARGET_MYRIAD)

    videoStream = initialize_video_stream()

    # construct alarm thread
    alarmThread = AlarmHandler(src=conf["alarm_source"])

    # initialize the frame dimensions (we'll set them as soon as we read
    # the first frame from the video)
    H = None
    W = None

    # instantiate our centroid tracker, then initialize a list to store
    # each of our dlib correlation trackers, followed by a dictionary to
    # map each unique object ID to a TrackableObject
    centroidTracker = CentroidTracker(maxDisappeared=conf["max_disappear"],
                                      maxDistance=conf["max_distance"])
    trackers = []
    trackableObjects = {}

    # keep the count of total number of frames
    totalFrames = 0

    # start the frames per second throughput estimator
    fps = FPS().start()

    # loop over the frames of the stream
    while True:
        # load the line configuration file
        lines_offset_conf = Conf(inputArguments["lines"])
        # instantiate license Numbers inputs Dictionary
        licenseNumberInputs = []
        # grab the next frame from the stream
        currentFrame = get_frame_from_video_stream(videoStream=videoStream)

        # check if the frame is None, if so, break out of the loop
        if currentFrame is None:
            break

        # resize the frame
        currentFrame = imutils.resize(currentFrame, width=conf["frame_width"])
        rgb = cv2.cvtColor(currentFrame, cv2.COLOR_BGR2RGB)

        # if the frame dimensions are empty, set them
        if W is None or H is None:
            (H, W) = currentFrame.shape[:2]

        # initialize our list of bounding box rectangles returned by
        # either (1) our object detector or (2) the correlation trackers
        boundingBoxes = []

        # check to see if we should run a more computationally expensive
        # object detection method to aid our tracker
        if totalFrames % conf["track_object"] == 0:
            detections = obtain_detections_in_frame(frame=currentFrame,
                                                    network=network)
            trackers = build_trackers_list_from_detections(
                detections, rgb, H, W)

        # otherwise, we should utilize our object *trackers* rather than
        # object *detectors* to obtain a higher frame processing
        # throughput
        else:
            boundingBoxes = build_bounding_box_list_from_trackers_list(
                trackers, rgb)

        # save clean original frame before drawing on it
        cleanFrame = currentFrame.copy()
        # draw 2 vertical lines and one horizontal line in the frame -- once an
        # object crosses these lines we will start the timers
        (LeftToRightLine, RightToLeftLine,
         HorizontalLine) = calculate_zone_boundaries(H, W, lines_offset_conf)
        draw_lines(LeftToRightLine, RightToLeftLine, HorizontalLine,
                   currentFrame, H, W)

        # use the centroid tracker to associate the (1) old object
        # centroids with (2) the newly computed object centroids
        objects = centroidTracker.update(boundingBoxes)
        # loop over the tracked objects
        for (objectID, centroid) in objects.items():
            trackingBoundingBox = centroidTracker.objectsToBoundingBoxes[
                centroid.tostring()]
            # check to see if a trackable object exists for the current
            # object ID
            currentTrackableObject = trackableObjects.get(objectID, None)
            # if there is no existing trackable object, create one
            if currentTrackableObject is None:
                currentTrackableObject = create_trackable_object(
                    RightToLeftLine, LeftToRightLine, HorizontalLine, centroid,
                    objectID)
            # otherwise, there is a trackable object
            else:
                append_license_input_info_to_inputs_list(
                    currentTrackableObject, licenseNumberInputs, objectID,
                    cleanFrame, trackingBoundingBox)
                check_and_handle_objects_location_in_frame(
                    currentTrackableObject, RightToLeftLine, LeftToRightLine,
                    HorizontalLine, objectID, centroid, cleanFrame,
                    currentFrame, alarmThread)

            # store the trackable object in our dictionary
            trackableObjects[objectID] = currentTrackableObject
            draw_information_on_object(currentFrame, centroid, objectID,
                                       currentTrackableObject,
                                       trackingBoundingBox)

        if not display_frame(currentFrame):
            break

        # increment the total number of frames processed thus far and
        # then update the FPS counter
        totalFrames += 1
        fps.update()

        stop_alarm_for_dissappeared_object(
            inputAlarmThread=alarmThread,
            inputObjectsList=centroidTracker.objects)

        # handle plate recognition from data collected earlier and assign it to an object
        handle_license_plates_in_frame(licenseNumberInputs, trackableObjects,
                                       totalFrames)

    end_program(fps, videoStream, alarmThread)
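
`build_trackers_list_from_detections` and `build_bounding_box_list_from_trackers_list` are project helpers that are not shown in this example. Based on the dlib correlation-tracker pattern used elsewhere in this collection, they plausibly look like the sketch below; the confidence threshold and the implementation details are assumptions, not the project's code.

import dlib
import numpy as np

def build_trackers_list_from_detections(detections, rgb, H, W, min_conf=0.4):
    # detections: raw output of a cv2.dnn forward() pass; start one dlib
    # correlation tracker per sufficiently confident detection
    trackers = []
    for i in np.arange(0, detections.shape[2]):
        if detections[0, 0, i, 2] < min_conf:
            continue
        box = detections[0, 0, i, 3:7] * np.array([W, H, W, H])
        (startX, startY, endX, endY) = box.astype("int")
        tracker = dlib.correlation_tracker()
        tracker.start_track(rgb, dlib.rectangle(int(startX), int(startY),
                                                int(endX), int(endY)))
        trackers.append(tracker)
    return trackers

def build_bounding_box_list_from_trackers_list(trackers, rgb):
    # advance each correlation tracker on the new frame and collect its box
    boundingBoxes = []
    for tracker in trackers:
        tracker.update(rgb)
        pos = tracker.get_position()
        boundingBoxes.append((int(pos.left()), int(pos.top()),
                              int(pos.right()), int(pos.bottom())))
    return boundingBoxes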
ap.add_argument("-d", "--display", type=int, default=-1,
    help="Whether or not frames should be displayed")
args = vars(ap.parse_args())
 
# initialize the camera and stream
camera = PiCamera()
camera.resolution = (320, 240)
camera.framerate = 32
rawCapture = PiRGBArray(camera, size=(320, 240))
stream = camera.capture_continuous(rawCapture, format="bgr",
    use_video_port=True)

# allow the camera to warmup and start the FPS counter
print("[INFO] sampling frames from `picamera` module...")
time.sleep(2.0)
fps = FPS().start()
 
# loop over some frames
for (i, f) in enumerate(stream):
    # grab the frame from the stream and resize it to have a maximum
    # width of 400 pixels
    frame = f.array
    frame = imutils.resize(frame, width=400)
 
    # check to see if the frame should be displayed to our screen
    if args["display"] > 0:
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF
 
    # clear the stream in preparation for the next frame and update
    # the FPS counter
    rawCapture.truncate(0)
    fps.update()
    def recognize(video):

        path = os.path.dirname(os.path.abspath("__file__")) + '/'
        embeddingsPath = path + 'Encoding_Data/'
        faceModel_path = 'shape_predictor_68_face_landmarks.dat'
        faceDbPath = path + 'FacesDB/'

        #        model = cls.loadPickleFile(embeddingsPath,file='FaceEncodingsModel.pkl')
        #getting the people list
        data = [d for d in os.listdir(embeddingsPath)
                if '.DS_Store' not in d][0]
        file = 'FaceEncodingsModel.pkl'
        with open(embeddingsPath + file, 'rb') as f:
            data = pickle.load(f)

        model = data
        person = model.classes_

        #get dlib's frontal face detector
        j = 0
        faceDetector = dlib.get_frontal_face_detector()
        #getting landmark Points
        landMarker = dlib.shape_predictor(faceModel_path)
        person_array = []
        count = 0
        cnt = 0
        len_faces = 0
        #cap = cv2.VideoCapture("vid2.mp4")

        cap = cv2.VideoCapture(video)
        fps = FPS().start()

        print("Input video is laoded... ")

        fourcc = cv2.VideoWriter_fourcc('X', '2', '6', '4')
        # the writer is created lazily inside the loop, once the first frame's size is known
        writer = None
        print("Entering the loop...")
        l = True

        while (l):
            ret, frame = cap.read()
            if (ret == True):
                frame = imutils.rotate_bound(frame, 90)
                frame = imutils.resize(frame, width=450)
                # skip frames while no face is visible: process every 6th frame
                # until a face is found, then process every frame
                if (len_faces == 0):
                    j = 6
                else:
                    j = 1

                if (cnt % j == 0):

                    #frame = imutils.rotate_bound(frame, 180)

                    (h, w) = frame.shape[:2]
                    #ret,frame = cap.read()
                    #print(ret)
                    if writer is None:
                        (h, w) = frame.shape[:2]
                        writer = cv2.VideoWriter('TestVideo.mp4', fourcc, 10.0,
                                                 (w, h), True)

            # =============================================================================
                    print('Starting up')

                    try:
                        faceLocs = face_recognition.face_locations(frame)

                        faceEncodings = face_recognition.face_encodings(
                            frame, faceLocs)
                        faces = faceDetector(frame, 1)
                        len_faces = len(faces)
                        print('len of Faces: ', len(faces))
                    except Exception as e:
                        print('error')
                        print(e)
                        continue

                    thresh = 0.6

                    cv2.putText(frame, 'Threshold = {}'.format(str(thresh)),
                                (0, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.6,
                                (200, 0, 0), 2)

                    for i in range(0, len(faces)):
                        newRect = dlib.rectangle(int(faces[i].left()),
                                                 int(faces[i].top()),
                                                 int(faces[i].right()),
                                                 int(faces[i].bottom()))

                        #Getting x,y,w,h coordinate
                        x = newRect.left()
                        y = newRect.top()
                        w = newRect.right() - x
                        h = newRect.bottom() - y
                        X, Y, W, H = x, y, w, h

                        prob = model.predict_proba(
                            np.array(faceEncodings[i]).reshape(1, -1))[0]
                        index = np.argmax(prob)

                        if np.max(prob) > float(thresh):
                            text = str(person[index] +
                                       str(np.round_(np.max(prob), 2)))
                            cv2.rectangle(frame, (x, y), (x + w, y + h),
                                          (0, 255, 0), 2)
                            cv2.putText(frame, text, (x, y),
                                        cv2.FONT_HERSHEY_SIMPLEX, 0.6,
                                        (0, 255, 0), 2)

                            remove_digits = str.maketrans('', '', digits)
                            name = person[index].translate(remove_digits)

                            if name not in person_array:
                                person_array.append(name)

                            path = os.path.dirname(
                                os.path.abspath("__file__")
                            ) + '/' + 'RecognizedFaces' + '/' + str(
                                text) + '.jpg'
                            cv2.imwrite(path, frame[y:y + h, x:x + w])
                        else:
                            text = 'Unknown'
                            cv2.rectangle(frame, (x, y), (x + w, y + h),
                                          (0, 0, 255), 1)
                            cv2.putText(frame, text, (x, y),
                                        cv2.FONT_HERSHEY_SIMPLEX, 0.6,
                                        (0, 0, 255), 2)

                            if not os.path.exists(faceDbPath + 'Unknown'):
                                os.mkdir(faceDbPath + 'Unknown')

                            path = faceDbPath + 'Unknown' + '/' + str(
                                cnt) + '.jpg'
                            cv2.imwrite(path, frame[y:y + h, x:x + w])

                    cv2.imshow('frame', frame)
                    writer.write(frame)
            else:
                l = False
            if cv2.waitKey(20) & 0xFF == ord('q'):
                break
            cnt += 1
        print("Done processing the video ...")
        #cap.release()
        cv2.destroyAllWindows()

        writer.release()
        return person_array