Code Example #1
File: detect.py Project: hender14/Raspberry-Pi4
def main(mode, tiny, iou_threshold, confidence_threshold, path):
  class_names, n_classes = load_class_names()
  if tiny:
    model = YOLOv3_tiny(n_classes=n_classes,
                        iou_threshold=iou_threshold,
                        confidence_threshold=confidence_threshold)
  else:
    model = YOLOv3(n_classes=n_classes,
                   iou_threshold=iou_threshold,
                   confidence_threshold=confidence_threshold)
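  # NOTE: tf.placeholder / tf.Session / tf.train.Saver are TensorFlow 1.x
  # graph-mode APIs; running this under TensorFlow 2.x requires tf.compat.v1
  # with v2 behaviour disabled.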
  inputs = tf.placeholder(tf.float32, [1, *model.input_size, 3])
  detections = model(inputs)
  saver = tf.train.Saver(tf.global_variables(scope=model.scope))

  with tf.Session() as sess:
    saver.restore(sess, './weights/model-tiny.ckpt' if tiny else './weights/model.ckpt')

    if mode == 'image':
      image = load_image(path, input_size=model.input_size)
      result = sess.run(detections, feed_dict={inputs: image})
      draw_boxes(path, boxes_dict=result[0], class_names=class_names, input_size=model.input_size)
      return

    elif mode == 'video':
      cv2.namedWindow("Detections")
      video = cv2.VideoCapture(path)
      fourcc = int(video.get(cv2.CAP_PROP_FOURCC))
      fps = video.get(cv2.CAP_PROP_FPS)
      frame_size = (int(video.get(cv2.CAP_PROP_FRAME_WIDTH)), int(video.get(cv2.CAP_PROP_FRAME_HEIGHT)))
      out = cv2.VideoWriter('./detections/video_output.mp4', fourcc, fps, frame_size)
      print("Video being saved at \"" + './detections/video_output.mp4' + "\"")
      print("Press 'q' to quit")
      while True:
        retval, frame = video.read()
        if not retval:
          break
        resized_frame = cv2.resize(frame, dsize=tuple(model.input_size[::-1]), interpolation=cv2.INTER_NEAREST)
        result = sess.run(detections, feed_dict={inputs: [resized_frame]})
        draw_boxes_frame(frame, frame_size, result, class_names, model.input_size)
        cv2.imshow("Detections", frame)
        key = cv2.waitKey(1) & 0xFF
        if key == ord('q'):
          break
        out.write(frame)
      cv2.destroyAllWindows()
      video.release()
      out.release()  # finalize the output file so it is playable
      return

    elif mode == 'webcam':
      while True:
        frame, addr = receive()
        frame_size = (frame.shape[1], frame.shape[0])
        resized_frame = cv2.resize(frame, dsize=tuple(model.input_size[::-1]), interpolation=cv2.INTER_NEAREST)
        result = sess.run(detections, feed_dict={inputs: [resized_frame]})
        draw_boxes_frame(frame, frame_size, result, class_names, model.input_size)
        # Encode the annotated frame as JPEG and send it back over UDP.
        retval, jpg = cv2.imencode(".jpg", frame)
        udpServSock.sendto(jpg.tobytes(), addr)
      return
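Note: receive() and udpServSock are defined elsewhere in the hender14/Raspberry-Pi4 project, not in this excerpt. Purely as an illustration, here is a minimal sketch of a UDP receiver that would fit this call site, assuming each frame arrives as a single JPEG datagram; the bind address and port are assumptions, not the project's actual values.

import socket
import cv2
import numpy as np

udpServSock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
udpServSock.bind(("0.0.0.0", 5005))  # hypothetical address/port

def receive(bufsize=65535):
  """Receive one JPEG-encoded frame sent as a single UDP datagram."""
  data, addr = udpServSock.recvfrom(bufsize)
  frame = cv2.imdecode(np.frombuffer(data, np.uint8), cv2.IMREAD_COLOR)
  return frame, addr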
Code Example #2
def main(mode, tiny, iou_threshold, confidence_threshold, path):
    rectangleColor = (0, 255, 0)
    frameCounter = 0
    currentCarID = 0
    fps = 0

    carTracker = {}
    carNumbers = {}
    carLocation1 = {}
    carLocation2 = {}
    speed = [None] * 1000  # indexed by carID; assumes at most 1000 tracked cars

    class_names, n_classes = load_class_names()
    if tiny:
        model = YOLOv3_tiny(n_classes=n_classes,
                            iou_threshold=iou_threshold,
                            confidence_threshold=confidence_threshold)
    else:
        model = YOLOv3(n_classes=n_classes,
                       iou_threshold=iou_threshold,
                       confidence_threshold=confidence_threshold)
    inputs = tf.placeholder(tf.float32, [1, *model.input_size, 3])
    detections = model(inputs)
    saver = tf.train.Saver(tf.global_variables(scope=model.scope))

    with tf.Session() as sess:
        saver.restore(
            sess, './weights/model-tiny.ckpt' if tiny else './weights/model.ckpt')

        if mode == 'image':
            image = load_image(path, input_size=model.input_size)
            result = sess.run(detections, feed_dict={inputs: image})
            draw_boxes(
                path, boxes_dict=result[0], class_names=class_names, input_size=model.input_size)
            return

        cv2.namedWindow("Detections")

        print('Video being saved at "./detections/video_output.mp4"')
        print("Press 'q' to quit")
        pathIn = './data/PETS_2000_Frames/'
        files = os.listdir(pathIn)
        files.sort(key=lambda x: int(x.split('.')[0].split('_')[1]))
        frame = cv2.imread(pathIn + files[0])
        frame_size = frame.shape[:2][::-1]
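        # 'mp4v' selects the MPEG-4 Part 2 codec, matching the .mp4 container.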
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        
        out = cv2.VideoWriter(
            './detections/video_output.mp4', fourcc, 20, frame_size)
        for i in range(len(files)):
            frame = cv2.imread(pathIn+files[i])
            resized_frame = cv2.resize(frame, dsize=tuple(model.input_size[::-1]), interpolation=cv2.INTER_NEAREST)
            result = sess.run(detections, feed_dict={inputs: [resized_frame]})
            draw_boxes_frame(frame, frame_size, result,
                             class_names, model.input_size)
            start_time = time.time()
            if frame is None:
                break

            frame = cv2.resize(frame, frame_size)
            resultImage = frame

            frameCounter = frameCounter + 1

            carIDtoDelete = []

            for carID in carTracker.keys():
                trackingQuality = carTracker[carID].update(frame)

                # update() returns dlib's tracking confidence; a score below 7
                # is treated as a lost track.
                if trackingQuality < 7:
                    carIDtoDelete.append(carID)

            for carID in carIDtoDelete:
                print('Removing carID ' + str(carID) + ' from list of trackers.')
                print('Removing carID ' + str(carID) + ' previous location.')
                print('Removing carID ' + str(carID) + ' current location.')
                carTracker.pop(carID, None)
                carLocation1.pop(carID, None)
                carLocation2.pop(carID, None)

            # Re-detect cars with the Haar cascade every 10th frame;
            # carCascade is a cv2.CascadeClassifier loaded elsewhere in the project.
            if not (frameCounter % 10):
                gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                cars = carCascade.detectMultiScale(
                    gray, 1.1, 13, 18, (24, 24))

                for (_x, _y, _w, _h) in cars:
                    x = int(_x)
                    y = int(_y)
                    w = int(_w)
                    h = int(_h)

                    x_bar = x + 0.5 * w
                    y_bar = y + 0.5 * h

                    matchCarID = None

                    for carID in carTracker.keys():
                        trackedPosition = carTracker[carID].get_position()

                        t_x = int(trackedPosition.left())
                        t_y = int(trackedPosition.top())
                        t_w = int(trackedPosition.width())
                        t_h = int(trackedPosition.height())

                        t_x_bar = t_x + 0.5 * t_w
                        t_y_bar = t_y + 0.5 * t_h

                        # Match when each box's centre lies inside the other box.
                        if ((t_x <= x_bar <= (t_x + t_w)) and (t_y <= y_bar <= (t_y + t_h))
                                and (x <= t_x_bar <= (x + w)) and (y <= t_y_bar <= (y + h))):
                            matchCarID = carID

                    if matchCarID is None:
                        print('Creating new tracker ' + str(currentCarID))

                        tracker = dlib.correlation_tracker()
                        tracker.start_track(
                            frame, dlib.rectangle(x, y, x + w, y + h))

                        carTracker[currentCarID] = tracker
                        carLocation1[currentCarID] = [x, y, w, h]

                        currentCarID = currentCarID + 1

            for carID in carTracker.keys():
                trackedPosition = carTracker[carID].get_position()
                t_x = int(trackedPosition.left())
                t_y = int(trackedPosition.top())
                t_w = int(trackedPosition.width())
                t_h = int(trackedPosition.height())
                carLocation2[carID] = [t_x, t_y, t_w, t_h]

            end_time = time.time()

            if end_time != start_time:
                fps = 1.0 / (end_time - start_time)
            # Update each car's stored location and annotate its estimated speed.
            for carID in carLocation1.keys():
                [x1, y1, w1, h1] = carLocation1[carID]
                [x2, y2, w2, h2] = carLocation2[carID]
                carLocation1[carID] = [x2, y2, w2, h2]

                if [x1, y1, w1, h1] != [x2, y2, w2, h2]:
                    if speed[carID] is None or speed[carID] == 0:
                        # estimateSpeed is defined elsewhere in this project.
                        speed[carID] = estimateSpeed(
                            [x1, y1, w1, h1], [x2, y2, w2, h2])

                    if speed[carID] is not None:
                        print(str(int(speed[carID])) + " km/hr")
                        cv2.putText(resultImage, str(int(speed[carID])) + " km/hr",
                                    (int(x1 + w1 / 2), int(y1 - 5)),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.75,
                                    (255, 255, 255), 2)
            cv2.imshow("Detections", resultImage)
            key = cv2.waitKey(1) & 0xFF
            if key == ord('q'):
                break
            out.write(resultImage)
        out.release()  # finalize the output video
        cv2.destroyAllWindows()
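Note: estimateSpeed is not defined in this excerpt. Purely as a hedged sketch, a common pixel-displacement implementation assumes a fixed pixels-per-metre calibration and an effective frame rate; the ppm and fps defaults below are placeholders, not values from this project.

import math

def estimateSpeed(location1, location2, ppm=8.8, fps=18):
    # Euclidean displacement of the box's top-left corner, in pixels.
    d_pixels = math.sqrt((location2[0] - location1[0]) ** 2 +
                         (location2[1] - location1[1]) ** 2)
    d_meters = d_pixels / ppm      # ppm: pixels per metre, camera-specific
    return d_meters * fps * 3.6    # m/s -> km/h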
Code Example #3

if __name__ == '__main__':
    cam = cv2.VideoCapture(0)
    pred = Predictor('config.json', 'model.h5')
    person_pred = Person_Predictor(0.5, 0.5)
    pusher = DBPusher()
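    # Predictor, Person_Predictor, and DBPusher are this project's own classes;
    # 'config.json' and 'model.h5' are its config file and trained weights.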
    while True:
        # get frame
        ret, frame = cam.read()
        if not ret:  # stop if the camera yields no frame
            break

        # predict humans and generate points
        boxes, image = pred.predict(frame)
        im, res = person_pred.predict(frame)
        num = draw_boxes_frame(frame, image.shape, res,
                               person_pred.class_names,
                               person_pred.model.input_size, 'person')
        image_h, image_w, _ = image.shape
        points = bboxes_to_points(boxes, image_w, image_h)
        print(len(res[0][num]))
        points_2 = [(int((box[0] + box[2]) / 2), int((box[1] + box[3]) / 2))
                    for box in res[0][num]]
        points.extend(points_2)
        if not points:
            continue
        msts = Graph(coords=points).mst().prune_long_edges(1)

        clusters = []
        lines = []
        for mst in msts:
            r2 = poly_r_squared_of_points(mst.coords)
            print(r2)
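Note: Graph, mst(), and prune_long_edges() are project classes not shown in this excerpt. As an illustration of the underlying idea only, a minimum spanning tree over 2-D person centroids can be computed with SciPy, and dropping edges above a length threshold splits the tree into clusters of nearby people; the threshold below is an assumption.

import numpy as np
from scipy.spatial.distance import pdist, squareform
from scipy.sparse.csgraph import minimum_spanning_tree

coords = np.array([[0, 0], [1, 0], [1, 1], [5, 5]], dtype=float)
dist = squareform(pdist(coords))             # dense pairwise distances
mst = minimum_spanning_tree(dist).toarray()  # MST edge weights (upper triangle)
edges = np.argwhere(mst > 0)                 # (i, j) index pairs kept by the MST
kept = [(i, j) for i, j in edges if mst[i, j] <= 1.0]  # prune "long" edges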