Code example #1
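This first excerpt benchmarks raw engine throughput: it runs inference in a loop on a blank frame and prints FPS and latency. All of the main() excerpts in this collection rely on module-level setup that is not shown (imports, default paths, and helpers such as current_milli_time and the utils module). A minimal preamble along these lines would make them self-contained; the exact helper definitions are assumptions, while the default paths are the ones shown in code example #5:

import argparse
import time

import cv2
import numpy as np
from PIL import Image
from playsound import playsound  # used only by the pull-up counter example

from automl_video_ondevice import object_tracking as vot
import utils  # FpsCalculator / render_bbox helpers shipped with the examples

default_model = 'data/traffic_model_edgetpu.tflite'
default_labels = 'data/traffic_label_map.pbtxt'


def current_milli_time():
    """Returns the current wall-clock time in milliseconds."""
    return int(round(time.time() * 1000))
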
def main():
  parser = argparse.ArgumentParser()
  parser.add_argument('--model', help='model path', default=default_model)
  parser.add_argument(
      '--labels', help='label file path', default=default_labels)
  parser.add_argument(
      '--threshold', type=float, default=0.2, help='class score threshold')
  args = parser.parse_args()

  print('Loading %s with %s labels.' % (args.model, args.labels))

  config = vot.ObjectTrackingConfig(score_threshold=args.threshold)
  engine = vot.load(args.model, args.labels, config)
  input_size = engine.input_size()
  fps_calculator = utils.FpsCalculator()

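  # A blank frame at the model's input size: running the engine on it measures
  # pure inference throughput, with no camera capture or decoding overhead.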
  blank_image = np.zeros((input_size.height, input_size.width, 3),
                         dtype=np.uint8)

  while True:
    # Run inference engine to populate annotations array.
    annotations = []
    timestamp = int(round(time.time() * 1000))
    engine.run(timestamp, blank_image, annotations)

    # Calculate FPS and latency.
    fps, latency = fps_calculator.measure()
    print('FPS: {}\t\t\tLatency: {}ms'.format(fps, latency))
Code example #2
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', help='model path', default=default_model)
    parser.add_argument('--labels',
                        help='label file path',
                        default=default_labels)
    parser.add_argument('--threshold',
                        type=float,
                        default=0.2,
                        help='class score threshold')
    parser.add_argument('--use_tracker',
                        action='store_true',
                        help='use an object tracker')
    args = parser.parse_args()

    print('Loading %s with %s labels.' % (args.model, args.labels))

    config = vot.ObjectTrackingConfig(
        score_threshold=args.threshold,
        tracker=vot.Tracker.BASIC if args.use_tracker else vot.Tracker.NONE)
    engine = vot.load(args.model, args.labels, config)
    input_size = engine.input_size()
    fps_calculator = utils.FpsCalculator()

    cap = cv2.VideoCapture(0)

    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break

        # Converts the BGR frame to an RGB PIL image and resizes it to the
        # model's expected input size.
        pil_im = Image.fromarray(cv2.cvtColor(frame,
                                              cv2.COLOR_BGR2RGB)).resize(
                                                  (input_size.width,
                                                   input_size.height))

        # Grabs current millisecond for timestamp.
        timestamp = current_milli_time()

        # Run inference engine to populate annotations array.
        annotations = []
        if engine.run(timestamp, pil_im, annotations):
            frame = utils.render_bbox(frame, annotations)

        # Calculate FPS, then visualize it.
        fps, latency = fps_calculator.measure()
        frame = cv2.putText(frame, '{} fps'.format(fps), (0, 20),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
        frame = cv2.putText(frame, '{} ms'.format(latency), (0, 40),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)

        cv2.imshow('frame', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()
Code example #3
def main():
    default_video = 'data/traffic_frames.mp4'
    default_model = 'data/traffic_model_edgetpu.tflite'
    default_labels = 'data/traffic_label_map.pbtxt'
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', help='model path', default=default_model)
    parser.add_argument('--labels',
                        help='label file path',
                        default=default_labels)
    parser.add_argument('--input_video',
                        help='input video file path',
                        default=default_video)
    parser.add_argument('--output_video',
                        help='output video file path',
                        default='')
    parser.add_argument('--threshold',
                        type=float,
                        default=0.2,
                        help='class score threshold')
    parser.add_argument('--use_tracker',
                        action='store_true',
                        help='use an object tracker')
    args = parser.parse_args()

    print('Loading %s with %s labels.' % (args.model, args.labels))

    config = vot.ObjectTrackingConfig(
        score_threshold=args.threshold,
        tracker=vot.Tracker.BASIC if args.use_tracker else vot.Tracker.NONE)
    engine = vot.load(args.model, args.labels, config)
    input_size = engine.input_size()

    cap = cv2.VideoCapture(args.input_video)

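    # When --output_video is set, annotated frames are written to that file at
    # the source FPS and resolution; otherwise they are shown in a window.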
    writer = None
    if cap.isOpened() and args.output_video:
        writer = cv2.VideoWriter(args.output_video,
                                 cv2.VideoWriter_fourcc(*'mp4v'),
                                 cap.get(cv2.CAP_PROP_FPS),
                                 (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
                                  int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))))

    timestamp = 0
    while cap.isOpened():
        ret, frame = cap.read()

        if not ret:
            break

        # Resizes frame.
        resized_frame = cv2.resize(frame,
                                   (input_size.width, input_size.height))

        # Advances a synthetic timestamp by one frame period (in microseconds).
        timestamp = int(timestamp +
                        (1 / cap.get(cv2.CAP_PROP_FPS)) * 1000 * 1000)

        # Run inference engine to populate annotations array.
        annotations = []
        if engine.run(timestamp, resized_frame, annotations):
            frame = utils.render_bbox(frame, annotations)

        if writer:
            writer.write(frame)
        else:
            cv2.imshow('frame', frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

    if writer:
        writer.release()
    else:
        cv2.destroyAllWindows()
    cap.release()
Code example #4
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', help='model path', default=default_model)
    parser.add_argument('--labels',
                        help='label file path',
                        default=default_labels)
    parser.add_argument('--threshold',
                        type=float,
                        default=0.2,
                        help='class score threshold')
    parser.add_argument('--use_tracker',
                        action='store_true',
                        help='use an object tracker')
    args = parser.parse_args()

    print('Loading %s with %s labels.' % (args.model, args.labels))
    print('Welcome to Dominadas Madafakers')

    config = vot.ObjectTrackingConfig(
        score_threshold=args.threshold,
        tracker=vot.Tracker.BASIC if args.use_tracker else vot.Tracker.NONE)
    engine = vot.load(args.model, args.labels, config)
    input_size = engine.input_size()
    fps_calculator = utils.FpsCalculator()

    cap = cv2.VideoCapture(0)

    # Pull-up ('dominada') counter.
    dominadaCount = 0

    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break

        # Converts the frame to a PIL image (note: the channels stay in
        # OpenCV's BGR order here) and resizes it to the model input size.
        pil_im = Image.fromarray(frame).convert('RGB').resize(
            (input_size.width, input_size.height))

        # Grabs current millisecond for timestamp.
        timestamp = current_milli_time()

        # Run inference engine to populate annotations array.
        annotations = []

        if engine.run(timestamp, pil_im, annotations):
            frame = utils.render_bbox(frame, annotations)
            print("JEGF CODE")
            #print("Frame:")
            #print(frame)
            print("annotations:")
            for annotation in annotations:
                #print(annotation.class_name+str(annotation.confidence_score))
                if (annotation.class_name == "dominada"):
                    print(annotation.class_name +
                          str(annotation.confidence_score))

                if (annotation.confidence_score > 0.50):
                    print("More than 50%")
                    if (annotation.class_name == "dominada"):
                        dominadaCount = dominadaCount + 1
                        try:
                            playsound(
                                "/home/mendel/dominadas-autoML/automl-videoOnDevice/examples/referee.mp3"
                            )
                            print("Playing a sound")
                        except Exception:
                            print("Couldn't play a sound")

        # Calculate FPS, then visualize it.
        fps, latency = fps_calculator.measure()

        #frame = cv2.putText(frame, '{} fps'.format(fps), (0, 20),
        #cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
        #frame = cv2.putText(frame, '{} ms'.format(latency), (0, 40),
        #cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
        frame = cv2.putText(frame, ('Dominadas:' + str(dominadaCount)),
                            (0, 66), cv2.FONT_HERSHEY_SIMPLEX, 1.1,
                            (0, 0, 255), 2)

        cv2.imshow('frame', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()
Code example #5
import argparse

from PIL import Image
from automl_video_ondevice import object_tracking as vot

default_model = 'data/traffic_model_edgetpu.tflite'
default_labels = 'data/traffic_label_map.pbtxt'

if __name__ == '__main__':
  parser = argparse.ArgumentParser()
  parser.add_argument('--model', help='model path', default=default_model)
  parser.add_argument(
      '--labels', help='label file path', default=default_labels)
  parser.add_argument(
      '--threshold', type=float, default=0.2, help='class score threshold')
  args = parser.parse_args()

  config = vot.ObjectTrackingConfig(score_threshold=args.threshold)
  engine = vot.load(args.model, args.labels, config)
  input_size = engine.input_size()

  for i in range(1, 9):
    image = Image.open('data/traffic_frames/000%d.bmp' %
                       i).convert('RGB').resize(
                           (input_size.width, input_size.height))

    out = []
    if engine.run(1, image, out):
      for annotation in out:
        print('{}: {} [{}, {}, {}, {}]'.format(annotation.class_name,
                                               annotation.confidence_score,
                                               annotation.bbox.top,
                                               annotation.bbox.left,
                                               annotation.bbox.bottom,
                                               annotation.bbox.right))
Code example #6
def main():
    default_model = 'data/traffic_model_tftrt.pb'
    default_labels = 'data/traffic_label_map.pbtxt'
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', help='model path', default=default_model)
    parser.add_argument('--labels',
                        help='label file path',
                        default=default_labels)
    parser.add_argument('--threshold',
                        type=float,
                        default=0.25,
                        help='class score threshold')
    parser.add_argument('--use_tracker',
                        action='store_true',
                        help='use an object tracker')
    parser.add_argument(
        '--video_device',
        help='-1 for ribbon-cable camera. >= 0 for USB camera. '
        'If both are plugged in, the USB camera will have the ID "1".',
        type=int,
        default=-1)
    parser.add_argument('--video_width',
                        help='Input video width.',
                        type=int,
                        default=1280)
    parser.add_argument('--video_height',
                        help='Input video height.',
                        type=int,
                        default=720)
    args = parser.parse_args()

    print('Loading %s with %s labels.' % (args.model, args.labels))

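    # Tracker.FAST_INACCURATE favors speed over tracking quality (as the name
    # suggests); Tracker.NONE disables tracking entirely.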
    config = vot.ObjectTrackingConfig(score_threshold=args.threshold,
                                      tracker=vot.Tracker.FAST_INACCURATE if
                                      args.use_tracker else vot.Tracker.NONE)
    engine = vot.load(args.model, args.labels, config)
    input_size = engine.input_size()
    fps_calculator = utils.FpsCalculator()

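    # Builds a GStreamer capture pipeline: v4l2src for a USB camera
    # (/dev/video<N>), nvarguscamerasrc for the ribbon-cable (CSI) camera.
    # Both scale the stream to the requested size and hand RGB frames to
    # OpenCV through appsink.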
    if args.video_device >= 0:
        cap = cv2.VideoCapture(
            'v4l2src device=/dev/video{} ! videoconvert ! '
            'videoscale method=0 add-borders=false ! '
            'video/x-raw, width={}, height={}, format=RGB ! videoconvert ! '
            'appsink'.format(args.video_device, args.video_width,
                             args.video_height), cv2.CAP_GSTREAMER)
    else:
        cap = cv2.VideoCapture(
            'nvarguscamerasrc ! nvvidconv ! '
            'video/x-raw, format=(string)BGRx ! videoconvert ! '
            'videoscale method=0 add-borders=false ! '
            'video/x-raw, width={}, height={}, format=RGB ! videoconvert ! '
            'appsink'.format(args.video_width, args.video_height),
            cv2.CAP_GSTREAMER)

    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break

        # Resizes frame.
        resized_frame = cv2.resize(frame,
                                   (input_size.width, input_size.height))

        # Grabs current millisecond for timestamp.
        timestamp = current_milli_time()

        # Run inference engine to populate annotations array.
        annotations = []
        if engine.run(timestamp, resized_frame, annotations):
            frame = utils.render_bbox(frame, annotations)

        # Calculate FPS, then visualize it.
        fps, latency = fps_calculator.measure()
        frame = cv2.putText(frame, '{} fps'.format(fps), (0, 20),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
        frame = cv2.putText(frame, '{} ms'.format(latency), (0, 40),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)

        cv2.imshow('frame', frame)

        key = cv2.waitKey(1)
        if key & 0xFF == ord('q') or key == 27:
            break

    cap.release()
    cv2.destroyAllWindows()
Code example #7
def main():
    #default_video = '/home/mendel/dominadas-autoML/automl-videoOnDevice/data/2020-08-30_1744.mp4'
    #default_video = '/home/mendel/dominadas-autoML/automl-videoOnDevice/data/2020-08-22_1537.mp4'
    #default_video = '/home/mendel/dominadas-autoML/automl-videoOnDevice/data/2020-08-18_1545.mp4'
    #default_video = '/home/mendel/dominadas-autoML/automl-videoOnDevice/data/2020-08-18_1743.mp4'
    default_video = '/home/mendel/dominadas-autoML/automl-videoOnDevice/data/2020-09-04_1727.mp4'

    default_model = 'data/traffic_model_edgetpu.tflite'
    default_labels = 'data/traffic_label_map.pbtxt'
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', help='model path', default=default_model)
    parser.add_argument('--labels',
                        help='label file path',
                        default=default_labels)
    parser.add_argument('--input_video',
                        help='input video file path',
                        default=default_video)
    parser.add_argument('--output_video',
                        help='output video file path',
                        default='')
    parser.add_argument('--threshold',
                        type=float,
                        default=0.2,
                        help='class score threshold')
    parser.add_argument('--use_tracker',
                        action='store_true',
                        help='use an object tracker')
    args = parser.parse_args()

    print('Loading %s with %s labels.' % (args.model, args.labels))

    config = vot.ObjectTrackingConfig(
        score_threshold=args.threshold,
        tracker=vot.Tracker.BASIC if args.use_tracker else vot.Tracker.NONE)
    engine = vot.load(args.model, args.labels, config)
    input_size = engine.input_size()

    cap = cv2.VideoCapture(args.input_video)

    # Pull-up ('dominada') counter.
    dominadaCount = 0

    writer = None
    if cap.isOpened() and args.output_video:
        writer = cv2.VideoWriter(args.output_video,
                                 cv2.VideoWriter_fourcc(*'mp4v'),
                                 cap.get(cv2.CAP_PROP_FPS),
                                 (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
                                  int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))))

    timestamp = 0
    while cap.isOpened():
        ret, frame = cap.read()

        if not ret:
            break

        # Resizes frame.
        resized_frame = cv2.resize(frame,
                                   (input_size.width, input_size.height))

        # Advances a synthetic timestamp by one frame period (in microseconds).
        timestamp = int(timestamp +
                        (1 / cap.get(cv2.CAP_PROP_FPS)) * 1000 * 1000)

        # Run inference engine to populate annotations array.
        annotations = []
        if engine.run(timestamp, resized_frame, annotations):
            frame = utils.render_bbox(frame, annotations)
            print("JEGF CODE")
            #print("Frame:")
            #print(frame)
            print("annotations:")
            # for annotation in annotations:
            #     #print(annotation.class_name+str(annotation.confidence_score))
            #     if annotation.class_name == "dominada":
            #         print(annotation.class_name + str(annotation.confidence_score))
            #
            #     if annotation.confidence_score >= .53125:
            #         print("More than 53125%")
            #         if annotation.class_name == "dominada":
            #             dominadaCount = dominadaCount + 1
            #
            # frame = cv2.putText(frame, ('Dominadas:' + str(dominadaCount)), (0, 66),
            #                     cv2.FONT_HERSHEY_SIMPLEX, 1.1, (0, 0, 255), 2)
        if writer:
            writer.write(frame)
        else:
            cv2.imshow('frame', frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

    if writer:
        writer.release()
    else:
        cv2.destroyAllWindows()
    cap.release()