def main(argv):
    """Run real-time object detection on a video source.

    Parses CLI arguments into the global ``Config``, loads the requested
    network (OpenCV DNN on CPU, or an SSD graph on a Movidius NCS stick),
    then loops: grab a frame, run inference (rate-limited by
    ``Config.infer_frame_rate``), draw detections plus an on-screen HUD,
    and display until the video ends or the user presses 'q'.

    Args:
        argv: command-line argument list handed to ``parse_args``.

    Exits with status 2 on an unsupported framework/platform or when no
    Movidius device is found.
    """
    parse_args(argv)

    Config.model_labels = parse_labels(Config.model_labels_file)
    Config.label_colors = get_label_colors(Config.model_labels)

    print_summary()

    # open video source
    cap = get_video(Config.source, Config.video_path)

    print("Video Properties:")
    print("Loaded {} video from source {}".format(Config.source,
                                                  Config.video_path))

    # get frame width/height
    actual_frame_width = cap.get(cv.CAP_PROP_FRAME_WIDTH)
    actual_frame_height = cap.get(cv.CAP_PROP_FRAME_HEIGHT)

    print("Video Resolution {} x {}".format(actual_frame_width,
                                            actual_frame_height))

    # load NN model for platform
    if Config.platform == 'cpu':
        if Config.framework == 'caffe':
            net = cv.dnn.readNetFromCaffe(Config.model_file,
                                          Config.model_weight_file)
        elif Config.framework == 'tensorflow':
            net = cv.dnn.readNetFromTensorflow(Config.model_file,
                                               Config.model_weight_file)
        else:
            print("{} Framework not supported".format(Config.framework))
            sys.exit(2)

    elif Config.platform == 'movidius':
        mvnc.SetGlobalOption(mvnc.GlobalOption.LOG_LEVEL, 2)
        # Get a list of ALL the sticks that are plugged in
        mv_devices = mvnc.EnumerateDevices()
        if len(mv_devices) == 0:
            print('No Movidius devices found')
            sys.exit(2)
        print('{} Movidius Device/s Found'.format(len(mv_devices)))

        # Pick the first stick to run the network
        movidius = mvnc.Device(mv_devices[0])
        movidius.OpenDevice()

        # Load the compiled graph file into a memory buffer
        with open(Config.model_weight_file, mode='rb') as f:
            graph_data = f.read()

        # allocate the Graph instance from NCAPI by passing the memory buffer
        movidius_graph = movidius.AllocateGraph(graph_data)
    else:
        print("{} Platform not supported".format(Config.platform))
        print('-d, --device cpu|movidius : hw platform')
        sys.exit(2)

    # Start counting frames to calculate FPS
    frame_count = 0
    start_time = time.time()
    # BUGFIX: pre-seed end_time so the FPS calculation below never reads
    # an unbound variable if the loop exits without hitting a break path.
    end_time = start_time
    inferred_frame_count = 0

    cv.namedWindow("Real Time Object Detection", cv.WINDOW_FULLSCREEN)

    # Last inference result; reused for display on frames where
    # inference is skipped by the rate limiter.
    detections = None

    while True:
        # read frame from capture
        has_frame, frame = cap.read()

        if not has_frame:
            end_time = time.time()
            print("No more frames from video source, exiting ....")
            break

        # Run inference only while under the requested inference frame
        # rate; -1 means "infer on every frame".
        if Config.infer_frame_rate == -1 or inferred_frame_count / (
                time.time() - start_time) < Config.infer_frame_rate:
            # BUGFIX: cv.resize takes dsize as (width, height); the
            # original passed (height, width), which transposes the
            # target size for non-square model inputs.
            resized_frame = cv.resize(
                frame, (Config.model_image_width, Config.model_image_height))
            if Config.platform == 'cpu':
                detections = infer_with_cpu(resized_frame, net)
            elif Config.platform == 'movidius':
                detections = infer_with_movidius(resized_frame, movidius_graph)
            else:
                print('Platform not found')
                sys.exit(2)

            inferred_frame_count += 1

        if detections is not None:
            frame = post_process(frame, detections)

        # display text to let user know how to quit
        cv.rectangle(frame, (0, 0), (220, 60), (50, 50, 50, 100), -1)
        cv.putText(frame, "Q to Quit", (10, 12), cv.FONT_HERSHEY_SIMPLEX, 0.4,
                   (255, 255, 255), 1)
        cv.putText(
            frame, 'CPU Count: {} - CPU% : {} '.format(psutil.cpu_count(),
                                                       process.cpu_percent()),
            (10, 35), cv.FONT_HERSHEY_SIMPLEX, 0.4, (255, 255, 255), 1)
        cv.putText(frame,
                   'Inference Frame Rate: {}'.format(Config.infer_frame_rate),
                   (10, 55), cv.FONT_HERSHEY_SIMPLEX, 0.4, (255, 255, 255), 1)

        cv.imshow('Real Time Object Detection', frame)

        frame_count += 1

        if cv.waitKey(Config.fps_delay) & 0xFF == ord('q'):
            end_time = time.time()
            break
        # TODO: UP Key to increase IPF , DOWN Key to decrease IPF

    # BUGFIX: guard against a zero-length run (e.g. source fails on the
    # very first read) to avoid ZeroDivisionError.
    elapsed = end_time - start_time
    if elapsed > 0:
        print('Calculated Frames Per Second: ' + str(frame_count / elapsed))
    else:
        print('Calculated Frames Per Second: n/a (no frames processed)')

    cv.destroyAllWindows()

    # Release Movidius Device Allocation
    if Config.platform == 'movidius':
        movidius_graph.DeallocateGraph()
        movidius.CloseDevice()

    cap.release()
    def __init__(self):
        """Initialize the people-detection ROS node and block forever.

        Reads parameters, advertises detection and body-tracker topics,
        opens the first Movidius NCS stick and loads the SSD MobileNet
        graph onto it, then either subscribes to camera topics or reads
        frames from a video file.  Never returns normally: it ends in
        rospy.spin() (live topics) or read_from_video() (file input).
        """
        super(PeopleDetectionNode, self).__init__()

        # init the node
        rospy.init_node('people_object_detection', anonymous=False)
        rospy.loginfo("Starting NCS People detecter...")

        # Get parameters from launch file
        network_graph_path = rospy.get_param('~network_graph_path', "")
        # confirm path to ssd_mobilenet_graph file was supplied;
        # without it we cannot load anything onto the NCS, so abort.
        if network_graph_path:
            rospy.loginfo('Found network_graph_path: ' + network_graph_path)
        else:
            rospy.logerr("network_graph_path param is required!")
            quit()
        # NOTE: We use the 'ncappzoo/apps/video_objects/graph', local copy stored at:
        # '.../ncs_people_detection/network_graphs/ssd_mobilenet_graph'

        # Get other parameters from YAML file
        camera_rgb_topic = rospy.get_param("~camera_rgb_topic",
                                           "/cv_camera/image_raw")
        camera_depth_topic = rospy.get_param("~camera_depth_topic", "")
        video_file_path = rospy.get_param("~video_file_path", "")
        # NOTE(review): this default is immediately overwritten by the
        # param lookup below; the first assignment is redundant.
        self.show_cv_debug_window = False
        self.show_cv_debug_window = rospy.get_param("~show_cv_debug_window",
                                                    False)

        # Per-frame state filled in by the subscriber callbacks
        self.incoming_image_msg = None
        self.incoming_depth_msg = None
        self.cv_image = None
        self._bridge = CvBridge()
        self.skip_frame_count = 0

        # Advertise the result of Object Detector (COB format)
        self.pub_detections = rospy.Publisher('/object_detection/detections', \
            DetectionArray, queue_size=1)
        # and the marked up image
        self.pub_detections_image = rospy.Publisher(\
            '/object_detection/detections_image', Image, queue_size=1)

        # Advertise the BodyTracker message (same as Nuitrack node)
        self.pub_body_tracker = rospy.Publisher('/body_tracker/position', \
            BodyTracker, queue_size=1)

        # Advertise the BodyTrackerArray message (same as Nuitrack node, but an array!)
        self.pub_body_tracker_array = rospy.Publisher('/body_tracker_array/position', \
            BodyTrackerArray, queue_size=1)

        # configure the NCS
        mvnc.SetGlobalOption(mvnc.GlobalOption.LOG_LEVEL, 2)

        # Get a list of ALL the sticks that are plugged in
        devices = mvnc.EnumerateDevices()
        if len(devices) == 0:
            rospy.logerr('*** No Movidius NCS devices found!  Exiting! ***')
            quit()

        # Pick the first stick to run the network
        device = mvnc.Device(devices[0])

        # Open the NCS
        device.OpenDevice()

        # Load graph file to memory buffer
        with open(network_graph_path, mode='rb') as f:
            graph_data = f.read()

        # allocate the Graph instance from NCAPI by passing the memory buffer
        self.ssd_mobilenet_graph = device.AllocateGraph(graph_data)

        # open the camera or video file
        if not video_file_path or video_file_path == "no" or video_file_path == "cam":

            # Subscribe to the live video messages

            if camera_depth_topic:
                # When depth is specified, synchronize RGB and Depth frames
                # warning!  if used, but no depth camera, RGB will never show up!

                # Subscribe to approx synchronized rgb and depth frames
                self.sub_rgb = message_filters.Subscriber(
                    camera_rgb_topic, Image)
                self.sub_depth = message_filters.Subscriber(
                    camera_depth_topic, Image)

                # Create the message filter (queue of 2, 0.9s slop)
                ts = message_filters.ApproximateTimeSynchronizer(\
                    [self.sub_rgb, self.sub_depth], 2, 0.9)
                ts.registerCallback(self.rgb_and_depth_callback)
                rospy.loginfo('Subscribing to SYNCHRONIZED RGB: ' + \
                camera_rgb_topic + " and Depth: " + camera_depth_topic)

            else:
                # no depth topic, RGB only
                # buff_size is raised so large Image messages aren't dropped
                self.sub_rgb = rospy.Subscriber(camera_rgb_topic,\
                    Image, self.rgb_callback, queue_size=1, buff_size=2**24)
                rospy.loginfo('Subscribing to camera_rgb_topic: ' +
                              camera_rgb_topic)

        else:
            rospy.logwarn("READING FROM VIDEO FILE INSTEAD OF ROS MESSAGES")
            self.read_from_video(video_file_path)

        # spin
        rospy.spin()
# Example #3
def main():
    """Run SSD inference over a looping video file on a Movidius NCS.

    Opens the first NCS stick, loads the graph named by the module-level
    ``graph_filename``, then plays ``./traffic_sign2.mp4`` in a loop,
    running ``run_inference`` on every frame and displaying the result
    until the user quits (via ``handle_keys``) or closes the window.

    Returns:
        1 on missing/invalid arguments; otherwise runs until quit.
    """
    global graph_filename, resize_output, resize_output_width, resize_output_height

    if len(argv) < 2:
        print("No Graph is provided")
        print_usage()
        return 1

    if not handle_args():
        print_usage()
        return 1

    # configure the NCS
    mvnc.SetGlobalOption(mvnc.GlobalOption.LOG_LEVEL, 2)

    # Get a list of ALL the sticks that are plugged in
    devices = mvnc.EnumerateDevices()
    if len(devices) == 0:
        print('No devices found')
        quit()

    # Pick the first stick to run the network
    device = mvnc.Device(devices[0])

    # Open the NCS
    device.OpenDevice()

    # Load graph file to memory buffer
    with open(graph_filename, mode='rb') as f:
        graph_data = f.read()

    # allocate the Graph instance from NCAPI by passing the memory buffer
    ssd_mobilenet_graph = device.AllocateGraph(graph_data)

    cv2.namedWindow(cv_window_name)
    cv2.moveWindow(cv_window_name, 10, 10)

    # Single source file; named once so error messages stay in sync.
    video_path = "./traffic_sign2.mp4"

    exit_app = False
    while True:
        # Re-open the file on each pass so the video loops forever.
        cap = cv2.VideoCapture(video_path)

        actual_frame_width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
        actual_frame_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
        print('actual video resolution: ' + str(actual_frame_width) + ' x ' +
              str(actual_frame_height))

        # BUGFIX: identity check with `is`; also the original printed the
        # undefined name `input_video_file` here (NameError on failure) --
        # report the path actually opened instead.
        if cap is None or not cap.isOpened():
            print('Could not open video device.  Make sure file exists:')
            print('file name:' + video_path)
            print('Also, if you installed python opencv via pip or pip3 you')
            print(
                'need to uninstall it and install from source with -D WITH_V4L=ON'
            )
            print('Use the provided script: install-opencv-from_source.sh')
            exit_app = True
            break

        frame_count = 0
        start_time = time.time()
        end_time = start_time

        while True:
            ret, display_image = cap.read()

            if not ret:
                end_time = time.time()
                print("No image from from video device, exiting")
                break

            # check if the window is visible, this means the user hasn't closed
            # the window via the X button
            prop_val = cv2.getWindowProperty(cv_window_name,
                                             cv2.WND_PROP_ASPECT_RATIO)
            if prop_val < 0.0:
                end_time = time.time()
                exit_app = True
                break

            # run_inference draws its detections onto display_image in place
            run_inference(display_image, ssd_mobilenet_graph)

            if resize_output:
                # BUGFIX: the third positional argument of cv2.resize is
                # dst, not interpolation; pass interpolation by keyword.
                display_image = cv2.resize(
                    display_image, (resize_output_width, resize_output_height),
                    interpolation=cv2.INTER_LINEAR)
            cv2.imshow(cv_window_name, display_image)

            raw_key = cv2.waitKey(1)
            if raw_key != -1:
                if handle_keys(raw_key) == False:
                    end_time = time.time()
                    exit_app = True
                    break
            frame_count += 1

        # BUGFIX: avoid ZeroDivisionError when no frames were processed.
        elapsed = end_time - start_time
        if elapsed > 0:
            print('Frames per Second: ' + str(frame_count / elapsed))
        else:
            print('Frames per Second: n/a (no frames processed)')

        cap.release()

        if exit_app:
            break

    # Clean up the graph and the device
    ssd_mobilenet_graph.DeallocateGraph()
    device.CloseDevice()

    cv2.destroyAllWindows()
# Example #4
def main():
    """Run SSD inference on a live camera feed via a Movidius NCS.

    Opens the first NCS stick, loads the compiled graph file named
    'graph' from the working directory, opens the camera at
    ``CAMERA_INDEX``, then runs ``run_inference`` on every frame and
    displays the result until the user quits (via ``handle_keys``),
    closes the window, or the camera stops delivering frames.

    Returns:
        1 on invalid arguments; None otherwise.
    """
    global resize_output, resize_output_width, resize_output_height

    if not handle_args():
        print_usage()
        return 1

    # configure the NCS
    mvnc.SetGlobalOption(mvnc.GlobalOption.LOG_LEVEL, 2)

    # Get a list of ALL the sticks that are plugged in
    devices = mvnc.EnumerateDevices()
    if len(devices) == 0:
        print('No devices found')
        quit()

    # Pick the first stick to run the network
    device = mvnc.Device(devices[0])

    # Open the NCS
    device.OpenDevice()

    graph_filename = 'graph'

    # Load graph file to memory buffer
    with open(graph_filename, mode='rb') as f:
        graph_data = f.read()

    # allocate the Graph instance from NCAPI by passing the memory buffer
    ssd_mobilenet_graph = device.AllocateGraph(graph_data)

    cv2.namedWindow(cv_window_name)
    cv2.moveWindow(cv_window_name, 10, 10)

    # Request a capture size; the camera may deliver something else,
    # so the actual resolution is read back below.
    cap = cv2.VideoCapture(CAMERA_INDEX)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, REQUEST_CAMERA_WIDTH)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, REQUEST_CAMERA_HEIGHT)

    actual_frame_width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
    actual_frame_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
    print('actual video resolution: ' + str(actual_frame_width) + ' x ' +
          str(actual_frame_height))

    # BUGFIX: identity check with `is` instead of `==`.
    if cap is None or not cap.isOpened():
        print('Could not open camera.  Make sure it is plugged in.')
        print('Also, if you installed python opencv via pip or pip3 you')
        print(
            'need to uninstall it and install from source with -D WITH_V4L=ON')
        print('Use the provided script: install-opencv-from_source.sh')
        return
    exit_app = False
    while True:
        frame_count = 0
        start_time = time.time()
        end_time = start_time

        while True:
            ret, display_image = cap.read()

            if not ret:
                end_time = time.time()
                print("No image from from video device, exiting")
                break

            # check if the window is visible, this means the user hasn't closed
            # the window via the X button
            prop_val = cv2.getWindowProperty(cv_window_name,
                                             cv2.WND_PROP_ASPECT_RATIO)
            if prop_val < 0.0:
                end_time = time.time()
                exit_app = True
                break

            # run_inference draws its detections onto display_image in place
            run_inference(display_image, ssd_mobilenet_graph)

            if resize_output:
                # BUGFIX: the third positional argument of cv2.resize is
                # dst, not interpolation; pass interpolation by keyword.
                display_image = cv2.resize(
                    display_image, (resize_output_width, resize_output_height),
                    interpolation=cv2.INTER_LINEAR)
            cv2.imshow(cv_window_name, display_image)

            raw_key = cv2.waitKey(1)
            if raw_key != -1:
                if handle_keys(raw_key) == False:
                    end_time = time.time()
                    exit_app = True
                    break
            frame_count += 1

        # BUGFIX: avoid ZeroDivisionError when no frames were processed.
        elapsed = end_time - start_time
        if elapsed > 0:
            print('Frames per Second: ' + str(frame_count / elapsed))
        else:
            print('Frames per Second: n/a (no frames processed)')

        cap.release()

        # BUGFIX: this check was duplicated back-to-back in the original.
        if exit_app:
            break

    # Clean up the graph and the device
    ssd_mobilenet_graph.DeallocateGraph()
    device.CloseDevice()

    cv2.destroyAllWindows()