Example #1
import sys
import time

import cv2

import camera_util
import gen_util
import settings


def main():
    # args
    config_filename = sys.argv[1]  # first CLI arg: path to the app config JSON
    config = gen_util.read_app_config(config_filename)

    # set the global values
    settings.init(config_filename)

    # camera config - list all cameras found in the configuration
    camera_config_list = config['camera']
    camera_count = len(camera_config_list)
    for i, camera_config in enumerate(camera_config_list):
        print(i, camera_config, '\n')
    # choose a camera
    camera_id = int(input("Enter Camera ID> "))

    # review image regions
    camera_config = camera_config_list[camera_id]
    regions_config = camera_config['regions']
    for i, region_config in enumerate(regions_config):
        print(
            "top left corner (y, x): ({}, {})   region size (width, height): ({}, {})"
            .format(region_config[0], region_config[1], region_config[2],
                    region_config[3]))

    print(
        "\n - - If you don't like these values, edit app_*.json and rerun this - - "
    )
    resize_dimensions = [(640, 480), (1200, 720), (1920, 1440)]
    resize_input = int(
        input("Display size (width, height) options:\n{}\nEnter 0, 1, or 2 > ".format(
            resize_dimensions)))
    dim = resize_dimensions[resize_input]

    # the camera must be a Honeywell
    print(f'camera mfr = {camera_config["mfr"]}')
    assert camera_config['mfr'] == "Honeywell"

    # open the video stream
    stream = False
    video_stream = camera_util.open_video_stream(camera_id, camera_config,
                                                 stream)

    for i in range(2500):
        start_time = time.perf_counter()

        # Honeywell video stream
        frame = camera_util.get_camera_full(camera_id, video_stream)
        camera_name, np_images, is_color = camera_util.get_camera_regions_from_full(
            frame, camera_id, camera_config, stream)

        #camera_name, np_images, is_color = camera_util.get_camera_regions(camera_id, camera_config, False)
        print(" {:04d}  main -- camera: {}  secs: {:02.2f}".format(
            i, camera_name, (time.perf_counter() - start_time)))

        if np_images is not None:
            for j, np_image in enumerate(np_images):  # j avoids shadowing the frame counter i
                # img_bgr = cv2.cvtColor(np_image, cv2.COLOR_RGB2BGR)
                region_name = 'region_{}'.format(j)

                resized_img = cv2.resize(np_image,
                                         dim,
                                         interpolation=cv2.INTER_AREA)
                cv2.imshow(region_name, resized_img)
        else:
            # troubleshooting tips
            print("- - NO Image returned - - ")
            print(" - access via Reolink app on phone")
            print(" - check IP of camera")
            print(" - edit camera_util.get_reolink_snapshot to print the HTTP request string")
            print("   and test that URL in a browser")

        # time.sleep(1)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            cv2.destroyAllWindows()
            break
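
A minimal sketch of the cropping step that camera_util.get_camera_regions_from_full performs, assuming each region config is [y, x, width, height] as printed in the review loop above. The helper name and return shape here are illustrative, not the project's actual implementation:

def crop_regions(frame, regions_config):
    """Slice a full frame (numpy array, shape HxWx3) into region sub-images.

    Each region is [y, x, width, height] with (y, x) the top-left corner.
    """
    crops = []
    for y, x, width, height in regions_config:
        crops.append(frame[y:y + height, x:x + width])  # numpy indexes [rows, cols] == [y, x]
    return crops

# usage (hypothetical): np_images = crop_regions(frame, camera_config['regions'])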
Example #2
import os
import sys
import time

import cv2
import imutils

import camera_util
import gen_util
import label_map_util
import tensorflow_util

# assumed module-level context: cwd and the run_inference() helper are
# defined elsewhere in this module
cwd = os.getcwd()


def main():
    # args
    camera_number = int(sys.argv[1])  # 0 based

    # get the app config - including passwords
    config = gen_util.read_app_config('app_config.json')

    # set some flags based on the config
    run_inferences = config["run_inferences"]
    save_inference = config["save_inference"]
    annotation_dir = config["annotation_dir"]
    snapshot_dir = config["snapshot_dir"]

    # set up tflite model
    global label_dict
    label_dict = label_map_util.get_label_map_dict(config['label_map'], 'id')

    global interpreter
    interpreter = tensorflow_util.get_tflite_interpreter(
        'model/output_tflite_graph.tflite')

    global model_image_dim, model_input_dim, output_details
    model_image_dim, model_input_dim, output_details = tensorflow_util.get_tflite_attributes(
        interpreter)

    # define your paths here - just once (not in the loop)
    global image_path, annotation_path
    image_path = os.path.abspath(os.path.join(cwd, snapshot_dir))
    annotation_path = os.path.abspath(os.path.join(cwd, annotation_dir))

    # Set up Camera
    # TODO - should be a list
    #   - but it's just one camera now

    # for name, capture, flip in camera_list:
    camera_config = camera_util.get_camera_config(config, camera_number)
    camera_name = camera_config['name']
    url = camera_util.get_reolink_url(
        'http', camera_config['ip'])  # pass the url base - not just the ip
    print("Camera Config:", camera_config)

    # based on the config, config all camera regions
    # - includes building the bbox stacks
    regions, bbox_stack_list, bbox_push_list = camera_util.config_camera_regions(
        camera_config)

    snapshot_count = 0
    while True:

        start_time = time.time()
        base_name = "{}_{}".format(str(int(start_time)), camera_number)
        # frame returned as a numpy array ready for cv2
        # not resized
        angle = camera_config['rotation_angle']
        frame = camera_util.get_reolink_snapshot(url,
                                                 camera_config['username'],
                                                 camera_config['password'])

        if frame is not None:
            frame = imutils.rotate(frame, angle)  # rotate frame
            orig_image_dim = (frame.shape[0], frame.shape[1])  # dim = (height, width)
            orig_image = frame.copy()  # preserve the original - full resolution
            # corner is top left

            print('\n-- {} snap captured: {}'.format(snapshot_count, frame.shape),
                  '{0:.2f} seconds'.format(time.time() - start_time))

            # True == run it through the model
            if run_inferences:
                inference_start_time = time.time()
                # loop through 0:n sub-regions of the frame
                # last one is the full resolution
                for i, region in enumerate(regions):
                    crop_start_time = time.time()

                    inference_image, detected_objects, bbox_array = run_inference(
                        orig_image, base_name, region, i, bbox_stack_list,
                        bbox_push_list, True)
                    print('     crop {}'.format(i),
                          ' inference: {0:.2f} seconds'.format(time.time() - crop_start_time))
                    # enlarged_inference = cv2.resize(inference_image, (1440, 1440), interpolation = cv2.INTER_AREA)
                    window_name = "{} crop {}".format(camera_name, i)
                    cv2.imshow(window_name, inference_image)  # show the inference

                print('   TOTAL inference: {0:.2f} seconds'.format(
                    time.time() - inference_start_time))

            else:
                cv2.imshow(camera_name, frame)
            snapshot_count += 1
        else:
            print("-- no frame returned -- ")

        # time.sleep(3)

        # Use key 'q' to close window
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cv2.destroyAllWindows()
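
camera_util.get_reolink_snapshot is not shown here; a plausible minimal version is an HTTP GET that decodes the returned JPEG into a numpy array for cv2. The snapshot URL parameters below are an assumption about the Reolink CGI API - verify them against your camera's firmware documentation:

import cv2
import numpy as np
import requests

def get_snapshot(url_base, username, password):
    # ASSUMED endpoint layout; url_base is e.g. "http://<camera-ip>"
    url = "{}/cgi-bin/api.cgi?cmd=Snap&channel=0&user={}&password={}".format(
        url_base, username, password)
    response = requests.get(url, timeout=5)
    if response.status_code != 200:
        return None
    buffer = np.frombuffer(response.content, dtype=np.uint8)
    return cv2.imdecode(buffer, cv2.IMREAD_COLOR)  # BGR array, or None on bad data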
Example #3
import time

import cv2

import camera_util
import gen_util

# camera settings

# get the app config - including passwords
config = gen_util.read_app_config('app_config.json')

# -- configure the cameras --
camera_list = []   # list of camera objects - from which you will capture
camera_count = 0

# loop through the cameras found in the JSON config
for camera in config['camera']:
    start_time = time.time()
    print (camera['name'], start_time)
    # get the VideoCapture object for this camera
    capture = camera_util.get_camera(
        camera['ip'],
        camera['port'],
        camera['username'],
        camera['password']
    )
    # create cv2 named windows - window name = camera name
    cv2.namedWindow(camera['name'], cv2.WINDOW_NORMAL)

    # camera tuple = (name, VideoCapture object, flip)
    camera_tuple = (camera['name'], capture, camera['flip'])
    camera_list.append(camera_tuple)  # without this append, camera_list stays empty
    camera_count += 1
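
The snippet above only builds camera_list; a minimal capture/display loop to go with it might look like the sketch below. The "vert" flip convention is borrowed from Example #4; everything else follows directly from the tuples built above:

while True:
    for name, capture, flip in camera_list:
        ret, frame = capture.read()
        if not ret or frame is None:
            continue
        if flip == "vert":
            frame = cv2.flip(frame, 0)  # flip around the x-axis
        cv2.imshow(name, frame)         # window of the same name was created above
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

for name, capture, flip in camera_list:
    capture.release()
cv2.destroyAllWindows()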
Example #4
import os
import time

import cv2

import annotation
import camera_util
import display
import gen_util
import label_map_util
import tensorflow_util

# assumed module-level context: cwd and PROBABILITY_THRESHOLD are defined
# elsewhere in this module
cwd = os.getcwd()
PROBABILITY_THRESHOLD = 0.5  # placeholder - use the project's actual value


def main():
    # get the app config - including passwords
    config = gen_util.read_app_config('app_config.json')

    # set some flags based on the config
    run_inference = config["run_inference"]
    save_inference = config["save_inference"]
    annotation_dir = config["annotation_dir"]
    snapshot_dir = config["snapshot_dir"]

    # set up cameras
    camera_list = camera_util.configure_cameras(config)

    # set up tflite model
    label_dict = label_map_util.get_label_map_dict(config['label_map'], 'id')
    interpreter = tensorflow_util.get_tflite_interpreter(
        'model/output_tflite_graph.tflite')
    model_image_dim, model_input_dim, output_details = tensorflow_util.get_tflite_attributes(
        interpreter)

    # define your paths here - just once (not in the loop)
    image_path = os.path.abspath(os.path.join(cwd, snapshot_dir))
    annotation_path = os.path.abspath(os.path.join(cwd, annotation_dir))

    run_with_camera_number = 0  # 0 based

    snapshot_count = 0
    while True:

        # for name, capture, flip in camera_list:
        name, capture, flip = camera_list[
            run_with_camera_number]  # running with 1 camera only
        start_time = time.time()
        print(name, snapshot_count)

        ret, frame = capture.read()  #  frame.shape (height, width, depth)

        if frame is not None:
            orig_image_dim = (frame.shape[0], frame.shape[1])  # dim = (height, width)
            orig_image = frame.copy()
            snapshot_count += 1

            print('captured:', frame.shape, time.time() - start_time)

            if flip == "vert":
                frame = cv2.flip(frame, 0)

            # True == run it through the model
            if run_inference:
                # pre-process the frame -> a compatible numpy array for the model
                preprocessed_image = tensorflow_util.preprocess_image(
                    frame, interpreter, model_image_dim, model_input_dim)
                bbox_array, class_id_array, prob_array = tensorflow_util.send_image_to_model(
                    preprocessed_image, interpreter)
                print('inference:', frame.shape, time.time() - start_time)

                inference_image, orig_image_dim, detected_objects = display.inference_to_image(
                    frame, bbox_array, class_id_array, prob_array,
                    model_input_dim, label_dict, PROBABILITY_THRESHOLD)

                # testing the format
                # convert detected_objects to XML
                # detected_objects = list [ (class_id, class_name, probability, xmin, ymin, xmax, ymax)]
                if len(detected_objects) > 0:
                    print(detected_objects)
                    if save_inference:
                        image_base_name = str(int(start_time))
                        image_name = os.path.join(image_path,
                                                  image_base_name + '.jpg')
                        annotation_name = os.path.join(
                            annotation_path, image_base_name + '.xml')
                        print("saving:", image_name, frame.shape,
                              annotation_name)
                        # original image - h: 480  w: 640
                        cv2.imwrite(image_name, orig_image)
                        # this function generates & saves the XML annotation
                        annotation_xml = annotation.inference_to_xml(
                            name, image_name, orig_image_dim, detected_objects,
                            annotation_dir)

                # enlarged_inference = cv2.resize(inference_image, (1440, 1440), interpolation = cv2.INTER_AREA)
                cv2.imshow(name, inference_image)  # show the inference
                # cv2.imshow(name, orig_image)     # show the raw image from the camera
            else:
                cv2.imshow(name, frame)
        else:
            print("-- no frame returned -- ")

        # time.sleep(3)

        # Use key 'q' to close window
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    for name, capture, flip in camera_list:  # cap was undefined; release every capture
        capture.release()
    cv2.destroyAllWindows()
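
annotation.inference_to_xml is not shown here. Given the documented detected_objects layout (class_id, class_name, probability, xmin, ymin, xmax, ymax), a minimal Pascal VOC-style writer could look like this sketch - it is illustrative, not the project's actual function:

import os
import xml.etree.ElementTree as ET

def detections_to_voc_xml(image_name, image_dim, detected_objects, out_path):
    """Write a Pascal VOC annotation file; image_dim is (height, width)."""
    root = ET.Element('annotation')
    ET.SubElement(root, 'filename').text = os.path.basename(image_name)
    size = ET.SubElement(root, 'size')
    ET.SubElement(size, 'height').text = str(image_dim[0])
    ET.SubElement(size, 'width').text = str(image_dim[1])
    for class_id, class_name, prob, xmin, ymin, xmax, ymax in detected_objects:
        obj = ET.SubElement(root, 'object')
        ET.SubElement(obj, 'name').text = class_name
        bbox = ET.SubElement(obj, 'bndbox')
        ET.SubElement(bbox, 'xmin').text = str(xmin)
        ET.SubElement(bbox, 'ymin').text = str(ymin)
        ET.SubElement(bbox, 'xmax').text = str(xmax)
        ET.SubElement(bbox, 'ymax').text = str(ymax)
    ET.ElementTree(root).write(out_path)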
Example #5
import os
import queue
import threading
import time

import numpy as np

import aws_util
import gen_util
import hue_util
import status

# assumed module-level context: cwd is defined elsewhere in this module
cwd = os.getcwd()


def init(config_filename):

    #
    # T O P    L E V E L   
    #    app config 
    # 
    global config
    config = gen_util.read_app_config(config_filename)

    global run_inferences, save_inference, annotation_dir, snapshot_dir
    run_inferences = config["run_inferences"]
    save_inference = config["save_inference"]
    annotation_dir = config["annotation_dir"]
    snapshot_dir = config["snapshot_dir"]
    status_dir = config["status_dir"]

    # global image_path
    global image_path, annotation_path, status_path
    image_path = os.path.abspath(os.path.join(cwd, snapshot_dir))
    annotation_path = os.path.abspath(os.path.join(cwd, annotation_dir))
    status_path = os.path.abspath(os.path.join(cwd, status_dir))

    # Queues
    global imageQueue, faceQueue
    imageQueue = queue.Queue()
    faceQueue = queue.Queue()

    global run_state
    run_state = True

    global safe_print, safe_imshow
    safe_print = threading.Lock()
    safe_imshow = threading.Lock()

    global safe_stack_update
    safe_stack_update = threading.Lock()

    # AWS
    global aws_session, aws_profile
    aws_profile = config["aws_profile"]
    aws_session = aws_util.get_session()

    global aws_s3_public_image
    aws_s3_public_image = config["aws_s3_public_image"]

    # faces
    global facial_detection_enabled
    global last_recognized_face_id
    global last_recognized_face_time

    if config["facial_detection_enabled"] == "True":
        facial_detection_enabled = True 
    last_recognized_face_id = 0
    last_recognized_face_time = 0.0

    # new object IoU Threshold
    global iou_threshold
    iou_threshold = 0.8

    # keep inference threshold
    global inference_threshold
    inference_threshold = config["inference_threshold"]

    # universal sleep factor
    # - base multiplier to make the cameras sleep
    #   increase this if the imageQueue gets too big
    global universal_sleep_factor
    universal_sleep_factor = 0.01

    # image auto correct
    # matrix
    # - rows = camera
    # - columns = regions
    # 8 regions max
    # 0.0 == don't autocorrect
    global color_image_auto_correct_clips_array
    color_clip_list = config["color_image_auto_correct_clips"]
    color_image_auto_correct_clips_array = np.asarray(color_clip_list)

    global gray_image_auto_correct_clips_array
    gray_clip_list = config["gray_image_auto_correct_clips"]
    gray_image_auto_correct_clips_array = np.asarray(gray_clip_list)

   
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
    # S T A T U S    T R A C K I N G
    #

    global safe_status_update
    safe_status_update = threading.Lock()
    
    global configured_history_map, history_row_count, row_num_dict
    configured_history_map, history_row_count, row_num_dict = status.configure_history_map(status.status_history_dict)

    global home_status
    home_status = status.Status(time.time())

    # - - - - - - - - 
    # H U E  
    #  light control
    #  !!! - to refresh light state, call get_bridge again (it returns the current bridge data)

    global hue_bridge_ip, hue_bridge_username
    hue_bridge_ip = config['hue_bridge_ip']
    hue_bridge_username = config['hue_bridge_username']

    global hue, lights, light_groups
    # get the bridge
    hue = hue_util.get_bridge(hue_bridge_ip, hue_bridge_username)
    lights = hue_util.get_lights(hue)
    light_groups = hue_util.get_groups(hue)

    global front_porch_group_id, back_porch_group_id, bar_group_id, sunroom_group_id
    front_porch_group_id = hue_util.get_group_id(light_groups, "Front Porch")
    back_porch_group_id = hue_util.get_group_id(light_groups, "back porch")
    bar_group_id = hue_util.get_group_id(light_groups, "Bar")
    sunroom_group_id = hue_util.get_group_id(light_groups, "sunroom")

    global light_group_status
    light_group_status = {"front_porch" : 0, "bar" : 0, "sunroom" : 0}
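
The iou_threshold above (0.8) implies an intersection-over-union test for deciding whether a detection is a new object or a repeat of one already in the bbox stacks. The helper below is the standard IoU computation, included as an illustration; it is not the project's actual code:

def iou(box_a, box_b):
    """Intersection over union of two (xmin, ymin, xmax, ymax) boxes."""
    inter_xmin = max(box_a[0], box_b[0])
    inter_ymin = max(box_a[1], box_b[1])
    inter_xmax = min(box_a[2], box_b[2])
    inter_ymax = min(box_a[3], box_b[3])
    inter_area = max(0, inter_xmax - inter_xmin) * max(0, inter_ymax - inter_ymin)
    area_a = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])
    area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])
    union = area_a + area_b - inter_area
    return inter_area / union if union > 0 else 0.0

# a detection overlapping a stacked box with iou(...) >= iou_threshold would be
# treated as the same object rather than a new one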
Example #6
import sys
import time

import cv2

import camera_util
import gen_util
import settings
# assumed: Camera (the PTZ control class used below) is imported from its own module


def main():
    # args
    config_filename = sys.argv[1]  # first CLI arg: path to the app config JSON
    config = gen_util.read_app_config(config_filename)

    # set the global values
    settings.init(config_filename)

    # camera config - list all cameras found in the configuration
    camera_config_list = config['camera']
    camera_count = len(camera_config_list)
    for i, camera_config in enumerate(camera_config_list):
        print(i, camera_config, '\n')
    # choose a camera
    camera_id = int(input("Enter Camera ID> "))

    # review image regions
    camera_config = camera_config_list[camera_id]
    regions_config = camera_config['regions']
    for i, region_config in enumerate(regions_config):
        print(
            "top left corner (y, x): ({}, {})   region size (width, height): ({}, {})"
            .format(region_config[0], region_config[1], region_config[2],
                    region_config[3]))

    print(
        "\n - - If you don't like these values, edit app_*.json and rerun this - - "
    )
    resize_dimensions = [(640, 480), (1200, 720), (1920, 1440)]
    resize_input = int(
        input("Display size (width, height) options:\n{}\nEnter 0, 1, or 2 > ".format(
            resize_dimensions)))
    dim = resize_dimensions[resize_input]

    # if camera has PTZ functionality
    # - only camera_id = 5
    if camera_id == 5:
        # login to specified camera
        ip = camera_config['ip']
        username = camera_config['username']
        password = camera_config['password']
        camera_5 = Camera(ip, username, password)
        print("camera #5 logged in & instantiated")
        # get a zoom factor
        zoom_instruction = int(
            input(
                "Enter seconds to zoom (+X to zoom in, -X to zoom out, 0 to do nothing) > "
            ))
        if zoom_instruction > 0:
            camera_5.start_zooming_in()
            time.sleep(zoom_instruction)
            camera_5.stop_zooming()
        if zoom_instruction < 0:
            camera_5.start_zooming_out()
            time.sleep(-zoom_instruction)
            camera_5.stop_zooming()

    for i in range(250):
        start_time = time.perf_counter()
        camera_name, np_images, is_color = camera_util.get_camera_regions(
            camera_id, camera_config, False)
        print(" {:04d}  main -- camera: {}  secs: {:02.2f}".format(
            i, camera_name, (time.perf_counter() - start_time)))

        if np_images is not None:
            for j, np_image in enumerate(np_images):  # j avoids shadowing the frame counter i
                # img_bgr = cv2.cvtColor(np_image, cv2.COLOR_RGB2BGR)
                region_name = 'region_{}'.format(j)

                resized_img = cv2.resize(np_image,
                                         dim,
                                         interpolation=cv2.INTER_AREA)
                cv2.imshow(region_name, resized_img)
        else:
            # troubleshooting tips
            print("- - NO Image returned - - ")
            print(" - access via Reolink app on phone")
            print(" - check IP of camera")
            print(" - edit camera_util.get_reolink_snapshot to print the HTTP request string")
            print("   and test that URL in a browser")

        # time.sleep(1)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            cv2.destroyAllWindows()
            break
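
The timed zoom above can be folded into one helper; a sketch, assuming the Camera class exposes the start_zooming_in / start_zooming_out / stop_zooming methods used in the snippet:

import time

def zoom_for(camera, seconds):
    """Zoom in for +seconds, out for -seconds; 0 is a no-op."""
    if seconds == 0:
        return
    if seconds > 0:
        camera.start_zooming_in()
    else:
        camera.start_zooming_out()
    time.sleep(abs(seconds))
    camera.stop_zooming()

# usage (hypothetical): zoom_for(camera_5, zoom_instruction)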