Example #1
File: detect.py  Project: fmiusov/ssd-dag
                    for i,obj in enumerate(ans):
                        box = obj.bounding_box.flatten().tolist()
                        bbox_list.append(box)
                        class_id_list.append(obj.label_id)
                        prob_list.append(obj.score)
                    bbox_array = array(bbox_list)  # numpy array (assumes 'from numpy import array')
                    class_id_array = array(class_id_list)
                    prob_array = array(prob_list)

                inference_image, orig_image_dim, detected_objects = inference_to_image(model_type, logger, 
                    image_filepath,
                    bbox_array, class_id_array, prob_array, 
                    model_input_dim, label_dict, PROBABILITY_THRESHOLD)

                if args.display == 'gtk':
                    cv2.namedWindow('Object Detect')
                    image = cv2.cvtColor(inference_image, cv2.COLOR_RGB2BGR)
                    cv2.imshow('Object Detect', cv2.resize(image, (640,480)))
                    cv2.waitKey(0)
                else:
                    pass  # display == 'None' or unrecognized - nothing to show
                if args.annotation_dir is not None:
                    print("detected objects:", detected_objects)
                    image_basename = os.path.basename(image_filepath)
                    annotation_xml = inference_to_xml(args.image_dir, image_basename, orig_image_dim, detected_objects, args.annotation_dir)
                    
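For context, inference_to_xml takes the detected-object tuples and writes a Pascal VOC style annotation file. Below is a minimal sketch of such a writer, assuming detected_objects is a list of (class_id, class_name, probability, xmin, ymin, xmax, ymax) tuples as documented in the later examples; the name inference_to_xml_sketch and the exact tag layout are illustrative, not the project's actual implementation.

import os
import xml.etree.ElementTree as ET

def inference_to_xml_sketch(image_dir, image_name, orig_image_dim,
                            detected_objects, annotation_dir):
    # hypothetical Pascal VOC writer; the real project helper may differ
    height, width = orig_image_dim
    root = ET.Element('annotation')
    ET.SubElement(root, 'folder').text = image_dir
    ET.SubElement(root, 'filename').text = os.path.basename(image_name)
    size = ET.SubElement(root, 'size')
    ET.SubElement(size, 'width').text = str(width)
    ET.SubElement(size, 'height').text = str(height)
    ET.SubElement(size, 'depth').text = '3'
    for class_id, class_name, prob, xmin, ymin, xmax, ymax in detected_objects:
        obj = ET.SubElement(root, 'object')
        ET.SubElement(obj, 'name').text = class_name
        bndbox = ET.SubElement(obj, 'bndbox')
        ET.SubElement(bndbox, 'xmin').text = str(int(xmin))
        ET.SubElement(bndbox, 'ymin').text = str(int(ymin))
        ET.SubElement(bndbox, 'xmax').text = str(int(xmax))
        ET.SubElement(bndbox, 'ymax').text = str(int(ymax))
    xml_name = os.path.splitext(os.path.basename(image_name))[0] + '.xml'
    xml_path = os.path.join(annotation_dir, xml_name)
    ET.ElementTree(root).write(xml_path)
    return xml_path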


Example #2
def image_consumer(consumer_id, sess, tensor_dict, image_tensor,
                   bbox_stack_lists, bbox_push_lists, model_input_dim,
                   label_dict):

    log.info(f'IMAGE-CONSUMER started #{consumer_id}')
    # configuration
    facial_detection_regions = convert_facial_lists(
        settings.config["facial_detection_regions"]
    )  # list of lists converted to a list of 2-tuples: (camera_id, region_id)
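    # e.g. with settings.config["facial_detection_regions"] = [[0, 1], [0, 2]],
    # a convert_facial_lists along the lines of
    #   [tuple(pair) for pair in lists]
    # would return [(0, 1), (0, 2)]  (illustrative - the real helper may differ)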

    while settings.run_state:  # exit when the shared run flag is cleared
        new_objects = 0  # default to 0
        try:
            # Consumer tasks
            # - once/frame
            #    - ALL regions of the frame
            data = settings.imageQueue.get(block=False)
            camera_id, camera_name, image_time, np_images, is_color = data
            pushed_to_face_queue = False
            start = time.perf_counter()
            # loop through the regions in the frame
            for region_id, np_image in enumerate(np_images):
                orig_image = np_image.copy()  # np_image will become the inference image - it's NOT immutable
                np_image_expanded = np.expand_dims(np_image, axis=0)
                # -- run model
                inf = tensorflow_util.send_image_to_tf_sess(
                    np_image_expanded, sess, tensor_dict, image_tensor)
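                # 'inf' bundles the model outputs; the code below relies on its
                # prob_array, bbox_array and class_array fields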
                # get data for relevant detections
                num_detections = inf.prob_array.shape[0]

                # check for new detections
                # - per camera - per region
                det = None  # None is needed when saving inferences (no detection) for all images
                if num_detections > 0:
                    # need bbox_array as a numpy
                    log.debug(
                        f'image_consumer#{consumer_id} - cam#{camera_id}  reg#{region_id}  bbox_stack_list len: {len(bbox_stack_lists)}'
                    )
                    log.debug(
                        f'-- bbox_stack_lists[{camera_id}] => bbox_stack_list')
                    log.debug(
                        f'-- bbox_stack_list: {bbox_stack_lists[camera_id]}')
                    with settings.safe_stack_update:
                        new_objects, dup_objects = tensorflow_util.identify_new_detections(
                            image_time, settings.iou_threshold, camera_id,
                            region_id, inf.bbox_array,
                            bbox_stack_lists[camera_id],
                            bbox_push_lists[camera_id])
                # D E T E C T I O N  class
                # - create a detection & update home_status
                det = inference.RegionDetection(image_time, camera_id,
                                                region_id, is_color,
                                                num_detections, new_objects,
                                                inf)
                # update regardless
                with settings.safe_status_update:
                    # - - - - - - - UPDATING status & history - - - - - - - -
                    #   called as a per camera:region function
                    settings.home_status.update_from_detection(det)

                # display -
                # NOTE - displaying with cv2 from multiple threads is a problem!  run 1 consumer only if you want to enable this
                # window_name = '{}-{}'.format(camera_name, region_id)
                if num_detections > 0:
                    image = np_image
                    # TODO - get rid of the threshold
                    probability_threshold = 0.7
                    inference_image, orig_image_dim, detected_objects = display.inference_to_image(
                        image, inf, model_input_dim, label_dict,
                        probability_threshold)

                # Facial Detection
                if settings.facial_detection_enabled:
                    # is this region among the regions configured for facial detection?
                    submit_face_queue = check_faces(camera_id, region_id,
                                                    facial_detection_regions,
                                                    inf.class_array)
                    if submit_face_queue:
                        settings.faceQueue.put(
                            (camera_id, region_id, image_time, np_image))
                        pushed_to_face_queue = True

                # S A V E
                # - set up a couple of rules for saving
                # default
                rule_num = 2  # priority camera/region w/ new objects
                image_name, annotation_name = inference.get_save_detection_path(
                    rule_num, det, settings.image_path,
                    settings.annotation_path)
                log.info(
                    f'image_consumer/get_save_path: {image_name} {annotation_name}'
                )
                saved = False  # default

                if image_name is not None:
                    # original image - h: 480  w: 640
                    saved = True
                    cv2.imwrite(image_name, orig_image)
                    # this function generates & saves the XML annotation
                    # - if no detection, just save the image and skip the annotation
                    #   (orig_image_dim / detected_objects are only set when num_detections > 0)
                    if num_detections > 0:
                        annotation_xml = annotation.inference_to_xml(
                            settings.image_path, image_name, orig_image_dim,
                            detected_objects, settings.annotation_path)

                with settings.safe_print:
                    log.info(
                        f'  IMAGE-CONSUMER:<< {consumer_id} qsize: {settings.imageQueue.qsize()}'
                        f'  cam: {camera_name} reg: {region_id} timestamp {image_time}'
                        f'  inftime:{(time.perf_counter() - start):02.2f} sec  dets: {num_detections} new: {new_objects}  saved: {saved}'
                    )
                    if num_detections > 0:
                        log.debug(f'image_consumer - detection: {det}')
                    if pushed_to_face_queue:
                        log.info('      pushed to faceQueue')
                    if saved:
                        log.debug(
                            f"      Saved: stale objects - image_name: {image_name}"
                        )
                    else:
                        log.debug(
                            "      No new objects detected --- not saved")

        except queue.Empty:
            pass

        except Exception as e:
            with settings.safe_print:
                log.error(
                    f'  IMAGE-CONSUMER:!!! ERROR - Exception  Consumer ID: {consumer_id}'
                )
                log.error(f'  --  image consumer exception: {e}')
                log.error(traceback.format_exc())

    # run_state cleared - shut down
    log.info(f'******* image consumer {consumer_id} shutdown *******')

    return
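
identify_new_detections here and calc_iou_with_previous in Example #3 both compare fresh boxes against a stack of recently seen ones with an intersection-over-union test governed by settings.iou_threshold. A minimal IoU sketch, assuming the usual SSD [ymin, xmin, ymax, xmax] box layout (the project's actual helper in tensorflow_util may differ):

def iou(box_a, box_b):
    # boxes are [ymin, xmin, ymax, xmax]; returns intersection area / union area
    ymin = max(box_a[0], box_b[0])
    xmin = max(box_a[1], box_b[1])
    ymax = min(box_a[2], box_b[2])
    xmax = min(box_a[3], box_b[3])
    inter = max(0.0, ymax - ymin) * max(0.0, xmax - xmin)
    area_a = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])
    area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])
    union = area_a + area_b - inter
    return inter / union if union > 0 else 0.0

A detection whose IoU against a stacked box clears the threshold counts as a duplicate; otherwise it is new - which is how the new_objects / dup_objects counts are split.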
Example #3
def run_inference(image, base_name, region, region_idx, bbox_stack_list,
                  bbox_push_list, save_inference):
    '''
    Run the inference on the given image.
      image = full-size image
      base_name = timestamp, used to make the file name unique
          (region_idx is appended to keep it unique per region)
      region = dimension data to crop out, region = ((ymin, ymax), (xmin, xmax))
          the last region is the full image = all 0 values
      region_idx = index of the region within the frame
    Saves using {base_name}_{region_idx}.
    '''

    if region:
        (ymin, ymax) = region[0]
        (xmin, xmax) = region[1]
        # TODO
        #   after getting rid of orig/fullsize
        #   you can simplify this - you'll always have region
        if ymax > 0:
            image = image[ymin:ymax, xmin:xmax].copy()

    base_name = "{}_{}".format(base_name, region_idx)
    # keep a copy - the display inference step below is destructive to 'image'
    orig_image = image.copy()

    # pre-process the frame -> a compatible numpy array for the model
    preprocessed_image = tensorflow_util.preprocess_image(
        image, interpreter, model_image_dim, model_input_dim)
    # run the model
    bbox_array, class_id_array, prob_array = tensorflow_util.send_image_to_model(
        preprocessed_image, interpreter, PROBABILITY_THRESHOLD)

    # check detected objects against the stack
    new_objects = 0
    dup_objects = 0
    if prob_array is not None:
        match_counts = calc_iou_with_previous(region_idx, bbox_stack_list,
                                              bbox_push_list, bbox_array)
        for match_count in match_counts:
            if match_count >= 3:
                dup_objects = dup_objects + 1
            else:
                new_objects = new_objects + 1

    inference_image, orig_image_dim, detected_objects = display.inference_to_image(
        image, bbox_array, class_id_array, prob_array, model_input_dim,
        label_dict, PROBABILITY_THRESHOLD)

    # if the detected objects were repetitive - don't save the image
    #  get IOU

    # convert detected_objects to XML
    # detected_objects = list of (class_id, class_name, probability, xmin, ymin, xmax, ymax) tuples
    if len(detected_objects) > 0:
        print("       Objects:", base_name, detected_objects)
        if save_inference and new_objects > 0:
            image_name = os.path.join(image_path, base_name + '.jpg')
            annotation_name = os.path.join(annotation_path, base_name + '.xml')
            # print ("saving:", image_name, image.shape, annotation_name)
            # original image - h: 480  w: 640
            print("  Saved: match count: {}  new objects: {}   image_name: {}".
                  format(match_counts, new_objects, image_name))
            cv2.imwrite(image_name, orig_image)
            # this function generates & saves the XML annotation
            annotation_xml = annotation.inference_to_xml(
                image_path, image_name, orig_image_dim, detected_objects,
                annotation_path)
        elif save_inference and new_objects == 0:
            print("  No new objects detected --- not saved")

    return inference_image, detected_objects, bbox_array
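
A usage sketch for the region convention the docstring describes; frame, bbox_stack_list and bbox_push_list are assumed to already exist, the coordinates are illustrative, and an all-zero region means "use the full frame":

import time

regions = [((0, 240), (0, 320)),      # top-left quadrant of a 480x640 frame
           ((240, 480), (320, 640)),  # bottom-right quadrant
           ((0, 0), (0, 0))]          # sentinel: full frame
base_name = str(int(time.time()))     # timestamp keeps file names unique
for region_idx, region in enumerate(regions):
    inference_image, detected_objects, bbox_array = run_inference(
        frame, base_name, region, region_idx,
        bbox_stack_list, bbox_push_list, save_inference=True)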
Example #4
def main():
    # get the app config - including passwords
    config = gen_util.read_app_config('app_config.json')

    # set some flags based on the config
    run_inference = config["run_inference"]
    save_inference = config["save_inference"]
    annotation_dir = config["annotation_dir"]
    snapshot_dir = config["snapshot_dir"]

    # set up cameras
    camera_list = camera_util.configure_cameras(config)

    # set up tflite model
    label_dict = label_map_util.get_label_map_dict(config['label_map'], 'id')
    interpreter = tensorflow_util.get_tflite_interpreter(
        'model/output_tflite_graph.tflite')
    model_image_dim, model_input_dim, output_details = tensorflow_util.get_tflite_attributes(
        interpreter)

    # define your paths here - just once (not in the loop)
    image_path = os.path.abspath(os.path.join(cwd, snapshot_dir))
    annotation_path = os.path.abspath(os.path.join(cwd, annotation_dir))

    run_with_camera_number = 0  # 0 based

    snapshot_count = 0
    while True:

        # for name, capture, flip in camera_list:
        name, capture, flip = camera_list[run_with_camera_number]  # running with 1 camera only
        start_time = time.time()
        print(name, snapshot_count)

        ret, frame = capture.read()  #  frame.shape (height, width, depth)

        if frame is not None:
            orig_image_dim = (frame.shape[0], frame.shape[1])  # dim = (height, width)
            orig_image = frame.copy()
            snapshot_count = snapshot_count + 1

            print('captured:', frame.shape, time.time() - start_time)

            if flip == "vert":
                frame = cv2.flip(frame, 0)

            # True == run it through the model
            if run_inference:
                # pre-process the frame -> a compatible numpy array for the model
                preprocessed_image = tensorflow_util.preprocess_image(
                    frame, interpreter, model_image_dim, model_input_dim)
                bbox_array, class_id_array, prob_array = tensorflow_util.send_image_to_model(
                    preprocessed_image, interpreter)
                print('inference:', frame.shape, time.time() - start_time)

                inference_image, orig_image_dim, detected_objects = display.inference_to_image(
                    frame, bbox_array, class_id_array, prob_array,
                    model_input_dim, label_dict, PROBABILITY_THRESHOLD)

                # convert detected_objects to XML
                # detected_objects = list of (class_id, class_name, probability, xmin, ymin, xmax, ymax) tuples
                if len(detected_objects) > 0:
                    print(detected_objects)
                    if save_inference:
                        image_base_name = str(int(start_time))
                        image_name = os.path.join(image_path,
                                                  image_base_name + '.jpg')
                        annotation_name = os.path.join(
                            annotation_path, image_base_name + '.xml')
                        print("saving:", image_name, frame.shape,
                              annotation_name)
                        # original image - h: 480  w: 640
                        cv2.imwrite(image_name, orig_image)
                        # this function generates & saves the XML annotation
                        annotation_xml = annotation.inference_to_xml(
                            name, image_name, orig_image_dim, detected_objects,
                            annotation_dir)

                # enlarged_inference = cv2.resize(inference_image, (1440, 1440), interpolation = cv2.INTER_AREA)
                cv2.imshow(name, inference_image)  # show the inference image
                # cv2.imshow(name, orig_image)     # show the raw image from the camera
            else:
                cv2.imshow(name, frame)
        else:
            print("-- no frame returned -- ")

        # time.sleep(3)

        # Use key 'q' to close window
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    capture.release()
    cv2.destroyAllWindows()
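
main() only assumes that gen_util.read_app_config returns a dict with the keys read above. A minimal sketch of such a loader, with an illustrative app_config.json; only keys referenced in these examples are shown, and the values are made up:

import json

def read_app_config(path='app_config.json'):
    # plausible stand-in for gen_util.read_app_config: parse the JSON config into a dict
    with open(path) as f:
        return json.load(f)

# app_config.json (illustrative):
# {
#     "run_inference": true,
#     "save_inference": true,
#     "annotation_dir": "annotations",
#     "snapshot_dir": "snapshots",
#     "label_map": "model/label_map.pbtxt"
# }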