Example #1
 def test_list_edge_tpu_paths(self):
     num_all = len(
         edgetpu_utils.ListEdgeTpuPaths(edgetpu_utils.EDGE_TPU_STATE_NONE))
     unused_engine = BasicEngine(
         test_utils.test_data_path('mobilenet_v1_1.0_224_quant.tflite'))
     num_assigned = len(
         edgetpu_utils.ListEdgeTpuPaths(
             edgetpu_utils.EDGE_TPU_STATE_ASSIGNED))
     self.assertEqual(num_assigned, 1)
     num_available = len(
         edgetpu_utils.ListEdgeTpuPaths(
             edgetpu_utils.EDGE_TPU_STATE_UNASSIGNED))
     self.assertEqual(num_available, num_all - 1)
Example #2
    def test_use_all_edge_tpu(self):
        available_tpus = edgetpu_utils.ListEdgeTpuPaths(
            edgetpu_utils.EDGE_TPU_STATE_UNASSIGNED)
        recorded_tpus = []
        engine_list = []
        for _ in available_tpus:
            engine = BasicEngine(
                test_utils.test_data_path('mobilenet_v1_1.0_224_quant.tflite'))
            recorded_tpus.append(engine.device_path())
            engine_list.append(engine)

        remaining_tpus = edgetpu_utils.ListEdgeTpuPaths(
            edgetpu_utils.EDGE_TPU_STATE_UNASSIGNED)
        self.assertEqual(0, len(remaining_tpus))
        self.assertTupleEqual(tuple(recorded_tpus), available_tpus)
Example #3
 def test_device_path(self):
     all_edgetpu_paths = edgetpu_utils.ListEdgeTpuPaths(
         edgetpu_utils.EDGE_TPU_STATE_NONE)
     engine = BasicEngine(
         test_utils.test_data_path('mobilenet_v1_1.0_224_quant.tflite'),
         all_edgetpu_paths[0])
     self.assertEqual(engine.device_path(), all_edgetpu_paths[0])
Example #4
 def test_create_basic_engine_with_specific_path(self):
     edge_tpus = edgetpu_utils.ListEdgeTpuPaths(
         edgetpu_utils.EDGE_TPU_STATE_UNASSIGNED)
     self.assertGreater(len(edge_tpus), 0)
     model_path = test_utils.test_data_path(
         'mobilenet_v1_1.0_224_quant_edgetpu.tflite')
     basic_engine = BasicEngine(model_path, edge_tpus[0])
     self.assertEqual(edge_tpus[0], basic_engine.device_path())
Example #5
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--classification_model',
        type=str,
        default=
        '/home/pi/projects/NewRover/all_models/niranjan_model_edgetpu.tflite',
        help='Path of classification model.')
    parser.add_argument(
        '--detection_model',
        help='Path of detection model.',
        type=str,
        default=
        '/home/pi/projects/NewRover/all_models/mobilenet_ssd_v2_face_quant_postprocess_edgetpu.tflite'
    )
    parser.add_argument(
        '--image',
        type=str,
        default='/home/pi/projects/NewRover/all_models/Niranjan/65.jpg',
        help='Path of the image.')
    parser.add_argument('--num_inferences',
                        help='Number of inferences to run.',
                        type=int,
                        default=2000)
    parser.add_argument(
        '--batch_size',
        help='Runs one model batch_size times before switching to the other.',
        type=int,
        default=10)

    args = parser.parse_args()

    edge_tpus = edgetpu_utils.ListEdgeTpuPaths(
        edgetpu_utils.EDGE_TPU_STATE_UNASSIGNED)
    if len(edge_tpus) <= 1:
        print('This demo requires at least two available Edge TPUs.')

    print(
        'Running %s and %s with one Edge TPU, # inferences %d, batch_size %d.'
        % (args.classification_model, args.detection_model,
           args.num_inferences, args.batch_size))
    cost_one_tpu = run_two_models_one_tpu(args.classification_model,
                                          args.detection_model, args.image,
                                          args.num_inferences, args.batch_size)
    print(
        'Running %s and %s with two Edge TPUs, # inferences %d.' %
        (args.classification_model, args.detection_model, args.num_inferences))
    #cost_two_tpus = run_two_models_two_tpus(args.classification_model,
    #                                        args.detection_model, args.image,
    #                                        args.num_inferences)

    print('Inference with one Edge TPU costs %.2f seconds.' % cost_one_tpu)
Example #6
def main():
    num_inferences = 30000
    input_filename = 'cat.bmp'
    num_tpus = len(
        edgetpu_utils.ListEdgeTpuPaths(edgetpu_utils.EDGE_TPU_STATE_NONE))
    model_names = [
        'mobilenet_v1_1.0_224_quant_edgetpu.tflite',
        'mobilenet_v2_1.0_224_quant_edgetpu.tflite',
        'ssd_mobilenet_v1_coco_quant_postprocess_edgetpu.tflite',
        'ssd_mobilenet_v2_coco_quant_postprocess_edgetpu.tflite',
        'inception_v1_224_quant_edgetpu.tflite',
        'inception_v2_224_quant_edgetpu.tflite',
        'inception_v3_299_quant_edgetpu.tflite',
        'inception_v4_299_quant_edgetpu.tflite',
    ]

    def show_speedup(inference_costs):
        logging.info('Single Edge TPU base time: %f seconds',
                     inference_costs[0])
        for i in range(1, len(inference_costs)):
            logging.info('# TPUs: %d, speedup: %f', i + 1,
                         inference_costs[0] / inference_costs[i])

    inference_costs_map = {}
    for model_name in model_names:
        task_type = 'classification'
        if 'ssd' in model_name:
            task_type = 'detection'
        inference_costs_map[model_name] = [0.0] * num_tpus
        for num_threads in range(num_tpus, 0, -1):
            cost = run_inference_job(model_name, input_filename,
                                     num_inferences, num_threads, task_type)
            inference_costs_map[model_name][num_threads - 1] = cost
            logging.info('model: %s, # threads: %d, cost: %f seconds',
                         model_name, num_threads, cost)
        show_speedup(inference_costs_map[model_name])

    logging.info('============Summary==========')
    for model_name in model_names:
        inference_costs = inference_costs_map[model_name]
        logging.info('---------------------------')
        logging.info('Model: %s', model_name)
        show_speedup(inference_costs)
Example #7
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--classification_model',
                        help='Path of classification model.',
                        required=True)
    parser.add_argument('--detection_model',
                        help='Path of detection model.',
                        required=True)
    parser.add_argument('--image', help='Path of the image.', required=True)
    parser.add_argument('--num_inferences',
                        help='Number of inferences to run.',
                        type=int,
                        default=2000)
    parser.add_argument(
        '--batch_size',
        help='Runs one model batch_size times before switching to the other.',
        type=int,
        default=10)

    args = parser.parse_args()

    edge_tpus = edgetpu_utils.ListEdgeTpuPaths(
        edgetpu_utils.EDGE_TPU_STATE_UNASSIGNED)
    if len(edge_tpus) <= 1:
        print('This demo requires at least two available Edge TPUs.')
        return

    print(
        'Running %s and %s with one Edge TPU, # inferences %d, batch_size %d.'
        % (args.classification_model, args.detection_model,
           args.num_inferences, args.batch_size))
    cost_one_tpu = run_two_models_one_tpu(args.classification_model,
                                          args.detection_model, args.image,
                                          args.num_inferences, args.batch_size)
    print(
        'Running %s and %s with two Edge TPUs, # inferences %d.' %
        (args.classification_model, args.detection_model, args.num_inferences))
    cost_two_tpus = run_two_models_two_tpus(args.classification_model,
                                            args.detection_model, args.image,
                                            args.num_inferences)

    print('Inference with one Edge TPU costs %.2f seconds.' % cost_one_tpu)
    print('Inference with two Edge TPUs costs %.2f seconds.' % cost_two_tpus)
Example #8
def edge_tpus():
    """Yields all available unassigned Edge TPU devices.
    Set CORAL_VISIBLE_DEVICES environmental variable to a comma-separated list of device paths
    to make only those devices visible to the application.
    """

    try:
        env_cvd = os.environ.get("CORAL_VISIBLE_DEVICES")
        visible_devices = [x.strip() for x in env_cvd.split(",")
                           ] if env_cvd is not None else []

        devices = edgetpu_utils.ListEdgeTpuPaths(
            edgetpu_utils.EDGE_TPU_STATE_UNASSIGNED)
        for device in devices:
            if len(visible_devices) > 0 and device not in visible_devices:
                continue

            yield device, CoralObjectDetector
    except (RuntimeError, NameError):
        return
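# A minimal usage sketch for the generator above (not from the source): it assumes
# CoralObjectDetector accepts a model path and a device path; both the constructor
# signature and the model file name are illustrative assumptions.
detectors = []
for device_path, detector_cls in edge_tpus():
    detectors.append(detector_cls('model_edgetpu.tflite', device_path))
print('Initialized %d detector(s)' % len(detectors))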
Example #9
def inferencer(results, frameBuffer, model, camera_width, camera_height):

    engine = None

    # Acquisition of TPU list without model assignment
    devices = edgetpu_utils.ListEdgeTpuPaths(
        edgetpu_utils.EDGE_TPU_STATE_UNASSIGNED)

    devopen = False
    for device in devices:
        try:
            engine = DetectionEngine(model, device)
            devopen = True
            break
        except Exception:
            continue

    if not devopen:
        print("Failed to open any Edge TPU device!")
        sys.exit(1)

    print("Loaded Graphs!!! ")

    while True:

        if frameBuffer.empty():
            continue

        # Run inference.
        color_image = frameBuffer.get()
        prepimg = color_image[:, :, ::-1].copy()
        prepimg = Image.fromarray(prepimg)

        tinf = time.perf_counter()
        ans = engine.DetectWithImage(prepimg,
                                     threshold=0.5,
                                     keep_aspect_ratio=True,
                                     relative_coord=False,
                                     top_k=10)
        print(time.perf_counter() - tinf, "sec")
        results.put(ans)
Example #10
def _run_benchmark_for_cocompiled_models(model_names):
  """Runs benchmark for a given model set with random inputs. Models run
  inferences alternately with random inputs. It benchmarks the total time
  running each model once.

  Args:
    model_names: list of string, file names of the models.

  Returns:
    float, average sum of inferences times.
  """
  iterations = 200
  print('Benchmark for ', model_names)

  engines = []
  input_data_list = []
  edge_tpus = edgetpu_utils.ListEdgeTpuPaths(
      edgetpu_utils.EDGE_TPU_STATE_UNASSIGNED)

  for model_name in model_names:
    # Run models on a single edgetpu to achieve accurate benchmark results.
    engine = BasicEngine(test_utils.test_data_path(model_name), edge_tpus[0])

    # Prepare a randomly generated input.
    input_size = engine.required_input_array_size()
    random_input = test_utils.generate_random_input(1, input_size)

    # Convert it to a numpy.array.
    input_data = np.array(random_input, dtype=np.uint8)

    engines.append(engine)
    input_data_list.append(input_data)

  benchmark_time = timeit.timeit(
      lambda: _run_inferences(engines, input_data_list),
      number=iterations)

  # Time consumed for each iteration (milliseconds).
  time_per_inference = (benchmark_time / iterations) * 1000
  print(time_per_inference, 'ms (iterations = ', iterations, ')')
  return time_per_inference
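# A minimal call sketch (not from the source): the co-compiled model file names
# below are illustrative assumptions; the actual names depend on how the models
# were co-compiled for the Edge TPU.
if __name__ == '__main__':
  cocompiled_model_names = [
      'mobilenet_v1_1.0_224_quant_cocompiled_model_a_edgetpu.tflite',
      'mobilenet_v2_1.0_224_quant_cocompiled_model_b_edgetpu.tflite',
  ]
  _run_benchmark_for_cocompiled_models(cocompiled_model_names)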
Example #11
    def test_exhaust_all_edge_tpus(self):
        edge_tpus = edgetpu_utils.ListEdgeTpuPaths(
            edgetpu_utils.EDGE_TPU_STATE_UNASSIGNED)
        # No need to test if there's only one Edge TPU available.
        if len(edge_tpus) <= 1:
            return
        model_path = test_utils.test_data_path(
            'mobilenet_v1_1.0_224_quant_edgetpu.tflite')
        unused_basic_engines = []
        for _ in edge_tpus:
            unused_basic_engines.append(BasicEngine(model_path))

        # Request one more Edge TPU to trigger the exception.
        error_message = None
        expected_message = (
            'Multiple Edge TPUs detected and all have been mapped to at least one '
            'model. If you want to share one Edge TPU with multiple models, '
            'specify `device_path` name.')
        try:
            _ = BasicEngine(model_path)
        except RuntimeError as e:
            error_message = str(e)
        self.assertEqual(expected_message, error_message)
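    # A minimal sketch of the sharing pattern the error message above points to
    # (assumed from that message and from Examples #3 and #4, not taken from the
    # source): passing the same device_path lets two models share one Edge TPU
    # instead of raising the RuntimeError. Model file names are reused from the
    # examples above.
    def test_share_one_edge_tpu_between_models(self):
        shared_tpu = edgetpu_utils.ListEdgeTpuPaths(
            edgetpu_utils.EDGE_TPU_STATE_UNASSIGNED)[0]
        engine_a = BasicEngine(
            test_utils.test_data_path('mobilenet_v1_1.0_224_quant_edgetpu.tflite'),
            shared_tpu)
        engine_b = BasicEngine(
            test_utils.test_data_path('inception_v1_224_quant_edgetpu.tflite'),
            shared_tpu)
        self.assertEqual(engine_a.device_path(), engine_b.device_path())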
Example #12
def inferencer(results, frameBuffer, model, camera_width, camera_height):

    engine = None

    # Acquisition of TPU list without model assignment
    devices = edgetpu_utils.ListEdgeTpuPaths(
        edgetpu_utils.EDGE_TPU_STATE_UNASSIGNED)

    devopen = False
    for device in devices:
        try:
            engine = PoseEngine(model, device)
            devopen = True
            break
        except Exception:
            continue

    if not devopen:
        print("Failed to open any Edge TPU device!")
        sys.exit(1)

    print("Loaded Graphs!!! ")

    while True:

        if frameBuffer.empty():
            continue

        # Run inference.
        color_image = frameBuffer.get()
        prepimg = color_image[:, :, ::-1].copy()

        tinf = time.perf_counter()
        result, inference_time = engine.DetectPosesInImage(prepimg)
        print(time.perf_counter() - tinf, "sec")
        results.put(result)
Example #13
                help='USB Camera resolution (height). (Default=480)')
args = vars(ap.parse_args())

# initialize the original frame dimensions, new frame dimensions,
# and ratio between the dimensions
(W, H) = (None, None)
(newW, newH) = (args["width"], args["height"])
(rW, rH) = (None, None)

# define the two output layer names for the EAST detector model that
# we are interested in -- the first is the output probabilities and the
# second can be used to derive the bounding box coordinates of text

# load the pre-trained EAST text detector
print("[INFO] loading EAST text detector...")
devices = edgetpu_utils.ListEdgeTpuPaths(
    edgetpu_utils.EDGE_TPU_STATE_UNASSIGNED)
engine = BasicEngine(model_path=args["east"], device_path=devices[0])
offset = 0
output_offsets = [0]
for size in engine.get_all_output_tensors_sizes():
    offset += int(size)
    output_offsets.append(offset)

# if a video path was not supplied, grab the reference to the web cam
if not args.get("video", False):
    print("[INFO] starting video stream...")
    vs = VideoStream(src=0).start()
    time.sleep(1.0)

# otherwise, grab a reference to the video file
else:
Example #14
def run_demo(args):

    time.sleep(2)
    arduino.new_user_function()

    functions_main.send_uno_lights(arduino.ser1, "none")
    functions_main.send_uno_lights(arduino.ser1, "move")

    #Setting communications

    if args.model_hpe == "models/posenet_mobilenet_v1_075_481_641_quant_decoder_edgetpu.tflite":
        camera_width = 640
        camera_height = 480
    elif args.model_hpe == "models/posenet_mobilenet_v1_075_353_481_quant_decoder_edgetpu.tflite":
        camera_width = 480
        camera_height = 360
    else:
        camera_width = 1280
        camera_height = 720

    model_width = 640
    model_height = 480

    #Loading labels based on the Object Detector model (different labels for different networks)
    if args.model_od == "models/mobilenet-ssd.xml":
        labels = [
            'Background', 'Person', 'Car', 'Bus', 'Bicycle', 'Motorcycle'
        ]  #???
    elif args.model_od in ("models/ssdlite_mobilenet_v2/FP16/ssdlite_mobilenet_v2.xml", "models/ssdlite_mobilenet_v2/FP32/ssdlite_mobilenet_v2.xml"):
        labels = [
            " ",
        ]
        file1 = open("models/ssdlite_mobilenet_v2/labels.txt", 'r')
        while True:
            line = file1.readline().rstrip().split()
            if len(line) == 1: line = " "
            elif len(line) == 2: line = line[1]
            elif len(line) == 3: line = line[1] + " " + line[2]
            labels.append(line)
            if not line:
                labels.pop()
                break
        file1.close()

    # Posenet - Google Coral Inference
    print("#----- Loading Posenet - Coral Inference -----#")
    devices = edgetpu_utils.ListEdgeTpuPaths(
        edgetpu_utils.EDGE_TPU_STATE_UNASSIGNED)
    engine = PoseEngine(args.model_hpe, devices[0])
    res = []
    print("#-----Done-----#")

    #Mobilenet - NCS2 Inference
    print("#-----Loading SSDLite Mobilenet v2 - NCS2 Inference -----#")
    ie = IECore()
    detector_object = Detector(ie,
                               path_to_model_xml=args.model_od,
                               device=args.device,
                               label_class=args.person_label)
    print("#-----Done-----#")

    #Custom Gaze Estimator predictor
    print("#-----Loading Gaze Estimator Net -----#")
    model_gaze = prepare_modelSingle('relu')
    model_gaze.load_weights(
        '/home/pi/Detection-and-Human-Pose-Estimation---RASPBERRY/models/trainedOnGazeFollow_weights.h5'
    )
    print("#-----Done-----#")

    #Framerate variables
    fps = ""

    #Human Interaction variables
    TIME_OUT = 40  # How much time do I have to search for a human during the interaction loop?
    TIME_OUT_HUM = 10  # How much time can I stay without a human?
    JA_TIME = 30  # Duration of the JA task analysis
    child_action_prec = "none"
    tracking_a_user = False  # Is the obstacle read from the sonar a human?
    Finding_human = False  # Am I looking for a human?
    global receiveAction
    global JointAttention
    global child_action
    firstTime = True
    lookTo = ""
    previousAngle = 0
    angle = 0
    interaction = 0
    tooCloseCount = 0
    tooFarCount = 0

    #--> Counting the time that manages the search for a human
    time_out_system = 0
    start_time_out_system = 0
    current_time_out_system = 0

    #--> Counting the time of interaction
    time_out_system_hum = 0
    start_time_out_system_hum = 0
    current_time_out_system_hum = 0

    #---> Counting JA task observation
    start_time_JA = 0
    duration_JA = 0
    actual_time_JA = 0

    start_time_LOOKING = 0
    duration_LOOKING = 0
    actual_time_LOOKING = 0

    toleranceFrame = 0
    greetedTeddy = False
    oldTargetBox = []
    targetBox = []

    #Camera Thread
    vs = WebcamVideoStream(camera_width, camera_height, src=0).start()

    functions_main.send_uno_lights(arduino.ser1, "none")

    while True:

        t1 = time.perf_counter()
        frame = vs.read()
        frame = cv2.resize(frame, (model_width, model_height))
        color = (0, 0, 255)

        arduino.new_user_function()

        # Run Object Detection
        previousAngle = angle

        bboxes, labels_detected, score_detected, bboxes_person, bboxes_teddy = detector_object.detect(
            frame)
        main_person = [0, 0, 0, 0]
        main_teddy = [0, 0, 0, 0]
        areas = []
        for bbox in bboxes_person:
            area = bbox[2] * bbox[3]
            areas.append(area)
        if areas:
            box_person_num = areas.index(max(areas))
            main_person = [
                bboxes_person[box_person_num][0],
                bboxes_person[box_person_num][1],
                bboxes_person[box_person_num][2],
                bboxes_person[box_person_num][3]
            ]
            angle = target_camera_angle(main_person, camera_width)
        else:
            angle = 0

        areas = []

        for bbox in bboxes_teddy:
            area = bbox[2] * bbox[3]
            areas.append(area)
        if areas:
            box_teddy_num = areas.index(max(areas))
            main_teddy = [
                bboxes_teddy[box_teddy_num][0], bboxes_teddy[box_teddy_num][1],
                bboxes_teddy[box_teddy_num][2], bboxes_teddy[box_teddy_num][3]
            ]
            angleTeddy = target_camera_angle(main_teddy, camera_width)
            targetBox = [[main_teddy[0], main_teddy[1]],
                         [
                             main_teddy[0] + main_teddy[2],
                             main_teddy[1] + main_teddy[3]
                         ], [main_teddy[0] + main_teddy[2], main_teddy[1]],
                         [main_teddy[0], main_teddy[1] + main_teddy[3]]]

        else:
            angleTeddy = 0

        ####-----START HUMAN INTERACTION-----####
        count = 0

        #arduino.new_user_function() #Connect with the Mega and obtain data from sensors

        #interaction = 0 or interaction = 1 is when the system is trying to establish an interaction with the child
        #interaction = 2 is when the robot is already interacting with the human

        if JointAttention:
            time_out_system_hum = 0
            print("Joint Attention Task")
            if angleTeddy != 0 and greetedTeddy == False:  #there is a teddy in the FOV of robot
                print("Teddy Bear in the FOV")
                if (abs(angleTeddy) <=
                        10):  #the teddy is in front of the robot
                    if arduino.new_dist < 80:
                        print("Inviting to play with the teddy...")
                        functions_main.send_uno_lights(arduino.ser1,
                                                       "excited_attract")
                        functions_main.send_initial_action_arduino(
                            "backForth", arduino.ser, "happy")
                        functions_main.send_initial_action_arduino(
                            "scared", arduino.ser, "none")
                        greetedTeddy = True
                        start_time_LOOKING = time.time()
                    elif arduino.new_dist > 80:
                        print("Teddy too far, approaching...")
                        functions_main.send_initial_action_arduino(
                            "move", arduino.ser, "none")
                elif angleTeddy >= 10:
                    print("Adjusting right...")
                    functions_main.send_uno_lights(arduino.ser1, "rotateRight")
                    functions_main.send_initial_action_arduino(
                        "rotateRight", arduino.ser, "none")
                elif angleTeddy <= -10:
                    print("Adjusting left...")
                    functions_main.send_uno_lights(arduino.ser1, "rotateLeft")
                    functions_main.send_initial_action_arduino(
                        "rotateLeft", arduino.ser, "none")
            elif targetBox == oldTargetBox:  #if there is no Teddy Bear identified for a frame
                toleranceFrame += 1
                if toleranceFrame == 10 and greetedTeddy == False:  #if there actually is no teddy bear in the scene
                    print("Searching for a Teddy Bear...")
                    functions_main.send_uno_lights(arduino.ser1, "rotateRight")
                    functions_main.send_initial_action_arduino(
                        "rotateRight", arduino.ser, "none")
                    toleranceFrame = 0
                elif toleranceFrame == 20 and greetedTeddy:
                    #Task Completed (?)
                    toleranceFrame = 0

            if greetedTeddy:
                #I assume it did not spontaneously leave the scene, so Teddy should still be in position
                actual_time_JA = time.time()
                duration_JA = duration_JA + (actual_time_JA - start_time_JA)
                start_time_JA = actual_time_JA
                print("Time JA: {:.1f}".format(duration_JA))
                prep_image = frame[:, :, ::-1].copy()
                res, inference_time = engine.DetectPosesInImage(prep_image)
                if res:
                    head, scores_head = elaborate_pose(res)
                    frame = overlay_on_image(frame, res, model_width,
                                             model_height, main_person,
                                             args.modality)
                    frame, gazeAngle, headCentroid, prediction = elaborate_gaze(
                        frame, head, scores_head, model_gaze)
                    if headCentroid[1] == 0: gazeAngle = 0
                    if gazeAngle < 0: gazeAngle = 360 + gazeAngle
                    targetAngleMax = -360
                    targetAngleMin = 360
                    if targetBox:
                        for vertices in targetBox:
                            targetAngle = -math.degrees(
                                math.atan2(vertices[1] - headCentroid[1],
                                           vertices[0] - headCentroid[0]))
                            cv2.line(
                                frame, (vertices[0], vertices[1]),
                                (int(headCentroid[0]), int(headCentroid[1])),
                                (255, 255, 255), 1)
                            if targetAngle > targetAngleMax:
                                targetAngleMax = targetAngle
                                print("update max")
                            if targetAngle < targetAngleMin:
                                targetAngleMin = targetAngle
                                print("update min")
                        if targetAngleMax < 0:
                            targetAngleMax = 360 + targetAngleMax
                        if targetAngleMin < 0:
                            targetAngleMin = 360 + targetAngleMin
                        print(targetAngleMin, targetAngleMax)
                        print(gazeAngle)
                        actual_time_LOOKING = time.time()
                        if gazeAngle > (targetAngleMin - 5) and gazeAngle < (
                                targetAngleMax + 5):
                            color = (0, 255, 0)
                            duration_LOOKING = duration_LOOKING + abs(
                                actual_time_LOOKING - start_time_LOOKING)
                        start_time_LOOKING = actual_time_LOOKING
                print("Time spent looking at the teddy: {:.1f}".format(
                    duration_LOOKING))
        else:
            if duration_JA != 0 and duration_LOOKING != 0:
                now = datetime.now()
                dt_string = now.strftime("%d/%m/%y %H:%M:%S")
                data = dt_string + ',' + str(duration_JA) + ',' + str(
                    duration_LOOKING) + '\n'
                with open('experiment_records.csv', 'a') as fp:
                    print("Record stored successfully")
                    fp.write(data)
            duration_JA = 0
            duration_LOOKING = 0

        if interaction != 2 and not JointAttention:  #If I'm not interacting with the human
            print("Interaction != 2, I'm not interacting with the human")
            if arduino.old_user != "none":  #if an object is detected by the sonar, check if it is a human
                print("Object detected by sonars")
                if angle != 0:  # this can be replaced with a check if it is human, so this translates to: if there is a human
                    print("Human detected in the FOV")
                    count = 4
                    tracking_a_user = functions_main.human_verification(
                        angle, arduino.old_user,
                        count)  #it checks if the obstacle from the sonar is a human
                    if tracking_a_user == True:
                        print("Object from sonar is a human")
                        interaction = 1
                        ### --- I Should check if the path between the robot and the child is clear
                        ##if the human is free
                        ##if the human is free for few instant
                        interaction = 2
                        start_time_out_system_hum = time.time()
                        #THERE IS A HUMAN READY TO INTERACT WITH!
                        Finding_human = False
                    else:  #if it finds an object that is not a human, rotate until that obstacle is a human
                        print("Object from sonar is not a human")
                        if (
                                abs(angle) <= 10 and arduino.new_dist > 150
                        ):  #the human is in front of the robot and any right/left obstacles are far away
                            print("Human in front of me - Approaching ")
                            functions_main.send_uno_lights(
                                arduino.ser1, "move")
                            functions_main.send_initial_action_arduino(
                                "move", arduino.ser, "move")
                        elif angle >= 10:
                            print("Human detected in right position...")
                            functions_main.send_uno_lights(
                                arduino.ser1, "rotateRight")
                            functions_main.send_initial_action_arduino(
                                "rotateRight", arduino.ser, "none")
                        elif angle <= -10:
                            print("Human detected in left position...")
                            functions_main.send_uno_lights(
                                arduino.ser1, "rotateLeft")
                            functions_main.send_initial_action_arduino(
                                "rotateLeft", arduino.ser, "none")
                else:  #if there is no human
                    print("Searching ...")
                    functions_main.send_uno_lights(arduino.ser1, "rotateRight")
                    functions_main.send_initial_action_arduino(
                        "rotateRight", arduino.ser, "none")
                print("No object close")

            else:
                if angle != 0:  #there is a human in the FOV of robot
                    print("No object, but human in the FOV")
                    interaction = 1
                    if (abs(angle) <= 10):  #the human is in front of the robot
                        print("Approaching the human")
                        functions_main.send_uno_lights(arduino.ser1, "move")
                        functions_main.send_initial_action_arduino(
                            "move", arduino.ser, "move")
                    elif angle >= 10:
                        print("Searching right...")
                        functions_main.send_uno_lights(arduino.ser1,
                                                       "rotateRight")
                        functions_main.send_initial_action_arduino(
                            "rotateRight", arduino.ser, "none")
                    elif angle <= -10:
                        print("Searching left...")
                        functions_main.send_uno_lights(arduino.ser1,
                                                       "rotateLeft")
                        functions_main.send_initial_action_arduino(
                            "rotateLeft", arduino.ser, "none")
                else:  #if there is no human
                    print("Searching")
                    functions_main.send_uno_lights(arduino.ser1, "rotateRight")
                    functions_main.send_initial_action_arduino(
                        "rotateRight", arduino.ser, "none")

        elif interaction == 2 and not JointAttention:  # interaction = 2, so i'm starting the interaction loop
            print("INTERACTION LOOP")
            if time_out_system_hum > TIME_OUT_HUM:  #If there is no human for too long
                print("INTERACTION LOOP - I've lost contact with the human")
                Finding_human = True  # Am i looking for a human?
                time_out_system = 0
                start_time_out_system = time.time()
                time_out_system_hum = 0
            elif time_out_system_hum <= TIME_OUT_HUM and time_out_system < TIME_OUT and Finding_human == False:
                print("INTERACTION LOOP - Preparing the interaction")
                #If there is a human interacting and i'm inside the timeout
                time_out_system = 0
                print("Distance from sonar = {:.1f}".format(arduino.new_dist))
                if arduino.new_dist > 120.0:  #if the distance to the child is bigger than the threshold, get closer
                    tooCloseCount = 0
                    tooFarCount += 1
                    if tooFarCount > 10:
                        tooFarCount = 0
                        if abs(angle) <= 10 and angle != 0:
                            print(
                                "INTERACTION LOOP - Child is far and in front "
                            )
                            functions_main.send_uno_lights(
                                arduino.ser1, "move")
                            functions_main.send_initial_action_arduino(
                                "move", arduino.ser, "move_find")
                        elif angle > 10:
                            print("INTERACTION LOOP - Child is on the right ")
                            functions_main.send_uno_lights(
                                arduino.ser1, "rotateRight")
                            functions_main.send_initial_action_arduino(
                                "rotateRight", arduino.ser, "none")
                        elif angle < -10:
                            print("INTERACTION LOOP - Child is on the left ")
                            functions_main.send_uno_lights(
                                arduino.ser1, "rotateLeft")
                            functions_main.send_initial_action_arduino(
                                "rotateLeft", arduino.ser, "none")
                elif arduino.new_dist < 40.0:
                    tooCloseCount += 1
                    tooFarCount = 0
                    if tooCloseCount > 10:  #If I'm too close for too long (I need this time window to let the child interact with the robot)
                        print("INTERACTION LOOP - Too close")
                        functions_main.send_uno_lights(arduino.ser1, "move")
                        functions_main.send_initial_action_arduino(
                            "scared", arduino.ser, "move_find")
                        tooCloseCount = 0
                else:  #if the distance is in the normal range, perform the interaction loop normally and select the child's action (child_action)
                    # Run Object Detection. Start the human time-out timer here, because this branch also covers the case where the sonar senses no object (dist = max dist)
                    tooCloseCount = 0
                    tooFarCount = 0
                    current_time_out_system_hum = time.time()
                    time_out_system_hum = time_out_system_hum + (
                        current_time_out_system_hum -
                        start_time_out_system_hum)
                    start_time_out_system_hum = current_time_out_system_hum

                    if angle != 0:
                        print(
                            "INTERACTION LOOP - Correctly interacting, waiting to receive an action"
                        )
                        time_out_system_hum = 0
                        if firstTime:
                            functions_main.send_uno_lights(
                                arduino.ser1, "excited_attract")
                            functions_main.send_initial_action_arduino(
                                "excited_attract", arduino.ser,
                                "excited_attract")
                            firstTime = False
                        if receiveAction:
                            if child_action != "joint":
                                functions_main.decide_action(
                                    child_action
                                )  #decide robot behaviour based on action of the child and movement of the robot
                                functions_main.send_uno_lights(
                                    arduino.ser1,
                                    functions_main.current_action)
                                functions_main.send_initial_action_arduino(
                                    functions_main.current_action, arduino.ser,
                                    functions_main.current_action)
                                receiveAction = False
                            else:
                                JointAttention = True
                                start_time_JA = time.time()
                                duration_JA = 0
                    else:  #if no human
                        if previousAngle > 0 and angle == 0:
                            lookTo = "rotateRight"
                        elif previousAngle < 0 and angle == 0:
                            lookTo = "rotateLeft"
                    print("Child Action: " + child_action + " | " +
                          "Robot Action: " + functions_main.current_action)
                print("Time Out Human {:.1f} / 10 Sec".format(
                    time_out_system_hum))
            elif Finding_human == True and time_out_system < TIME_OUT:  #if I need to find the child and I'm inside the timeout
                print("INTERACTION LOOP - Looking for a human")
                #I need to find the human. I start counting for the general TIME_OUT
                # Run Object Detection
                current_time_out_system = time.time()
                time_out_system = time_out_system + (current_time_out_system -
                                                     start_time_out_system)
                start_time_out_system = current_time_out_system
                print("Time out: {:.1f} / 40 ".format(time_out_system))

                if angle != 0:  # this can be replaced with a check if it is human, so this translates to: if there is a human
                    print("INTERACTION LOOP - Human detected in the FOV")
                    Finding_human = False
                    functions_main.send_uno_lights(arduino.ser1,
                                                   "excited_attract")
                    functions_main.send_initial_action_arduino(
                        "excited_attract", arduino.ser, "excited_attract")
                    time_out_system_hum = 0
                    time_out_system = 0
                else:
                    print("INTERACTION LOOP - Searching")
                    functions_main.send_uno_lights(arduino.ser1, "none")
                    functions_main.send_initial_action_arduino(
                        lookTo, arduino.ser, "none")
            elif Finding_human == True and time_out_system > TIME_OUT:  #If I'm looking for the child and I run out of time
                print("Terminating the program")
                child_action = "QUIT"

        ####-----END HUMAN INTERACTION----####

        oldTargetBox = targetBox.copy()

        t2 = time.perf_counter()
        elapsedTime = t2 - t1
        fps = 1 / elapsedTime
        i = 0

        if args.no_show:
            print("Cicli al secondo: {:.1f} ".format(float(fps)))
            continue

        if child_action == "QUIT":
            if duration_JA != 0 and duration_LOOKING != 0:
                now = datetime.now()
                dt_string = now.strftime("%d/%m/%y %H:%M:%S")
                data = dt_string + ',' + str(duration_JA) + ',' + str(
                    duration_LOOKING) + '\n'
                with open('experiment_records.csv', 'a') as fp:
                    print("Record stored successfully")
                    fp.write(data)
            break

        #visualization
        for bbox in bboxes:
            cv2.rectangle(frame, (bbox[0], bbox[1]),
                          (bbox[0] + bbox[2], bbox[1] + bbox[3]), color, 1)
            if len(labels_detected) > 0:
                cv2.putText(frame, labels[labels_detected[i].astype(int)],
                            (bbox[0] + 3, bbox[1] - 7),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.4, (255, 255, 255))
                cv2.putText(frame, "{:.3f}".format(score_detected[i]),
                            (bbox[0] + 3, bbox[1] + 7),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.4, (255, 255, 255))
                i += 1  #indented at the same level as putText

        cv2.putText(frame, "FPS: {:.1f}".format(float(fps)), (5, 20),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)

        cv2.imshow('Demo', frame)

        key = cv2.waitKey(1)
        if key == 27:
            if duration_JA != 0 and duration_LOOKING != 0:
                now = datetime.now()
                dt_string = now.strftime("%d/%m/%y %H:%M:%S")
                data = dt_string + ',' + str(duration_JA) + ',' + str(
                    duration_LOOKING) + '\n'
                with open('experiment_records.csv', 'a') as fp:
                    print("Record stored successfully")
                    fp.write(data)
            break

    vs.stop()
Example #15
 def __init__(self, model_head_pose):
     # Init Head Pose Estimator
     self.devices = edgetpu_utils.ListEdgeTpuPaths(edgetpu_utils.EDGE_TPU_STATE_UNASSIGNED)
     self.engine = BasicEngine(model_path=model_head_pose, device_path=self.devices[0])
     self.model_height = self.engine.get_input_tensor_shape()[1]
     self.model_width  = self.engine.get_input_tensor_shape()[2]
Example #16
from edgetpu.basic import edgetpu_utils

version = edgetpu_utils.GetRuntimeVersion()
print(version)

all_edgetpu_paths = edgetpu_utils.ListEdgeTpuPaths(
    edgetpu_utils.EDGE_TPU_STATE_NONE)
print('Available EdgeTPU Device(s):')
print('\n'.join(all_edgetpu_paths))
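# A small extension sketch of the snippet above (not from the source): list the
# devices grouped by assignment state, using the same constants shown in the
# examples above.
for state, label in ((edgetpu_utils.EDGE_TPU_STATE_UNASSIGNED, 'Unassigned'),
                     (edgetpu_utils.EDGE_TPU_STATE_ASSIGNED, 'Assigned')):
    paths = edgetpu_utils.ListEdgeTpuPaths(state)
    print('%s Edge TPU(s): %d' % (label, len(paths)))
    for path in paths:
        print('  %s' % path)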