Example #1
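A minimal preamble for this example, assuming the djitellopy package; the module-level constants S (key-press speed) and FPS (display refresh rate) are used below but not shown in the original snippet, so their values here are placeholders.

import time

import cv2
import numpy as np
import pygame

from djitellopy import Tello

S = 60    # assumed speed sent on key presses; must stay within -100..100
FPS = 25  # assumed display refresh rate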
class FrontEnd(object):
    """ Maintains the Tello display and moves it through the keyboard keys.
        Press escape key to quit.
        The controls are:
            - T: Takeoff
            - L: Land
            - Arrow keys: Forward, backward, left and right.
            - A and D: Counter clockwise and clockwise rotations
            - W and S: Up and down.
    """
    def __init__(self):
        # Init pygame
        pygame.init()

        # Create pygame window
        pygame.display.set_caption("Tello video stream")
        self.screen = pygame.display.set_mode([960, 720])

        # Init Tello object that interacts with the Tello drone
        self.tello = Tello()

        # Drone velocities, each in the range -100..100
        self.for_back_velocity = 0
        self.left_right_velocity = 0
        self.up_down_velocity = 0
        self.yaw_velocity = 0
        self.speed = 10

        self.send_rc_control = False

        # create update timer
        pygame.time.set_timer(pygame.USEREVENT + 1, 50)

    def run(self):

        if not self.tello.connect():
            print("Tello not connected")
            return

        if not self.tello.set_speed(self.speed):
            print("Not set speed to lowest possible")
            return

        # In case streaming was left on from a previous run (e.g. the program quit without the escape key).
        if not self.tello.streamoff():
            print("Could not stop video stream")
            return

        if not self.tello.streamon():
            print("Could not start video stream")
            return

        frame_read = self.tello.get_frame_read()

        should_stop = False
        while not should_stop:

            for event in pygame.event.get():
                if event.type == pygame.USEREVENT + 1:
                    self.update()
                elif event.type == pygame.QUIT:
                    should_stop = True
                elif event.type == pygame.KEYDOWN:
                    if event.key == pygame.K_ESCAPE:
                        should_stop = True
                    else:
                        self.keydown(event.key)
                elif event.type == pygame.KEYUP:
                    self.keyup(event.key)

            if frame_read.stopped:
                frame_read.stop()
                break

            self.screen.fill([0, 0, 0])
            frame = cv2.cvtColor(frame_read.frame, cv2.COLOR_BGR2RGB)
            frame = np.rot90(frame)
            frame = np.flipud(frame)
            frame = pygame.surfarray.make_surface(frame)
            self.screen.blit(frame, (0, 0))
            pygame.display.update()

            time.sleep(1 / FPS)

        # Always call this before finishing, to deallocate resources.
        self.tello.end()

    def keydown(self, key):
        """ Update velocities based on key pressed
        Arguments:
            key: pygame key
        """
        if key == pygame.K_UP:  # set forward velocity
            self.for_back_velocity = S
        elif key == pygame.K_DOWN:  # set backward velocity
            self.for_back_velocity = -S
        elif key == pygame.K_LEFT:  # set left velocity
            self.left_right_velocity = -S
        elif key == pygame.K_RIGHT:  # set right velocity
            self.left_right_velocity = S
        elif key == pygame.K_w:  # set up velocity
            self.up_down_velocity = S
        elif key == pygame.K_s:  # set down velocity
            self.up_down_velocity = -S
        elif key == pygame.K_a:  # set yaw counter clockwise velocity
            self.yaw_velocity = -S
        elif key == pygame.K_d:  # set yaw clockwise velocity
            self.yaw_velocity = S
        elif key == pygame.K_b:  # get battery
            print(self.tello.get_battery())
        elif key == pygame.K_c:  # send command from terminal
            command = input(
                'Enter command (speed?time?battery?tof?height?baro?temp?wifi?sn?sdk?altitude?)'
            )
            print(self.tello.send_read_command(command))
        elif key == pygame.K_1:
            print('flip right')
            self.tello.flip('r')
        elif key == pygame.K_2:
            print('flip left')
            self.tello.flip('l')
        elif key == pygame.K_3:
            print('flip forward')
            self.tello.flip('f')
        elif key == pygame.K_4:
            print('flip back')
            self.tello.flip('b')

    def keyup(self, key):
        """ Update velocities based on key released
        Arguments:
            key: pygame key
        """
        if key == pygame.K_UP or key == pygame.K_DOWN:  # set zero forward/backward velocity
            self.for_back_velocity = 0
        elif key == pygame.K_LEFT or key == pygame.K_RIGHT:  # set zero left/right velocity
            self.left_right_velocity = 0
        elif key == pygame.K_w or key == pygame.K_s:  # set zero up/down velocity
            self.up_down_velocity = 0
        elif key == pygame.K_a or key == pygame.K_d:  # set zero yaw velocity
            self.yaw_velocity = 0
        elif key == pygame.K_t:  # takeoff
            self.tello.takeoff()
            self.send_rc_control = True
        elif key == pygame.K_l:  # land
            self.tello.land()
            self.send_rc_control = False

    def update(self):
        """ Update routine. Send velocities to Tello."""
        if self.send_rc_control:
            self.tello.send_rc_control(self.left_right_velocity,
                                       self.for_back_velocity,
                                       self.up_down_velocity,
                                       self.yaw_velocity)
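A typical entry point for this example, assuming the snippet is run as a script (the original listing ends at the class definition):

def main():
    frontend = FrontEnd()
    # run() blocks until the escape key or the window close button is pressed.
    frontend.run()


if __name__ == '__main__':
    main()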
Example #2
                        print("Rotating left")
                        tello.rotate_clockwise(360)

                    if _split[1] == "rotate_right" and takeoff == True:
                        print('Received', data)
                        print("Rotating left")
                        tello.rotate_counter_clockwise(360)

                    if _split[1] == "stop" and takeoff == True:
                        print('Received', data)
                        _move = (0, 0, 0, 0)
                        print("Stopping", _move)
                        tello.send_rc_control(int(_move[0]), int(_move[1]),
                                              int(_move[2]), int(_move[3]))

                    if _split[1] == "move" and takeoff == True:
                        print('Received', data)
                        _move = (_split[2], _split[3], _split[4], _split[5])
                        print("Moving", _move)
                        tello.send_rc_control(int(_move[0]), int(_move[1]),
                                              int(_move[2]), int(_move[3]))

                    if _split[1] == "flip" and takeoff == True:
                        print('Received', data)
                        tello.flip(_split[2])

                    if _split[1] == "quit":
                        break
                except Exception as e:
                    print(e)
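For context, a minimal sketch of the receive loop this fragment likely sits in, assuming commands arrive over a UDP socket as comma-separated strings (the socket setup, port, and message format are assumptions not shown in the original):

import socket

from djitellopy import Tello

tello = Tello()
tello.connect()
takeoff = False

sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(("0.0.0.0", 9000))  # hypothetical listening port

while True:
    data, _addr = sock.recvfrom(1024)  # e.g. b"cmd,move,0,50,0,0"
    try:
        _split = data.decode().split(",")
        if _split[1] == "takeoff":
            tello.takeoff()
            takeoff = True
        # ... the branches shown above go here ...
        if _split[1] == "quit":
            break
    except Exception as e:
        print(e)

tello.end()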
Example #3
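A minimal preamble for this example, assuming the vk_api and djitellopy packages (the imports are not shown in the original snippet). The bot's button labels and messages are Russian string literals that the handlers match against, so they are kept as-is and glossed with English comments below:

import vk_api
from vk_api.keyboard import VkKeyboard, VkKeyboardColor
from vk_api.longpoll import VkLongPoll, VkEventType

from djitellopy import Tello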
def main():
    tello = Tello()
    tello.connect()

    vk_session = vk_api.VkApi(token='TOKEN')
    vk = vk_session.get_api()

    letsgo = VkKeyboard(one_time=True)
    letsgo.add_button('Начать', color=VkKeyboardColor.DEFAULT)  # 'Start'

    keyboard_takeoff = VkKeyboard(one_time=True)
    keyboard_takeoff.add_button('Взлёт', color=VkKeyboardColor.POSITIVE)  # 'Takeoff'

    keyboard = VkKeyboard(one_time=True)
    keyboard.add_button('Вверх', color=VkKeyboardColor.PRIMARY)  # 'Up'
    keyboard.add_button('Вниз', color=VkKeyboardColor.PRIMARY)  # 'Down'
    keyboard.add_line()
    keyboard.add_button('Вперёд', color=VkKeyboardColor.PRIMARY)  # 'Forward'
    keyboard.add_line()
    keyboard.add_button('Влево', color=VkKeyboardColor.PRIMARY)  # 'Left'
    keyboard.add_button('Флип', color=VkKeyboardColor.NEGATIVE)  # 'Flip'
    keyboard.add_button('Вправо', color=VkKeyboardColor.PRIMARY)  # 'Right'
    keyboard.add_line()
    keyboard.add_button('Назад', color=VkKeyboardColor.PRIMARY)  # 'Back'
    keyboard.add_line()
    keyboard.add_button('Проверка', color=VkKeyboardColor.DEFAULT)  # 'Check' (battery)
    keyboard.add_line()
    keyboard.add_button('Посадка', color=VkKeyboardColor.NEGATIVE)  # 'Land'

    flip_keyboard = VkKeyboard(one_time=False)
    flip_keyboard.add_button('Флип вперёд', color=VkKeyboardColor.DEFAULT)  # 'Flip forward'
    flip_keyboard.add_line()
    flip_keyboard.add_button('Флип влево', color=VkKeyboardColor.DEFAULT)  # 'Flip left'
    flip_keyboard.add_button('Флип вправо', color=VkKeyboardColor.DEFAULT)  # 'Flip right'
    flip_keyboard.add_line()
    flip_keyboard.add_button('Флип назад', color=VkKeyboardColor.DEFAULT)  # 'Flip back'
    flip_keyboard.add_line()
    flip_keyboard.add_button('Доступность флипа',  # 'Flip availability'
                             color=VkKeyboardColor.NEGATIVE)
    flip_keyboard.add_line()
    flip_keyboard.add_button('Вернуться в меню управления',  # 'Back to the control menu'
                             color=VkKeyboardColor.NEGATIVE)

    mainmenu_keyboard = VkKeyboard(one_time=True)
    mainmenu_keyboard.add_button('Начать', color=VkKeyboardColor.DEFAULT)  # 'Start'
    mainmenu_keyboard.add_line()
    mainmenu_keyboard.add_button('Конец работы', color=VkKeyboardColor.DEFAULT)  # 'End session'

    longpoll = VkLongPoll(vk_session)

    for event in longpoll.listen():
        if event.type == VkEventType.MESSAGE_NEW and event.to_me and event.text:
            if event.text.lower() == 'флип':  # 'flip': open the flip menu
                vk.messages.send(  # Send the reply message
                    user_id=event.user_id,
                    keyboard=flip_keyboard.get_keyboard(),
                    message='Переход в меню флипов')
            if event.text.lower() == 'начать':  # 'start'
                vk.messages.send(  # Send the reply message
                    user_id=event.user_id,
                    keyboard=keyboard_takeoff.get_keyboard(),
                    message='Взлет?')
            if event.text.lower() == 'конец работы':  # 'end session'
                vk.messages.send(  # Send the reply message
                    user_id=event.user_id,
                    message='Конец работы')
            if event.text.lower() == 'взлёт':  # 'takeoff'
                vk.messages.send(  # Send the reply message
                    user_id=event.user_id,
                    keyboard=keyboard.get_keyboard(),
                    message='Куда летим?')
                tello.takeoff()
                print('Взлёт')
            if event.text.lower() == 'посадка':  # 'land'
                vk.messages.send(  # Send the reply message
                    user_id=event.user_id,
                    keyboard=mainmenu_keyboard.get_keyboard(),
                    message='Посадка')
                tello.land()
                print('Посадка')
            if event.text.lower() == 'вперёд':  # 'forward'
                vk.messages.send(  # Send the reply message
                    user_id=event.user_id,
                    keyboard=keyboard.get_keyboard(),
                    message='Полёт вперёд')
                tello.move_forward(100)
                print('Вперёд')
            if event.text.lower() == 'влево':  # 'left'
                vk.messages.send(  # Send the reply message
                    user_id=event.user_id,
                    keyboard=keyboard.get_keyboard(),
                    message='Полёт влево')
                tello.move_left(100)
                print('Влево')
            if event.text.lower() == 'вправо':  # 'right'
                vk.messages.send(  # Send the reply message
                    user_id=event.user_id,
                    keyboard=keyboard.get_keyboard(),
                    message='Полёт вправо')
                tello.move_right(100)
                print('Вправо')
            if event.text.lower() == 'назад':  # 'back'
                vk.messages.send(  # Send the reply message
                    user_id=event.user_id,
                    keyboard=keyboard.get_keyboard(),
                    message='Полёт назад')
                tello.move_back(100)
                print('Назад')
            if event.text.lower() == 'проверка':  # 'check': report the battery level
                x = tello.get_battery()
                vk.messages.send(  # Send the reply message
                    user_id=event.user_id,
                    keyboard=keyboard.get_keyboard(),
                    message='Заряд батареи ' + x)
                print('Проверка')
            if event.text.lower() == 'вверх':  # 'up'
                vk.messages.send(  # Send the reply message
                    user_id=event.user_id,
                    keyboard=keyboard.get_keyboard(),
                    message='Полёт вверх')
                tello.move_up(20)
                print('Вверх')
            if event.text.lower() == 'вниз':  # 'down'
                vk.messages.send(  # Send the reply message
                    user_id=event.user_id,
                    keyboard=keyboard.get_keyboard(),
                    message='Полёт вниз')
                tello.move_down(20)
                print('Вниз')
            if event.text.lower() == 'флип назад':  # 'flip back'
                vk.messages.send(  # Send the reply message
                    user_id=event.user_id,
                    keyboard=flip_keyboard.get_keyboard(),
                    message='Флип назад')
                tello.flip('b')
                print('Флип назад')
            if event.text.lower() == 'флип вперёд':  # 'flip forward'
                vk.messages.send(  # Send the reply message
                    user_id=event.user_id,
                    keyboard=flip_keyboard.get_keyboard(),
                    message='Флип вперёд')
                tello.flip('f')
                print('Флип вперёд')
            if event.text.lower() == 'флип влево':  # 'flip left'
                vk.messages.send(  # Send the reply message
                    user_id=event.user_id,
                    keyboard=flip_keyboard.get_keyboard(),
                    message='Флип влево')
                tello.flip('l')
                print('Флип влево')
            if event.text.lower() == 'флип вправо':  # 'flip right'
                vk.messages.send(  # Send the reply message
                    user_id=event.user_id,
                    keyboard=flip_keyboard.get_keyboard(),
                    message='Флип вправо')
                tello.flip('r')
                print('Флип вправо')
            if event.text.lower() == 'доступность флипа':  # 'flip availability'
                x = tello.get_battery()
                if x == 'ok':  # battery answer was missed; ask the user to press again
                    vk.messages.send(user_id=event.user_id,
                                     keyboard=flip_keyboard.get_keyboard(),
                                     message='Нажмите ещё раз')
                else:
                    if int(x) > 50:
                        vk.messages.send(user_id=event.user_id,
                                         keyboard=flip_keyboard.get_keyboard(),
                                         message='Заряд батареи ' + x +
                                         '. Флип доступен')
                    else:
                        vk.messages.send(user_id=event.user_id,
                                         keyboard=flip_keyboard.get_keyboard(),
                                         message='Заряд батареи ' + x +
                                         '. Флип недоступен')
            if event.text.lower() == 'вернуться в меню управления':  # 'back to the control menu'
                vk.messages.send(  # Send the reply message
                    user_id=event.user_id,
                    keyboard=keyboard.get_keyboard(),
                    message='Возврат в меню управления')
                print('Возврат в меню управления')
    tello.end()
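A typical entry point, assuming this file is run as a script:

if __name__ == '__main__':
    main()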
Example #4
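A minimal preamble for this example. The State and GameEvents enums, the log logger, and the FPS constant are referenced below but not shown in the original snippet, so the definitions here are assumptions sketched from how they are used:

import logging
import time
from enum import Enum

import cv2
import numpy as np
import pygame
from pygame.locals import KEYDOWN, USEREVENT

from djitellopy import Tello

log = logging.getLogger(__name__)
FPS = 25  # assumed display refresh rate


class State(Enum):
    # Assumed flight states, named after how the snippet uses them.
    initializing = 0
    yawing = 1
    idle = 2
    flipping = 3


class GameEvents(Enum):
    VIDEO_EVENT = USEREVENT + 2  # assumed custom pygame event id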
class Drone:
    SEARCH_TIMEOUT = 10 * 1000

    def __init__(self):
        pygame.init()
        pygame.display.set_caption("Tello video stream")
        self.screen = pygame.display.set_mode([960, 720])

        self.tello = Tello()

        # Drone velocities, each in the range -100..100
        self.for_back_velocity = 0
        self.left_right_velocity = 0
        self.up_down_velocity = 0
        self.yaw_velocity = 0
        self.speed = 10

        self.command_queue = []
        self.shutdown = False
        self.should_stop = False

        # create update timer
        pygame.time.set_timer(USEREVENT + 1, 50)
        self.tello.state = State.initializing

    def flip(self, direction='b'):
        for event in pygame.event.get():
            if event.type == GameEvents.VIDEO_EVENT.value:
                log.info('Flipping')
                self.tello.state = State.flipping
                self.tello.flip(direction)
                log.info('Idle')
                self.tello.state = State.idle

    def fly(self):
        if not self.tello.connect():
            print("Tello not connected")
            return

        if not self.tello.set_speed(self.speed):
            print("Not set speed to lowest possible")
            return
        # In case streaming was left on from a previous run (e.g. the program quit without the escape key).
        if not self.tello.streamoff():
            print("Could not stop video stream")
            return

        if not self.tello.streamon():
            print("Could not start video stream")
            return
        frame_read = self.tello.get_frame_read()
        time.sleep(2)
        self.tello.takeoff()
        if self.tello.state is State.initializing:
            self.tello.state = State.yawing
            self.yaw_velocity = 100
        while not self.should_stop:
            for event in pygame.event.get():
                if event.type == USEREVENT + 1:
                    if (self.tello.state is State.initializing or
                            self.tello.state is State.yawing or
                            self.tello.state is State.idle):
                        self.update()
                if event.type == GameEvents.VIDEO_EVENT.value:
                    if self.tello.state is State.yawing:
                        self.yaw_velocity = 0
                        self.tello.state = State.idle
                    elif self.tello.state is State.idle:
                        self.flip()
                elif event.type == KEYDOWN:
                    self.should_stop = True
                    # if event.key == K_ESCAPE:
                    #     log.info('Landing')
                    #     self.tello.land()
                    #     break
            if frame_read.stopped:
                frame_read.stop()
                break

            self.screen.fill([0, 0, 0])
            frame = cv2.cvtColor(frame_read.frame, cv2.COLOR_BGR2RGB)
            frame = np.rot90(frame)
            frame = np.flipud(frame)
            frame = pygame.surfarray.make_surface(frame)
            self.screen.blit(frame, (0, 0))
            pygame.display.update()

            time.sleep(1 / FPS)

        self.tello.end()

    def update(self):
        """ Update routine. Send velocities to Tello."""
        if self.tello.state is not State.flipping:
            self.tello.send_rc_control(
                self.left_right_velocity, self.for_back_velocity,
                self.up_down_velocity, self.yaw_velocity)
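A typical entry point, assuming this file is run as a script:

if __name__ == '__main__':
    Drone().fly()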
Example #5
        elif _cmd == "move_right":
            # left_right_velocity, forward_backward_velocity, up_down_velocity, yaw_velocity
            command = f"move,{-SPEED},0,0,0"

        elif _cmd == "rotate_left":
            # left_right_velocity, forward_backward_velocity, up_down_velocity, yaw_velocity
            command = f"rotate_left"
        elif _cmd == "rotate_right":
            # left_right_velocity, forward_backward_velocity, up_down_velocity, yaw_velocity
            command = f"rotate_right"

        elif _cmd == "flip_right":
            command = f"flip,r"
            if takeoff == True:
                tello.flip('r')

        elif _cmd == "flip_left":
            command = f"flip,l"
            if takeoff == True:
                tello.flip('l')

        elif _cmd == "flip_forward":
            command = f"flip,f"
            if takeoff == True:
                tello.flip('f')

        elif _cmd == "flip_backward":
            command = f"flip,b"
            if takeoff == True:
                tello.flip('b')
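For context, a minimal sketch of how the command strings built above might be sent, assuming they travel over UDP to a dispatcher like the one in Example #2 (the dispatch helper, socket, address, and SPEED constant are assumptions not shown in the original):

import socket

SPEED = 50  # assumed rc speed; must stay within -100..100

sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

def dispatch(command):
    # Prefix a routing token so the receiver's _split[1] is the command name.
    sock.sendto(("cmd," + command).encode(), ("192.168.1.50", 9000))  # hypothetical receiver address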
Example #6
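A minimal preamble for this example, assuming the old openvino.inference_engine API (IEPlugin/IENetwork, removed in later OpenVINO releases) and djitellopy. The helpers YoloV3Params, parse_yolo_region, intersection_over_union, build_argparser and the coco_labels list are used below but not shown, and are assumed to be defined elsewhere in the file (as in the OpenVINO YOLOv3 demo); the snippet itself begins inside a nested function, so only the import context is sketched here:

import logging as log
import os
import sys
from time import time

import cv2

from djitellopy import Tello
from openvino.inference_engine import IENetwork, IEPlugin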
            def thread1():
                detection_buffer = 10  # seconds before the drone may respond again to seeing a person
                detect_gate = False  # gate that must open before the drone reacts to seeing someone
                detection_time = float('inf')  # avoids an uninitialized-variable error below
                det_label = 100
                print("Running Thread 1")
                footage = tello.get_frame_read()
                # print("I got a frame!")
                # cv2.imshow("DetectionResults", footage.frame) #no need to have it up here
                #raw_key = cv2.waitKey(1)

                is_async_mode = False
                wait_key_code = 0
                # ----------------------------------------- 5. Loading model to the plugin -----------------------------------------
                log.info("Loading model to the plugin")
                exec_net = plugin.load(network=net, num_requests=2)

                cur_request_id = 0
                next_request_id = 1
                render_time = 0
                parsing_time = 0

                # ----------------------------------------------- 6. Doing inference -----------------------------------------------
                print(
                    "To close the application, press 'CTRL+C' or any key with focus on the output window"
                )
                print("Here's the type of footage.drone: " +
                      str(type(footage.frame)))
                if is_async_mode:
                    ret = True  #footage.frame
                    next_frame = footage.frame  # In cap.read its 91600 size
                else:
                    ret = True  #footage.frame
                    frame = footage.frame
                    # print ("footage.frame type is: " + str(type(footage.drone)))
                    print("Output Representation: " + str(footage.frame))
                    print("Here's the size of the array: " +
                          str(footage.frame.size))

                # if not ret: #commented due to truth value
                #     print("RET VALUE IS FALSE!")
                #     break

                if is_async_mode:
                    request_id = next_request_id
                    in_frame = cv2.resize(next_frame, (w, h))
                else:
                    request_id = cur_request_id
                    in_frame = cv2.resize(frame, (w, h))

                # resize input_frame to network size
                in_frame = in_frame.transpose(
                    (2, 0, 1))  # Change data layout from HWC to CHW
                in_frame = in_frame.reshape((n, c, h, w))

                # Start inference
                start_time = time()

                exec_net.start_async(request_id=request_id,
                                     inputs={input_blob: in_frame})
                det_time = time() - start_time

                # Collecting object detection results
                objects = list()
                if exec_net.requests[cur_request_id].wait(-1) == 0:
                    output = exec_net.requests[cur_request_id].outputs

                    start_time = time()
                    for layer_name, out_blob in output.items():
                        layer_params = YoloV3Params(
                            net.layers[layer_name].params, out_blob.shape[2])
                        log.info("Layer {} parameters: ".format(layer_name))
                        layer_params.log_params()
                        objects += parse_yolo_region(out_blob,
                                                     in_frame.shape[2:],
                                                     frame.shape[:-1],
                                                     layer_params,
                                                     args.prob_threshold)
                    parsing_time = time() - start_time

                # Filtering overlapping boxes with respect to the --iou_threshold CLI parameter
                for i in range(len(objects)):
                    if objects[i]['confidence'] == 0:
                        continue
                    for j in range(i + 1, len(objects)):
                        if intersection_over_union(
                                objects[i], objects[j]) > args.iou_threshold:
                            objects[j]['confidence'] = 0

                # Drawing objects with respect to the --prob_threshold CLI parameter
                objects = [
                    obj for obj in objects
                    if obj['confidence'] >= args.prob_threshold
                ]

                if len(objects) and args.raw_output_message:
                    log.info("\nDetected boxes for batch {}:".format(1))
                    log.info(
                        " Class ID | Confidence | XMIN | YMIN | XMAX | YMAX | COLOR "
                    )

                origin_im_size = frame.shape[:-1]

                for obj in objects:
                    # Validation bbox of detected object
                    if obj['xmax'] > origin_im_size[1] or obj[
                            'ymax'] > origin_im_size[0] or obj[
                                'xmin'] < 0 or obj['ymin'] < 0:
                        continue
                    color = (int(min(obj['class_id'] * 12.5,
                                     255)), min(obj['class_id'] * 7, 255),
                             min(obj['class_id'] * 5, 255))
                    det_label = labels_map[obj['class_id']] if labels_map and len(labels_map) >= obj['class_id'] else \
                        str(obj['class_id'])
                    if int(det_label) == 0:  # class 0 is 'person' in COCO
                        print("Configuring tello Drone")
                        tello = Tello()

                        if not tello.connect():
                            print("Tello not connected")
                            return

                        if not tello.streamon():
                            print("Could not start video stream")
                            return
                        # tello.takeoff()

                        # Start the drone flight with takeoff (I'm putting a 3 minute limit before the drone comes down)
                        if not tello.takeoff():
                            print("Takeoff failed")

                        log.info("Takeoff should have occured.")

                    if args.raw_output_message:
                        log.info(
                            "{:^9} | {:10f} | {:4} | {:4} | {:4} | {:4} | {} ".
                            format(det_label, obj['confidence'], obj['xmin'],
                                   obj['ymin'], obj['xmax'], obj['ymax'],
                                   color))

                    cv2.rectangle(frame, (obj['xmin'], obj['ymin']),
                                  (obj['xmax'], obj['ymax']), color, 2)
                    cv2.putText(
                        frame, "#" + det_label + ' ' +
                        str(round(obj['confidence'] * 100, 1)) + ' %',
                        (obj['xmin'], obj['ymin'] - 7),
                        cv2.FONT_HERSHEY_COMPLEX, 0.6, color, 1)

                    print("YOLOV3 IS DETECTING OBJECT: " +
                          str(coco_labels[int(det_label)]))
                    if (int(det_label) == 0):
                        detection_time = time()
                #if int(det_label) == 80:
                #    detection_time = time()
                #    print("seeing nada")
                if (time() - detection_time) > detection_buffer:
                    detect_gate = True
                    # detect_gate = False

                # person not detected.. classification id 80
                if int(det_label) == 0 and detect_gate:
                    detection_time = time()
                    tello.rotate_clockwise(
                        90)  # current reaction to seeing a person
                    detect_gate = False
                str1 = str(coco_labels[int(det_label)])
                str2 = 'person'
                if str1 == str2:
                    print("sees person and flips forward")
                    tello.flip('f')
                    # tello.rotate_clockwise(90)
                # Draw performance stats over frame
                inf_time_message = "Inference time: N\A for async mode" if is_async_mode else \
                    "Inference time: {:.3f} ms".format(det_time * 1e3)
                render_time_message = "OpenCV rendering time: {:.3f} ms".format(
                    render_time * 1e3)
                async_mode_message = "Async mode is on. Processing request {}".format(cur_request_id) if is_async_mode else \
                    "Async mode is off. Processing request {}".format(cur_request_id)
                parsing_message = "YOLO parsing time is {:.3f}".format(
                    parsing_time * 1e3)

                cv2.putText(frame, inf_time_message, (15, 15),
                            cv2.FONT_HERSHEY_COMPLEX, 0.5, (200, 10, 10), 1)
                cv2.putText(frame, render_time_message, (15, 45),
                            cv2.FONT_HERSHEY_COMPLEX, 0.5, (10, 10, 200), 1)
                cv2.putText(frame, async_mode_message,
                            (10, int(origin_im_size[0] - 20)),
                            cv2.FONT_HERSHEY_COMPLEX, 0.5, (10, 10, 200), 1)
                cv2.putText(frame, parsing_message, (15, 30),
                            cv2.FONT_HERSHEY_COMPLEX, 0.5, (10, 10, 200), 1)

                if is_async_mode:
                    cur_request_id, next_request_id = next_request_id, cur_request_id
                    frame = next_frame

                key = cv2.waitKey(wait_key_code)

                # Esc key (27) would break out of the loop; disabled here
                # if key == 27:
                #     break
                # Tab key (9) toggles sync/async mode
                if key == 9:
                    exec_net.requests[cur_request_id].wait()
                    is_async_mode = not is_async_mode
                    log.info("Switched to {} mode".format(
                        "async" if is_async_mode else "sync"))
def main():
    args = build_argparser().parse_args()

    model_xml = args.model
    model_bin = os.path.splitext(model_xml)[0] + ".bin"

    # ------------- 1. Plugin initialization for specified device and load extensions library if specified -------------
    plugin = IEPlugin(device=args.device, plugin_dirs=args.plugin_dir)
    if args.cpu_extension and 'CPU' in args.device:
        plugin.add_cpu_extension(args.cpu_extension)

    # -------------------- 2. Reading the IR generated by the Model Optimizer (.xml and .bin files) --------------------
    log.info("Loading network files:\n\t{}\n\t{}".format(model_xml, model_bin))
    net = IENetwork(model=model_xml, weights=model_bin)

    # ---------------------------------- 3. Load CPU extension for support specific layer ------------------------------
    if plugin.device == "CPU":
        supported_layers = plugin.get_supported_layers(net)
        not_supported_layers = [
            l for l in net.layers.keys() if l not in supported_layers
        ]
        if len(not_supported_layers) != 0:
            log.error(
                "Following layers are not supported by the plugin for specified device {}:\n {}"
                .format(plugin.device, ', '.join(not_supported_layers)))
            log.error(
                "Please try to specify cpu extensions library path in sample's command line parameters using -l "
                "or --cpu_extension command line argument")
            sys.exit(1)

    assert len(net.inputs.keys(
    )) == 1, "Sample supports only YOLO V3 based single input topologies"
    assert len(
        net.outputs
    ) == 3, "Sample supports only YOLO V3 based triple output topologies"

    # ---------------------------------------------- 4. Preparing inputs -----------------------------------------------
    log.info("Preparing inputs")
    input_blob = next(iter(net.inputs))

    #  Default batch_size is 1
    net.batch_size = 1

    # Read and pre-process input images
    n, c, h, w = net.inputs[input_blob].shape

    if args.labels:
        with open(args.labels, 'r') as f:
            labels_map = [x.strip() for x in f]
    else:
        labels_map = None

    input_stream = 0 if args.input == "cam" else args.input

    is_async_mode = True

    if (args.input != "drone"):
        cap = cv2.VideoCapture(input_stream)
        # Drone insertion

        number_input_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        number_input_frames = 1 if number_input_frames != -1 and number_input_frames < 0 else number_input_frames
        wait_key_code = 1

        # Number of frames in picture is 1 and this will be read in cycle. Sync mode is default value for this case
        if number_input_frames != 1:
            ret, frame = cap.read()  #frames need to be taken from def in tello
            # print("Here's the type of cap.read(): " + str(type(cap.read())))
            # print("Here's the raw read: " + str(cap.read()))
            # print ("Debugging Drone cam Array size is: " + str(ret.size))
        else:
            is_async_mode = False
            wait_key_code = 0

        # ----------------------------------------- 5. Loading model to the plugin -----------------------------------------
        log.info("Loading model to the plugin")
        exec_net = plugin.load(network=net, num_requests=2)

        cur_request_id = 0
        next_request_id = 1
        render_time = 0
        parsing_time = 0

        # ----------------------------------------------- 6. Doing inference -----------------------------------------------
        print(
            "To close the application, press 'CTRL+C' or any key with focus on the output window"
        )
        while cap.isOpened():
            # Here is the first asynchronous point: in the Async mode, we capture frame to populate the NEXT infer request
            # in the regular mode, we capture frame to the CURRENT infer request
            if is_async_mode:
                ret, next_frame = cap.read()

            else:
                ret, frame = cap.read()
            print("Here's the size of the frame array: " + str(frame.size))

            if not ret:
                break

            if is_async_mode:
                request_id = next_request_id
                in_frame = cv2.resize(next_frame, (w, h))
            else:
                request_id = cur_request_id
                in_frame = cv2.resize(frame, (w, h))

            # resize input_frame to network size
            in_frame = in_frame.transpose(
                (2, 0, 1))  # Change data layout from HWC to CHW
            in_frame = in_frame.reshape((n, c, h, w))

            # Start inference
            start_time = time()
            exec_net.start_async(request_id=request_id,
                                 inputs={input_blob: in_frame})
            det_time = time() - start_time

            # Collecting object detection results
            objects = list()
            if exec_net.requests[cur_request_id].wait(-1) == 0:
                output = exec_net.requests[cur_request_id].outputs

                start_time = time()
                for layer_name, out_blob in output.items():
                    layer_params = YoloV3Params(net.layers[layer_name].params,
                                                out_blob.shape[2])
                    log.info("Layer {} parameters: ".format(layer_name))
                    layer_params.log_params()
                    objects += parse_yolo_region(out_blob, in_frame.shape[2:],
                                                 frame.shape[:-1],
                                                 layer_params,
                                                 args.prob_threshold)
                parsing_time = time() - start_time

            # Filtering overlapping boxes with respect to the --iou_threshold CLI parameter
            for i in range(len(objects)):
                if objects[i]['confidence'] == 0:
                    continue
                for j in range(i + 1, len(objects)):
                    if intersection_over_union(
                            objects[i], objects[j]) > args.iou_threshold:
                        objects[j]['confidence'] = 0

            # Drawing objects with respect to the --prob_threshold CLI parameter
            objects = [
                obj for obj in objects
                if obj['confidence'] >= args.prob_threshold
            ]

            if len(objects) and args.raw_output_message:
                log.info("\nDetected boxes for batch {}:".format(1))
                log.info(
                    " Class ID | Confidence | XMIN | YMIN | XMAX | YMAX | COLOR "
                )

            origin_im_size = frame.shape[:-1]
            for obj in objects:
                # Validation bbox of detected object
                if obj['xmax'] > origin_im_size[1] or obj[
                        'ymax'] > origin_im_size[0] or obj['xmin'] < 0 or obj[
                            'ymin'] < 0:
                    continue
                color = (int(min(obj['class_id'] * 12.5,
                                 255)), min(obj['class_id'] * 7,
                                            255), min(obj['class_id'] * 5,
                                                      255))
                det_label = labels_map[obj['class_id']] if labels_map and len(labels_map) >= obj['class_id'] else \
                    str(obj['class_id'])

                if args.raw_output_message:
                    log.info(
                        "{:^9} | {:10f} | {:4} | {:4} | {:4} | {:4} | {} ".
                        format(det_label, obj['confidence'], obj['xmin'],
                               obj['ymin'], obj['xmax'], obj['ymax'], color))

                cv2.rectangle(frame, (obj['xmin'], obj['ymin']),
                              (obj['xmax'], obj['ymax']), color, 2)
                cv2.putText(
                    frame, "#" + det_label + ' ' +
                    str(round(obj['confidence'] * 100, 1)) + ' %',
                    (obj['xmin'], obj['ymin'] - 7), cv2.FONT_HERSHEY_COMPLEX,
                    0.6, color, 1)

            # Draw performance stats over frame
            inf_time_message = "Inference time: N\A for async mode" if is_async_mode else \
                "Inference time: {:.3f} ms".format(det_time * 1e3)
            render_time_message = "OpenCV rendering time: {:.3f} ms".format(
                render_time * 1e3)
            async_mode_message = "Async mode is on. Processing request {}".format(cur_request_id) if is_async_mode else \
                "Async mode is off. Processing request {}".format(cur_request_id)
            parsing_message = "YOLO parsing time is {:.3f}".format(
                parsing_time * 1e3)

            cv2.putText(frame, inf_time_message, (15, 15),
                        cv2.FONT_HERSHEY_COMPLEX, 0.5, (200, 10, 10), 1)
            cv2.putText(frame, render_time_message, (15, 45),
                        cv2.FONT_HERSHEY_COMPLEX, 0.5, (10, 10, 200), 1)
            cv2.putText(frame, async_mode_message,
                        (10, int(origin_im_size[0] - 20)),
                        cv2.FONT_HERSHEY_COMPLEX, 0.5, (10, 10, 200), 1)
            cv2.putText(frame, parsing_message, (15, 30),
                        cv2.FONT_HERSHEY_COMPLEX, 0.5, (10, 10, 200), 1)

            start_time = time()
            # cv2.imshow("DetectionResults", frame)
            render_time = time() - start_time

            if is_async_mode:
                cur_request_id, next_request_id = next_request_id, cur_request_id
                frame = next_frame

            key = cv2.waitKey(wait_key_code)

            # Esc key
            if key == 27:
                break
            # Tab key
            if key == 9:
                exec_net.requests[cur_request_id].wait()
                is_async_mode = not is_async_mode
                log.info("Switched to {} mode".format(
                    "async" if is_async_mode else "sync"))

        cv2.destroyAllWindows()


############################################################################
    else:
        detection_buffer = 10  # seconds before the drone may respond again to seeing a person
        detect_gate = True  # gate that must open before the drone reacts to seeing someone
        detection_time = float('inf')
        det_label = 100
        flight_max = 300

        print("Configuring tello Drone")
        tello = Tello()
        print("Drone battery level: " + str(tello.get_battery()))

        if not tello.connect():
            print("Tello not connected")
            return

        log.info("Loading model to the plugin")
        exec_net = plugin.load(network=net, num_requests=2)

        if not tello.streamon():
            print("Could not start video stream")
            return

        # Start the drone flight with takeoff (flight_max below caps the flight at 5 minutes)
        if not tello.takeoff():
            print("Takeoff failed")

        takeoff_time = time()
        # while(stripper_int(tello.get_height()) > 210) or (stripper_int(tello.get_height()) < 150):
        #     if (stripper_int(tello.get_height()) > 210):
        #         tello.move("down",3)
        #     else:
        #         tello.move("up",3)

        while True:
            det_label = 100  # reset at the start of each iteration
            if (time() - takeoff_time) > flight_max:
                print(
                    "Max flight time has been reached - triggering autolanding"
                )
                tello.land()
                break  # stop the control loop once the drone has landed

            footage = tello.get_frame_read()
            log.info("New Frame Loaded")
            # cv2.imshow("DetectionResults", footage.frame) #no need to have it up here
            raw_key = cv2.waitKey(1)

            is_async_mode = False
            wait_key_code = 0
            # ----------------------------------------- 5. Loading model to the plugin -----------------------------------------
            # log.info("Loading model to the plugin")
            # exec_net = plugin.load(network=net, num_requests=2)

            cur_request_id = 0
            next_request_id = 1
            render_time = 0
            parsing_time = 0

            # ----------------------------------------------- 6. Doing inference -----------------------------------------------
            # print("To close the application, press 'CTRL+C' or any key with focus on the output window")
            # print ("Here's the type of footage.drone: " + str(type(footage.frame)))
            if is_async_mode:
                ret = True  #footage.frame
                next_frame = footage.frame  # In cap.read its 91600 size
            else:
                ret = True  #footage.frame
                frame = footage.frame
                # print ("footage.frame type is: " + str(type(footage.drone)))
                # print("Output Representation: " + str(footage.frame))
                # print("Here's the size of the array: " + str(footage.frame.size))

            if not ret:
                print("RET VALUE IS FALSE!")
                break

            if is_async_mode:
                request_id = next_request_id
                in_frame = cv2.resize(next_frame, (w, h))
            else:
                request_id = cur_request_id
                in_frame = cv2.resize(frame, (w, h))

            # resize input_frame to network size
            in_frame = in_frame.transpose(
                (2, 0, 1))  # Change data layout from HWC to CHW
            in_frame = in_frame.reshape((n, c, h, w))

            # Start inference
            start_time = time()

            exec_net.start_async(request_id=request_id,
                                 inputs={input_blob: in_frame})
            det_time = time() - start_time

            # Collecting object detection results
            objects = list()
            if exec_net.requests[cur_request_id].wait(-1) == 0:
                output = exec_net.requests[cur_request_id].outputs

                start_time = time()
                for layer_name, out_blob in output.items():
                    layer_params = YoloV3Params(net.layers[layer_name].params,
                                                out_blob.shape[2])
                    log.info("Layer {} parameters: ".format(layer_name))
                    layer_params.log_params()
                    objects += parse_yolo_region(out_blob, in_frame.shape[2:],
                                                 frame.shape[:-1],
                                                 layer_params,
                                                 args.prob_threshold)
                parsing_time = time() - start_time

            # Filtering overlapping boxes with respect to the --iou_threshold CLI parameter
            for i in range(len(objects)):
                if objects[i]['confidence'] == 0:
                    continue
                for j in range(i + 1, len(objects)):
                    if intersection_over_union(
                            objects[i], objects[j]) > args.iou_threshold:
                        objects[j]['confidence'] = 0

            # Drawing objects with respect to the --prob_threshold CLI parameter
            objects = [
                obj for obj in objects
                if obj['confidence'] >= args.prob_threshold
            ]

            if len(objects) and args.raw_output_message:
                log.info("\nDetected boxes for batch {}:".format(1))
                log.info(
                    " Class ID | Confidence | XMIN | YMIN | XMAX | YMAX | COLOR "
                )

            origin_im_size = frame.shape[:-1]
            for obj in objects:
                # Validation bbox of detected object
                if obj['xmax'] > origin_im_size[1] or obj[
                        'ymax'] > origin_im_size[0] or obj['xmin'] < 0 or obj[
                            'ymin'] < 0:
                    continue
                color = (int(min(obj['class_id'] * 12.5,
                                 255)), min(obj['class_id'] * 7,
                                            255), min(obj['class_id'] * 5,
                                                      255))
                det_label = labels_map[obj['class_id']] if labels_map and len(labels_map) >= obj['class_id'] else \
                    str(obj['class_id'])

                if args.raw_output_message:
                    log.info(
                        "{:^9} | {:10f} | {:4} | {:4} | {:4} | {:4} | {} ".
                        format(det_label, obj['confidence'], obj['xmin'],
                               obj['ymin'], obj['xmax'], obj['ymax'], color))

                cv2.rectangle(frame, (obj['xmin'], obj['ymin']),
                              (obj['xmax'], obj['ymax']), color, 2)
                cv2.putText(
                    frame, "#" + det_label + ' ' +
                    str(round(obj['confidence'] * 100, 1)) + ' %',
                    (obj['xmin'], obj['ymin'] - 7), cv2.FONT_HERSHEY_COMPLEX,
                    0.6, color, 1)
            print("Drone is seeing: " + str(coco_labels[int(det_label)]))

            if (time() - detection_time) > detection_buffer:
                detect_gate = True

            if int(det_label) == 0 and detect_gate:
                detection_time = time()
                tello.flip('f')  # current reaction to seeing a person
                detect_gate = False

            # Draw performance stats over frame
            inf_time_message = "Inference time: N\A for async mode" if is_async_mode else \
                "Inference time: {:.3f} ms".format(det_time * 1e3)
            render_time_message = "OpenCV rendering time: {:.3f} ms".format(
                render_time * 1e3)
            async_mode_message = "Async mode is on. Processing request {}".format(cur_request_id) if is_async_mode else \
                "Async mode is off. Processing request {}".format(cur_request_id)
            parsing_message = "YOLO parsing time is {:.3f}".format(
                parsing_time * 1e3)

            cv2.putText(frame, inf_time_message, (15, 15),
                        cv2.FONT_HERSHEY_COMPLEX, 0.5, (200, 10, 10), 1)
            cv2.putText(frame, render_time_message, (15, 45),
                        cv2.FONT_HERSHEY_COMPLEX, 0.5, (10, 10, 200), 1)
            cv2.putText(frame, async_mode_message,
                        (10, int(origin_im_size[0] - 20)),
                        cv2.FONT_HERSHEY_COMPLEX, 0.5, (10, 10, 200), 1)
            cv2.putText(frame, parsing_message, (15, 30),
                        cv2.FONT_HERSHEY_COMPLEX, 0.5, (10, 10, 200), 1)

            start_time = time()
            # cv2.imshow("DetectionResults", frame) #Debug Testing
            render_time = time() - start_time

            if is_async_mode:
                cur_request_id, next_request_id = next_request_id, cur_request_id
                frame = next_frame

            key = cv2.waitKey(wait_key_code)

            # Esc key
            if key == 27:
                break
            # Tab key
            if key == 9:
                exec_net.requests[cur_request_id].wait()
                is_async_mode = not is_async_mode
                log.info("Switched to {} mode".format(
                    "async" if is_async_mode else "sync"))

            if raw_key == 27:  # Esc is the break key
                break

        cv2.destroyAllWindows()
        tello.end()
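A typical entry point for this script, following the OpenVINO demo convention (an assumption; the original snippet ends before it):

if __name__ == '__main__':
    sys.exit(main() or 0)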