class FrontEnd(object):
    """Face-tracking front end for a DJI Tello drone.

    Streams video, detects faces with a Haar cascade, and steers the drone
    to keep the largest detected face centered at a chosen target distance.
    Keyboard controls: 0-6 distance, T takeoff, L land, Backspace manual
    override (WASD/QE/CZ), Esc quit.
    """

    def __init__(self):
        # Tello object that talks to the drone.
        self.tello = Tello()

        # Drone velocities, each in -100..100 (vertical / horizontal axes).
        self.for_back_velocity = 0
        self.left_right_velocity = 0
        self.up_down_velocity = 0
        self.yaw_velocity = 0
        self.speed = 10

        # RC commands are only sent after takeoff (see update()).
        self.send_rc_control = False

    def run(self):
        """Main loop: connect, stream frames, handle keys, track faces."""
        # Bail out if the drone cannot be reached.
        if not self.tello.connect():
            print("Tello not connected")
            return

        # Bail out if the speed limit cannot be applied.
        if not self.tello.set_speed(self.speed):
            print("Not set speed to lowest possible")
            return

        # Streaming may still be on if a previous run exited abnormally.
        if not self.tello.streamoff():
            print("Could not stop video stream")
            return

        # Bail out if video cannot be started.
        if not self.tello.streamon():
            print("Could not start video stream")
            return

        # Frame-by-frame reader for the video stream.
        frame_read = self.tello.get_frame_read()

        should_stop = False
        imgCount = 0
        OVERRIDE = False
        oSpeed = args.override_speed
        tDistance = args.distance
        self.tello.get_battery()

        # Safety zone X (dead band around the target, in pixels).
        szX = args.saftey_x

        # Safety zone Y.
        szY = args.saftey_y

        # Debug mode: motion commands are suppressed elsewhere via args.debug.
        if args.debug:
            print("DEBUG MODE ENABLED!")

        # Fly until a stop condition is raised.
        while not should_stop:
            self.update()

            # Leave the loop when the frame feed has stopped.
            if frame_read.stopped:
                frame_read.stop()
                break

            theTime = str(datetime.datetime.now()).replace(':', '-').replace(
                '.', '_')

            frame = cv2.cvtColor(frame_read.frame, cv2.COLOR_BGR2RGB)
            frameRet = frame_read.frame

            vid = self.tello.get_video_capture()

            # Persist the raw frame when session recording is enabled.
            if args.save_session:
                cv2.imwrite("{}/tellocap{}.jpg".format(ddir, imgCount),
                            frameRet)

            frame = np.rot90(frame)
            imgCount += 1

            time.sleep(1 / FPS)

            # Listen for key presses.
            k = cv2.waitKey(20)

            # Press 0 to set distance to 0.
            if k == ord('0'):
                if not OVERRIDE:
                    print("Distance = 0")
                    tDistance = 0

            # Press 1 to set distance to 1 (override speed 1 in manual mode).
            if k == ord('1'):
                if OVERRIDE:
                    oSpeed = 1
                else:
                    print("Distance = 1")
                    tDistance = 1

            # Press 2 to set distance to 2 (override speed 2 in manual mode).
            if k == ord('2'):
                if OVERRIDE:
                    oSpeed = 2
                else:
                    print("Distance = 2")
                    tDistance = 2

            # Press 3 to set distance to 3 (override speed 3 in manual mode).
            if k == ord('3'):
                if OVERRIDE:
                    oSpeed = 3
                else:
                    print("Distance = 3")
                    tDistance = 3

            # Press 4 to set distance to 4.
            # NOTE(review): unlike 1-3, keys 4-6 do not change oSpeed while
            # OVERRIDE is on — confirm whether that asymmetry is intended.
            if k == ord('4'):
                if not OVERRIDE:
                    print("Distance = 4")
                    tDistance = 4

            # Press 5 to set distance to 5.
            if k == ord('5'):
                if not OVERRIDE:
                    print("Distance = 5")
                    tDistance = 5

            # Press 6 to set distance to 6.
            if k == ord('6'):
                if not OVERRIDE:
                    print("Distance = 6")
                    tDistance = 6

            # Press T to take off.
            if k == ord('t'):
                if not args.debug:
                    print("Taking Off")
                    self.tello.takeoff()
                    self.tello.get_battery()
                self.send_rc_control = True

            # Press L to land.
            if k == ord('l'):
                if not args.debug:
                    print("Landing")
                    self.tello.land()
                self.send_rc_control = False

            # Press Backspace to toggle manual controls override.
            if k == 8:
                if not OVERRIDE:
                    OVERRIDE = True
                    print("OVERRIDE ENABLED")
                else:
                    OVERRIDE = False
                    print("OVERRIDE DISABLED")

            if OVERRIDE:
                # W & S to fly forward & back.
                if k == ord('w'):
                    self.for_back_velocity = int(S * oSpeed)
                elif k == ord('s'):
                    self.for_back_velocity = -int(S * oSpeed)
                else:
                    self.for_back_velocity = 0

                # A & D to pan left & right.
                if k == ord('d'):
                    self.yaw_velocity = int(S * oSpeed)
                elif k == ord('a'):
                    self.yaw_velocity = -int(S * oSpeed)
                else:
                    self.yaw_velocity = 0

                # Q & E to fly up & down.
                if k == ord('e'):
                    self.up_down_velocity = int(S * oSpeed)
                elif k == ord('q'):
                    self.up_down_velocity = -int(S * oSpeed)
                else:
                    self.up_down_velocity = 0

                # C & Z to fly left & right.
                if k == ord('c'):
                    self.left_right_velocity = int(S * oSpeed)
                elif k == ord('z'):
                    self.left_right_velocity = -int(S * oSpeed)
                else:
                    self.left_right_velocity = 0

            # Press Esc to quit the program.
            if k == 27:
                should_stop = True
                break

            gray = cv2.cvtColor(frameRet, cv2.COLOR_BGR2GRAY)
            faces = face_cascade.detectMultiScale(gray,
                                                  scaleFactor=1.5,
                                                  minNeighbors=2)

            # Target face-box size for the selected distance.
            tSize = faceSizes[tDistance]

            # Center of the frame (dimensions is presumably (width, height)
            # — TODO confirm against where it is defined).
            cWidth = int(dimensions[0] / 2)
            cHeight = int(dimensions[1] / 2)

            noFaces = len(faces) == 0

            # Only auto-track when RC control is granted and not overridden.
            if self.send_rc_control and not OVERRIDE:
                for (x, y, w, h) in faces:
                    #
                    # roi_gray = gray[y:y + h, x:x + w] #(ycord_start, ycord_end)
                    roi_color = frameRet[y:y + h, x:x + w]

                    # Face-box drawing properties.
                    fbCol = (255, 0, 0)  # BGR 0-255
                    fbStroke = 2

                    # End coords are the far corner of the bounding box.
                    end_cord_x = x + w
                    end_cord_y = y + h
                    end_size = w * 2

                    # Target coordinates (box center plus vertical offset).
                    targ_cord_x = int((end_cord_x + x) / 2)
                    targ_cord_y = int((end_cord_y + y) / 2) + UDOffset

                    # Vector from the face to the desired screen position.
                    vTrue = np.array((cWidth, cHeight, tSize))
                    vTarget = np.array((targ_cord_x, targ_cord_y, end_size))
                    vDistance = vTrue - vTarget

                    # if not args.debug:
                    # Yaw to re-center horizontally.
                    if vDistance[0] < -szX:
                        self.yaw_velocity = S
                        # self.left_right_velocity = S2
                    elif vDistance[0] > szX:
                        self.yaw_velocity = -S
                        # self.left_right_velocity = -S2
                    else:
                        self.yaw_velocity = 0

                    # Climb/descend to re-center vertically.
                    if vDistance[1] > szY:
                        self.up_down_velocity = S
                    elif vDistance[1] < -szY:
                        self.up_down_velocity = -S
                    else:
                        self.up_down_velocity = 0

                    # Extra boost when far from the target size.
                    F = 0
                    if abs(vDistance[2]) > acc[tDistance]:
                        F = S

                    # Forward/back to match the target distance.
                    if vDistance[2] > 0:
                        self.for_back_velocity = S + F
                    elif vDistance[2] < 0:
                        self.for_back_velocity = -S - F
                    else:
                        self.for_back_velocity = 0

                    # Draw the face bounding box.
                    cv2.rectangle(frameRet, (x, y),
                                  (end_cord_x, end_cord_y), fbCol, fbStroke)

                    # Draw the target as a circle.
                    cv2.circle(frameRet, (targ_cord_x, targ_cord_y), 10,
                               (0, 255, 0), 2)

                    # Draw the safety zone.
                    cv2.rectangle(frameRet,
                                  (targ_cord_x - szX, targ_cord_y - szY),
                                  (targ_cord_x + szX, targ_cord_y + szY),
                                  (0, 255, 0), fbStroke)

                    # Show the drone's vector offset from the face box.
                    cv2.putText(frameRet, str(vDistance), (0, 64),
                                cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255),
                                2)

                # With no face detected, hold position (zero velocities).
                if noFaces:
                    self.yaw_velocity = 0
                    self.up_down_velocity = 0
                    self.for_back_velocity = 0
                    print("NO TARGET")

            # Draw the screen-center circle the drone tries to match.
            cv2.circle(frameRet, (cWidth, cHeight), 10, (0, 0, 255), 2)

            # NOTE(review): parses as tDistance + (1/7); possibly
            # (tDistance + 1) / 7 was intended — confirm before changing.
            dCol = lerp(np.array((0, 0, 255)), np.array((255, 255, 255)),
                        tDistance + 1 / 7)

            if OVERRIDE:
                show = "OVERRIDE: {}".format(oSpeed)
                dCol = (255, 255, 255)
            else:
                show = "AI: {}".format(str(tDistance))

            # Draw the chosen distance / override status.
            cv2.putText(frameRet, show, (32, 664), cv2.FONT_HERSHEY_SIMPLEX,
                        1, dCol, 2)

            # Display the resulting frame.
            cv2.imshow(f'Tello Tracking...', frameRet)

        # On exit, print the battery.
        self.tello.get_battery()

        # When everything is done, release the capture.
        cv2.destroyAllWindows()

        # Always call before finishing: deallocates drone resources.
        self.tello.end()

    def battery(self):
        # First two characters of the battery response (presumably the
        # percentage digits — TODO confirm against Tello.get_battery()).
        return self.tello.get_battery()[:2]

    def update(self):
        """ Update routine. Send velocities to Tello."""
        if self.send_rc_control:
            self.tello.send_rc_control(self.left_right_velocity,
                                       self.for_back_velocity,
                                       self.up_down_velocity,
                                       self.yaw_velocity)
class DroneUI(object):
    """Cat-finder front end for a DJI Tello drone.

    Flies a scripted search pattern (FIND mode, keys replayed from an
    operations queue), detects cats with a detection model, and switches
    to FOLLOW (or FLIP) mode once the subject is found. Manual keys
    (i/k/j/l/u/o/y/h/p) take over at any time.
    """

    def __init__(self):
        # Init Tello object that interacts with the Tello drone.
        self.tello = Tello()

        # Drone velocities between -100~100.
        self.for_back_velocity = 0
        self.left_right_velocity = 0
        self.up_down_velocity = 0
        self.yaw_velocity = 0
        self.speed = 10
        # Current pilot mode (PMode is defined elsewhere in this file).
        self.mode = PMode.NONE  # Can be '', 'FIND', 'OVERRIDE' or 'FOLLOW'

        self.send_rc_control = False

    def run(self):
        """Main loop: connect, stream, process keys/auto-keys, detect."""
        if not self.tello.connect():
            print("Tello not connected")
            return

        if not self.tello.set_speed(self.speed):
            print("Not set speed to lowest possible")
            return

        # In case streaming is on. This happens when we quit this program
        # without the escape key.
        if not self.tello.streamoff():
            print("Could not stop video stream")
            return

        if not self.tello.streamon():
            print("Could not start video stream")
            return

        # Pick the detection model based on the --cat argument.
        if args.cat == 'any':
            print('Using CatDetectionModel')
            self.model = CatDetectionModel(0.5)
        else:
            print('Using MyCatsDetectionModel ({})'.format(args.cat))
            self.model = MyCatsDetectionModel(0.5)

        frame_read = self.tello.get_frame_read()

        should_stop = False
        imgCount = 0
        OVERRIDE = False
        DETECT_ENABLED = False  # Set to true to automatically start in follow mode
        self.mode = PMode.NONE

        self.tello.get_battery()

        # Safety Zone X
        szX = args.saftey_x

        # Safety Zone Y
        szY = args.saftey_y

        if args.debug:
            print("DEBUG MODE ENABLED!")

        while not should_stop:
            frame_time_start = time.time()
            # self.update() # Moved to the end before sleep to have more accuracy

            if frame_read.stopped:
                frame_read.stop()
                self.update()  ## Just in case
                break

            print('---')
            # TODO: Analize if colors have to be tweaked
            frame = cv2.flip(frame_read.frame,
                             0)  # Vertical flip due to the mirror
            frameRet = frame.copy()

            vid = self.tello.get_video_capture()

            imgCount += 1

            #time.sleep(1 / FPS)

            # Listen for key presses
            k = cv2.waitKey(20)

            # Any manual movement key switches to override; chr() raises
            # for k == -1 (no key), which is deliberately ignored.
            try:
                if chr(k) in 'ikjluoyhp':
                    OVERRIDE = True
            except:
                ...

            # e/d enable/disable automatic detection.
            if k == ord('e'):
                DETECT_ENABLED = True
            elif k == ord('d'):
                DETECT_ENABLED = False

            # Press T to take off
            if k == ord('t'):
                if not args.debug:
                    print("Taking Off")
                    self.tello.takeoff()
                    self.tello.get_battery()
                self.send_rc_control = True

            # Press S to start the scripted search (spiral) with autopilot.
            if k == ord('s') and self.send_rc_control == True:
                self.mode = PMode.FIND
                DETECT_ENABLED = True  # To start following with autopilot
                OVERRIDE = False
                print('Switch to spiral mode')

            # This is temporary, follow mode should start automatically
            if k == ord('f') and self.send_rc_control == True:
                DETECT_ENABLED = True
                OVERRIDE = False
                print('Switch to follow mode')

            # Press G to land
            if k == ord('g'):
                self.land_and_set_none()
                # self.update() ## Just in case
                # break

            # Press Backspace for controls override
            if k == 8:
                if not OVERRIDE:
                    OVERRIDE = True
                    print("OVERRIDE ENABLED")
                else:
                    OVERRIDE = False
                    print("OVERRIDE DISABLED")

            # Quit the software
            if k == 27:
                should_stop = True
                self.update()  ## Just in case
                break

            # With no key pressed in FIND mode, replay the next queued key.
            autoK = -1
            if k == -1 and self.mode == PMode.FIND:
                if not OVERRIDE:
                    autoK = next_auto_key()
                    if autoK == -1:
                        self.mode = PMode.NONE
                        print('Queue empty! no more autokeys')
                    else:
                        print('Automatically pressing ', chr(autoK))

            key_to_process = autoK if k == -1 and self.mode == PMode.FIND and OVERRIDE == False else k

            if self.mode == PMode.FIND and not OVERRIDE:
                # frameRet will get the squares drawn after this operation.
                if self.process_move_key_andor_square_bounce(
                        key_to_process, frame, frameRet) == False:
                    # If the queue is empty and the object hasn't been
                    # found, land and finish.
                    self.land_and_set_none()
                    #self.update() # Just in case
                    break
            else:
                self.process_move_key(key_to_process)

            dCol = (0, 255, 255)
            #detected = False
            if not OVERRIDE and self.send_rc_control and DETECT_ENABLED:
                self.detect_subjects(frame, frameRet, szX, szY)

            show = ""
            if OVERRIDE:
                show = "MANUAL"
                dCol = (255, 255, 255)
            elif self.mode == PMode.FOLLOW or self.mode == PMode.FLIP:
                show = "FOUND!!!"
            elif self.mode == PMode.FIND:
                show = "Finding.."

            mode_label = 'Mode: {}'.format(self.mode)

            # Draw the distance choosen
            cv2.putText(frameRet, mode_label, (32, 664),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
            cv2.putText(frameRet, show, (32, 600), cv2.FONT_HERSHEY_SIMPLEX,
                        1, dCol, 2)

            # Display the resulting frame
            cv2.imshow('FINDER DRONE', frameRet)

            if (self.mode == PMode.FLIP):
                self.flip()
                # OVERRIDE = True

            self.update(
            )  # Moved here instead of beginning of loop to have better accuracy

            # Sleep only the remainder of the frame budget.
            frame_time = time.time() - frame_time_start
            sleep_time = 1 / FPS - frame_time
            if sleep_time < 0:
                sleep_time = 0
                print('SLEEEP TIME NEGATIVE FOR FRAME {} ({}s).. TURNING IT 0'.
                      format(imgCount, frame_time))
            if args.save_session and self.send_rc_control == True:  # To avoid recording before takeoff
                self.create_frame_files(frame, frameRet, imgCount)
            time.sleep(sleep_time)

        # On exit, print the battery
        self.tello.get_battery()

        # When everything done, release the capture
        # cv2.destroyWindow('FINDER DRONE')
        # cv2.waitKey(0)
        cv2.destroyAllWindows()

        # Call it always before finishing. I deallocate resources.
        self.tello.end()

    def create_frame_files(self, frame, frameRet, imgCount):
        """Save the raw and annotated frames for this iteration to disk."""

        def create_frame_file(image, subdir, print_log=False):
            # Writes `image` to <ddir>/<subdir>/tellocap<imgCount>.jpg,
            # creating the directory on first use.
            global ddir
            path = ddir + '/' + subdir
            if not os.path.exists(path):
                os.makedirs(path)
            filename = "{}/tellocap{}.jpg".format(path, imgCount)
            if print_log:
                print('Created {}'.format(filename))
            cv2.imwrite(filename, image)

        create_frame_file(frame, 'raw')
        create_frame_file(frameRet, 'output', True)

    def flip(self):
        """Perform a celebration flip, then return to FOLLOW mode."""
        print('Flip!')
        # Stop lateral motion before flipping.
        self.left_right_velocity = self.for_back_velocity = 0
        self.update()
        time.sleep(self.tello.TIME_BTW_COMMANDS * 2)
        if not args.debug:
            self.tello.flip_left()
            #self.tello.flip_right()
        # The following 2 lines allow going back to follow mode
        self.mode = PMode.FOLLOW
        global onFoundAction
        onFoundAction = PMode.FOLLOW  # So it doesn't flip over and over

    def land_and_set_none(self):
        """Land the drone (unless debugging) and reset the mode."""
        if not args.debug:
            print("------------------Landing--------------------")
            self.tello.land()
        self.send_rc_control = False
        self.mode = PMode.NONE
        # TODO: Consider calling reset

    def oq_discard_keys(self, keys_to_pop):
        """Drop leading queued operations whose key is in `keys_to_pop`.

        'p' (hold still) is always discardable. Stops at the first queued
        key that must be kept.
        """
        oq = globals.mission.operations_queue
        keys_to_pop += 'p'
        while len(oq) > 0:
            oqkey = oq[0]['key']
            if oqkey in keys_to_pop:
                print('Removing {} from queue'.format(oqkey))
                oq.popleft()
            else:
                break

    def process_move_key_andor_square_bounce(self, k, frame, frameRet=None):
        """Apply key `k`, then let detected squares push the drone away.

        Square push directions override the key-derived velocity and
        discard now-redundant queued keys. Returns True while the
        operations queue still has work (False means search exhausted).
        """
        self.process_move_key(k)
        # By default use key direction
        (hor_dir, ver_dir) = get_squares_push_directions(frame, frameRet)
        print('(hor_dir, ver_dir): ({}, {})'.format(hor_dir, ver_dir))
        oq = globals.mission.operations_queue
        print('operations_queue len: ', len(oq))
        keys_to_pop = ''
        if ver_dir == 'forward':
            self.for_back_velocity = int(S)
            if k != ord('i'):
                print('Square pushing forward')
                keys_to_pop += 'k'
        elif ver_dir == 'back':
            self.for_back_velocity = -int(S)
            if k != ord('k'):
                print('Square pushing back')
                keys_to_pop += 'i'
        if hor_dir == 'right':
            self.left_right_velocity = int(S)
            if k != ord('l'):
                print('Square pushing right')
                keys_to_pop += 'j'
        elif hor_dir == 'left':
            self.left_right_velocity = -int(S)
            if k != ord('j'):
                print('Square pushing left')
                keys_to_pop += 'l'
        if (len(keys_to_pop) > 0):
            self.oq_discard_keys(keys_to_pop)
        return (len(oq) > 0)

    def process_move_key(self, k):
        """Translate a movement key into the four velocity components."""
        # i & k to fly forward & back
        if k == ord('i'):
            self.for_back_velocity = int(S)
        elif k == ord('k'):
            self.for_back_velocity = -int(S)
        else:
            self.for_back_velocity = 0
        # o & u to pan left & right
        if k == ord('o'):
            self.yaw_velocity = int(S)
        elif k == ord('u'):
            self.yaw_velocity = -int(S)
        else:
            self.yaw_velocity = 0
        # y & h to fly up & down
        if k == ord('y'):
            self.up_down_velocity = int(S)
        elif k == ord('h'):
            self.up_down_velocity = -int(S)
        else:
            self.up_down_velocity = 0
        # l & j to fly left & right
        if k == ord('l'):
            self.left_right_velocity = int(S)
        elif k == ord('j'):
            self.left_right_velocity = -int(S)
        else:
            self.left_right_velocity = 0
        # p to keep still (all velocities already zeroed above)
        if k == ord('p'):
            print('pressing p')

    def show_save_detection(self, frame, frameRet, firstDetection):
        """Save the annotated full frame plus a cropped raw sub-image of
        the detection, and pop up the crop in a 'Detected' window."""
        output_filename_det_full = "{}/detected_full.jpg".format(ddir)
        cv2.imwrite(output_filename_det_full, frameRet)
        print('Created {}'.format(output_filename_det_full))

        (x, y, w, h) = firstDetection['box']
        # Expand the crop by a margin, clamped to the top-left edges.
        add_to_borders = 100
        (xt, yt) = (x + w + add_to_borders, y + h + add_to_borders)
        (x, y) = (max(0, x - add_to_borders), max(0, y - add_to_borders))

        # subframeRet = frameRet[y:yt, x:xt].copy()
        subframe = frame[y:yt, x:xt].copy()

        def show_detection():
            output_filename_det_sub = "{}/detected_sub.jpg".format(ddir)
            cv2.imwrite(output_filename_det_sub, subframe)
            print('Created {}'.format(output_filename_det_sub))
            # Shows detection in a window. If it doesn't exist yet, waitKey
            waitForKey = cv2.getWindowProperty('Detected',
                                               0) < 0  # True for first time
            cv2.imshow('Detected', subframe)
            if waitForKey:
                cv2.waitKey(0)

        # Deferred so the main loop's imshow is not blocked immediately.
        Timer(0.5, show_detection).start()

    def detect_subjects(self, frame, frameRet, szX, szY):
        """Run the model on the frame; on a match, switch to the configured
        on-found mode and follow/save the detection. Returns whether the
        wanted subject was detected."""
        detections = self.model.detect(frameRet)
        # print('detections: ', detections)
        self.model.drawDetections(frameRet, detections)

        class_wanted = 0 if args.cat == 'any' else self.model.LABELS.index(
            args.cat)
        detection = next(
            filter(lambda d: d['classID'] == class_wanted, detections), None)
        isSubjectDetected = not detection is None

        if isSubjectDetected:
            print('{} FOUND!!!!!!!!!!'.format(self.model.LABELS[class_wanted]))
            #if self.mode != onFoundAction: # To create it only the first time
            self.mode = onFoundAction

            # if we've given rc controls & get object coords returned
            # if self.send_rc_control and not OVERRIDE:
            if self.mode == PMode.FOLLOW:
                self.follow(detection, frameRet, szX, szY)
            self.show_save_detection(frame, frameRet, detection)
        elif self.mode == onFoundAction:
            # if there are no objects detected, don't do anything
            print("CAT NOT DETECTED NOW")
        return isSubjectDetected

    def follow(self, detection, frameRet, szX, szY):
        """Steer laterally/longitudinally to center the detection box."""
        print('Following...')
        # These are our center dimensions
        (frame_h, frame_w) = frameRet.shape[:2]
        cWidth = int(frame_w / 2)
        cHeight = int(frame_h / 2)

        (x, y, w, h) = detection['box']
        # end coords are the end of the bounding box x & y
        end_cord_x = x + w
        end_cord_y = y + h

        # This is not face detection so we don't need offset
        UDOffset = 0

        # these are our target coordinates
        targ_cord_x = int((end_cord_x + x) / 2)
        targ_cord_y = int((end_cord_y + y) / 2) + UDOffset

        # This calculates the vector from the object to the center of the screen
        vTrue = np.array((cWidth, cHeight))
        vTarget = np.array((targ_cord_x, targ_cord_y))
        vDistance = vTrue - vTarget

        if True or not args.debug:
            # Strafe to re-center horizontally.
            if vDistance[0] < -szX:
                # Right
                self.left_right_velocity = S
            elif vDistance[0] > szX:
                # Left
                self.left_right_velocity = -S
            else:
                self.left_right_velocity = 0

            # for up & down
            # NOTE(review): vertical image offset drives forward/back here
            # (camera presumably looks down) — confirm camera orientation.
            if vDistance[1] > szY:
                self.for_back_velocity = S
            elif vDistance[1] < -szY:
                self.for_back_velocity = -S
            else:
                self.for_back_velocity = 0

        # Draw the center of screen circle, this is what the drone tries to
        # match with the target coords
        cv2.circle(frameRet, (cWidth, cHeight), 10, (0, 0, 255), 2)

        # Draw the target as a circle
        cv2.circle(frameRet, (targ_cord_x, targ_cord_y), 10, (0, 255, 0), 2)

        # Draw the safety zone
        obStroke = 2
        cv2.rectangle(frameRet, (targ_cord_x - szX, targ_cord_y - szY),
                      (targ_cord_x + szX, targ_cord_y + szY), (0, 255, 0),
                      obStroke)

        # Draw the estimated drone vector position in relation to object
        # bounding box
        cv2.putText(frameRet, str(vDistance), (0, 64),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)

    def battery(self):
        # First two characters of the battery response (presumably the
        # percentage digits — TODO confirm).
        return self.tello.get_battery()[:2]

    def update(self):
        """ Update routine. Send velocities to Tello."""
        if self.send_rc_control:
            print('Sending speeds to tello. H: {} V: {}'.format(
                self.left_right_velocity, self.for_back_velocity))
            # In debug mode, log the speeds but do not command the drone.
            if not args.debug:
                self.tello.send_rc_control(self.left_right_velocity,
                                           self.for_back_velocity,
                                           self.up_down_velocity,
                                           self.yaw_velocity)
class FrontEnd(object):
    """Face-tracking front end for a DJI Tello drone (English-commented
    variant of the same controller).

    Streams video, detects faces with a Haar cascade, and steers the drone
    to keep the largest face centered at the selected target distance.
    Keys: 0-6 distance (1-3 also set override speed), T takeoff, L land,
    Backspace manual override (WASD/QE/CZ), Esc quit.
    """

    def __init__(self):
        # Init Tello object that interacts with the Tello drone
        self.tello = Tello()

        # Drone velocities between -100~100
        self.for_back_velocity = 0
        self.left_right_velocity = 0
        self.up_down_velocity = 0
        self.yaw_velocity = 0
        self.speed = 10

        # RC commands are only sent after takeoff (see update()).
        self.send_rc_control = False

    def run(self):
        """Main loop: connect, stream frames, handle keys, track faces."""
        if not self.tello.connect():
            print("Tello not connected")
            return

        if not self.tello.set_speed(self.speed):
            print("Not set speed to lowest possible")
            return

        # In case streaming is on. This happens when we quit this program
        # without the escape key.
        if not self.tello.streamoff():
            print("Could not stop video stream")
            return

        if not self.tello.streamon():
            print("Could not start video stream")
            return

        frame_read = self.tello.get_frame_read()

        should_stop = False
        imgCount = 0
        OVERRIDE = False
        oSpeed = args.override_speed
        tDistance = args.distance
        self.tello.get_battery()

        # Safety Zone X
        szX = args.saftey_x

        # Safety Zone Y
        szY = args.saftey_y

        if args.debug:
            print("DEBUG MODE ENABLED!")

        while not should_stop:
            self.update()

            if frame_read.stopped:
                frame_read.stop()
                break

            theTime = str(datetime.datetime.now()).replace(':', '-').replace(
                '.', '_')

            frame = cv2.cvtColor(frame_read.frame, cv2.COLOR_BGR2RGB)
            frameRet = frame_read.frame

            vid = self.tello.get_video_capture()

            # Persist the raw frame when session recording is enabled.
            if args.save_session:
                cv2.imwrite("{}/tellocap{}.jpg".format(ddir, imgCount),
                            frameRet)

            frame = np.rot90(frame)
            imgCount += 1

            time.sleep(1 / FPS)

            # Listen for key presses
            k = cv2.waitKey(20)

            # Press 0 to set distance to 0
            if k == ord('0'):
                if not OVERRIDE:
                    print("Distance = 0")
                    tDistance = 0

            # Press 1 to set distance to 1
            if k == ord('1'):
                if OVERRIDE:
                    oSpeed = 1
                else:
                    print("Distance = 1")
                    tDistance = 1

            # Press 2 to set distance to 2
            if k == ord('2'):
                if OVERRIDE:
                    oSpeed = 2
                else:
                    print("Distance = 2")
                    tDistance = 2

            # Press 3 to set distance to 3
            if k == ord('3'):
                if OVERRIDE:
                    oSpeed = 3
                else:
                    print("Distance = 3")
                    tDistance = 3

            # Press 4 to set distance to 4
            # NOTE(review): unlike 1-3, keys 4-6 never set oSpeed while
            # OVERRIDE is on — confirm whether that asymmetry is intended.
            if k == ord('4'):
                if not OVERRIDE:
                    print("Distance = 4")
                    tDistance = 4

            # Press 5 to set distance to 5
            if k == ord('5'):
                if not OVERRIDE:
                    print("Distance = 5")
                    tDistance = 5

            # Press 6 to set distance to 6
            if k == ord('6'):
                if not OVERRIDE:
                    print("Distance = 6")
                    tDistance = 6

            # Press T to take off
            if k == ord('t'):
                if not args.debug:
                    print("Taking Off")
                    self.tello.takeoff()
                    self.tello.get_battery()
                self.send_rc_control = True

            # Press L to land
            if k == ord('l'):
                if not args.debug:
                    print("Landing")
                    self.tello.land()
                self.send_rc_control = False

            # Press Backspace for controls override
            if k == 8:
                if not OVERRIDE:
                    OVERRIDE = True
                    print("OVERRIDE ENABLED")
                else:
                    OVERRIDE = False
                    print("OVERRIDE DISABLED")

            if OVERRIDE:
                # S & W to fly forward & back
                if k == ord('w'):
                    self.for_back_velocity = int(S * oSpeed)
                elif k == ord('s'):
                    self.for_back_velocity = -int(S * oSpeed)
                else:
                    self.for_back_velocity = 0

                # a & d to pan left & right
                if k == ord('d'):
                    self.yaw_velocity = int(S * oSpeed)
                elif k == ord('a'):
                    self.yaw_velocity = -int(S * oSpeed)
                else:
                    self.yaw_velocity = 0

                # Q & E to fly up & down
                if k == ord('e'):
                    self.up_down_velocity = int(S * oSpeed)
                elif k == ord('q'):
                    self.up_down_velocity = -int(S * oSpeed)
                else:
                    self.up_down_velocity = 0

                # c & z to fly left & right
                if k == ord('c'):
                    self.left_right_velocity = int(S * oSpeed)
                elif k == ord('z'):
                    self.left_right_velocity = -int(S * oSpeed)
                else:
                    self.left_right_velocity = 0

            # Quit the software
            if k == 27:
                should_stop = True
                break

            gray = cv2.cvtColor(frameRet, cv2.COLOR_BGR2GRAY)
            faces = face_cascade.detectMultiScale(gray,
                                                  scaleFactor=1.5,
                                                  minNeighbors=2)

            # Target size
            tSize = faceSizes[tDistance]

            # These are our center dimensions
            cWidth = int(dimensions[0] / 2)
            cHeight = int(dimensions[1] / 2)

            noFaces = len(faces) == 0

            # if we've given rc controls & get face coords returned
            if self.send_rc_control and not OVERRIDE:
                for (x, y, w, h) in faces:
                    #
                    # roi_gray = gray[y:y + h, x:x + w] #(ycord_start, ycord_end)
                    roi_color = frameRet[y:y + h, x:x + w]

                    # setting Face Box properties
                    fbCol = (255, 0, 0)  #BGR 0-255
                    fbStroke = 2

                    # end coords are the end of the bounding box x & y
                    end_cord_x = x + w
                    end_cord_y = y + h
                    end_size = w * 2

                    # these are our target coordinates
                    targ_cord_x = int((end_cord_x + x) / 2)
                    targ_cord_y = int((end_cord_y + y) / 2) + UDOffset

                    # This calculates the vector from your face to the
                    # center of the screen
                    vTrue = np.array((cWidth, cHeight, tSize))
                    vTarget = np.array((targ_cord_x, targ_cord_y, end_size))
                    vDistance = vTrue - vTarget

                    # if not args.debug:
                    # for turning
                    if vDistance[0] < -szX:
                        self.yaw_velocity = S
                        # self.left_right_velocity = S2
                    elif vDistance[0] > szX:
                        self.yaw_velocity = -S
                        # self.left_right_velocity = -S2
                    else:
                        self.yaw_velocity = 0

                    # for up & down
                    if vDistance[1] > szY:
                        self.up_down_velocity = S
                    elif vDistance[1] < -szY:
                        self.up_down_velocity = -S
                    else:
                        self.up_down_velocity = 0

                    # Extra boost when far from the target size.
                    F = 0
                    if abs(vDistance[2]) > acc[tDistance]:
                        F = S

                    # for forward back
                    if vDistance[2] > 0:
                        self.for_back_velocity = S + F
                    elif vDistance[2] < 0:
                        self.for_back_velocity = -S - F
                    else:
                        self.for_back_velocity = 0

                    # Draw the face bounding box
                    cv2.rectangle(frameRet, (x, y),
                                  (end_cord_x, end_cord_y), fbCol, fbStroke)

                    # Draw the target as a circle
                    cv2.circle(frameRet, (targ_cord_x, targ_cord_y), 10,
                               (0, 255, 0), 2)

                    # Draw the safety zone
                    cv2.rectangle(frameRet,
                                  (targ_cord_x - szX, targ_cord_y - szY),
                                  (targ_cord_x + szX, targ_cord_y + szY),
                                  (0, 255, 0), fbStroke)

                    # Draw the estimated drone vector position in relation
                    # to face bounding box
                    cv2.putText(frameRet, str(vDistance), (0, 64),
                                cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255),
                                2)

                # if there are no faces detected, don't do anything
                if noFaces:
                    self.yaw_velocity = 0
                    self.up_down_velocity = 0
                    self.for_back_velocity = 0
                    print("NO TARGET")

            # Draw the center of screen circle, this is what the drone tries
            # to match with the target coords
            cv2.circle(frameRet, (cWidth, cHeight), 10, (0, 0, 255), 2)

            # NOTE(review): parses as tDistance + (1/7); possibly
            # (tDistance + 1) / 7 was intended — confirm before changing.
            dCol = lerp(np.array((0, 0, 255)), np.array((255, 255, 255)),
                        tDistance + 1 / 7)

            if OVERRIDE:
                show = "OVERRIDE: {}".format(oSpeed)
                dCol = (255, 255, 255)
            else:
                show = "AI: {}".format(str(tDistance))

            # Draw the distance choosen
            cv2.putText(frameRet, show, (32, 664), cv2.FONT_HERSHEY_SIMPLEX,
                        1, dCol, 2)

            # Display the resulting frame
            cv2.imshow(f'Tello Tracking...', frameRet)

        # On exit, print the battery
        self.tello.get_battery()

        # When everything done, release the capture
        cv2.destroyAllWindows()

        # Call it always before finishing. I deallocate resources.
        self.tello.end()

    def battery(self):
        # First two characters of the battery response (presumably the
        # percentage digits — TODO confirm).
        return self.tello.get_battery()[:2]

    def update(self):
        """ Update routine. Send velocities to Tello."""
        if self.send_rc_control:
            self.tello.send_rc_control(self.left_right_velocity,
                                       self.for_back_velocity,
                                       self.up_down_velocity,
                                       self.yaw_velocity)
class FrontEnd(object):
    """Pedestrian-tracking front end for a DJI Tello drone.

    Reads frames from the drone's video capture, detects people with a HOG
    detector, estimates the distance to the strongest detection, and steers
    the drone to keep that person inside the configured safe zone. The
    annotated video is also written to 'Prueba_Tello.avi'.

    Keys: 0-6 safe zone (1-3 also set override speed), T takeoff, L land,
    Backspace manual override (WASD/QE/CZ), Esc quit.

    Fixes vs. the previous revision:
      * Action_OVERRRIDE referenced the undefined name ``oSpeed`` for the
        'w'/'s' keys (NameError at runtime); it now uses ``self.oSpeed``
        like every other branch.
      * Set_Action only rebound its local ``OVERRIDE`` parameter, so the
        Backspace toggle never reached run(); Set_Action now returns the
        (possibly toggled) flag and run() assigns it back.
    """

    def __init__(self):
        # Init Tello object that interacts with the Tello drone
        self.tello = Tello()

        # Drone velocities between -100~100
        self.for_back_velocity = 0
        self.left_right_velocity = 0
        self.up_down_velocity = 0
        self.yaw_velocity = 0
        self.speed = 10
        self.safe_zone = args.SafeZone
        self.oSpeed = args.override_speed

        # RC commands are only sent after takeoff (see update()).
        self.send_rc_control = False

    def run(self):
        """Main loop: connect, read frames, handle keys, track a person."""
        if not self.tello.connect():
            print("Tello not connected")
            return

        if not self.tello.set_speed(self.speed):
            print("Not set speed to lowest possible")
            return

        # In case streaming is on. This happens when we quit this program
        # without the escape key.
        if not self.tello.streamoff():
            print("Could not stop video stream")
            return

        if not self.tello.streamon():
            print("Could not start video stream")
            return

        should_stop = False
        imgCount = 0
        OVERRIDE = False
        self.tello.get_battery()

        # Recorder for the annotated output video.
        result = cv2.VideoWriter('Prueba_Tello.avi',
                                 cv2.VideoWriter_fourcc(*'MJPG'), 10,
                                 dimensions)

        vid = self.tello.get_video_capture()

        if args.debug:
            print("DEBUG MODE ENABLED!")

        while not should_stop:
            #self.update()
            #time.sleep(1 / FPS)

            # Listen for key presses
            k = cv2.waitKey(20)

            # Set_Action returns the updated override flag (Backspace
            # toggles it); assigning it back keeps run() in sync.
            OVERRIDE = self.Set_Action(k, OVERRIDE)

            if OVERRIDE:
                self.Action_OVERRRIDE(k)

            # Quit the software
            if k == 27:
                should_stop = True
                break

            if vid.isOpened():
                ret, image = vid.read()
                imgCount += 1
                # Process every other frame only.
                if imgCount % 2 != 0:
                    continue
                if ret:
                    image = imutils.resize(image,
                                           width=min(350, image.shape[1]))

                    # Detecting all the regions in the Image that has a
                    # pedestrians inside it
                    (regions, weigths) = hog.detectMultiScale(image,
                                                              winStride=(4,
                                                                         4),
                                                              padding=(4, 4),
                                                              scale=1.1)

                    # Safety Zone Z
                    szZ = Safe_Zones[self.safe_zone][2]

                    # Safety Zone X
                    szX = Safe_Zones[self.safe_zone][0]

                    # Safety Zone Y
                    szY = Safe_Zones[self.safe_zone][1]

                    center = (int(image.shape[1] / 2), int(image.shape[0] /
                                                           2))

                    # if we've given rc controls & get body coords returned
                    if self.send_rc_control and not OVERRIDE:
                        if len(regions) > 0:
                            # Track the detection with the highest weight.
                            max_index = np.where(weigths == np.amax(weigths))
                            (x, y, w, h) = regions[max_index[0][0]]
                            cv2.rectangle(image, (x, y), (x + w, y + h),
                                          (0, 0, 255), 2)

                            # points
                            person = (int(x + w / 2), int(y + h / 2))
                            distance = (person[0] - center[0],
                                        center[1] - person[1])

                            # Estimate depth (z) from the horizontal offset
                            # and the person's pixel width; theta0 is
                            # presumably the camera's horizontal field of
                            # view in degrees — TODO confirm.
                            theta0 = 86.6
                            B = image.shape[1]
                            person_width = w

                            # z
                            theta1 = theta0 * (2 * abs(distance[0]) +
                                               person_width) / (2 * B)
                            z = (2 * abs(distance[0]) + person_width) / (
                                2 * math.tan(math.radians(abs(theta1))))

                            distance = (int(distance[0]), int(distance[1]),
                                        int(z))

                            if not args.debug:
                                # for turning
                                self.update()
                                if distance[0] < -szX:
                                    self.yaw_velocity = S
                                    # self.left_right_velocity = S2
                                elif distance[0] > szX:
                                    self.yaw_velocity = -S
                                    # self.left_right_velocity = -S2
                                else:
                                    self.yaw_velocity = 0

                                # for up & down
                                if distance[1] > szY:
                                    self.up_down_velocity = S
                                elif distance[1] < -szY:
                                    self.up_down_velocity = -S
                                else:
                                    self.up_down_velocity = 0

                                # Extra boost when far from the target zone.
                                F = 0
                                if abs(distance[2]) > acc[self.safe_zone]:
                                    F = S

                                # for forward back
                                if distance[2] > szZ:
                                    self.for_back_velocity = S + F
                                elif distance[2] < szZ:
                                    self.for_back_velocity = -S - F
                                else:
                                    self.for_back_velocity = 0

                            # Visualize the offset from center to person.
                            cv2.line(image, center,
                                     (center[0] + distance[0], center[1]),
                                     (255, 0, 0))
                            cv2.line(image,
                                     (center[0] + distance[0], center[1]),
                                     person, (0, 255, 0))
                            cv2.circle(image, center, 3, (0, 255, 0))
                            cv2.circle(image, person, 3, (0, 0, 255))
                            cv2.putText(
                                image,
                                "d:" + str(distance),
                                (0, 20),
                                2,
                                0.7,
                                (0, 0, 0),
                            )
                        # if there are no body detected, don't do anything
                        else:
                            self.yaw_velocity = 0
                            self.up_down_velocity = 0
                            self.for_back_velocity = 0
                            print("NO TARGET")

                    # NOTE(review): parses as safe_zone + (1/7); possibly
                    # (safe_zone + 1) / 7 was intended — confirm.
                    dCol = lerp(np.array((0, 0, 255)),
                                np.array((255, 255, 255)),
                                self.safe_zone + 1 / 7)

                    if OVERRIDE:
                        show = "OVERRIDE: {}".format(self.oSpeed)
                        dCol = (255, 255, 255)
                    else:
                        show = "AI: {}".format(str(self.safe_zone))

                    # Draw the safe zone around the frame center.
                    cv2.rectangle(
                        image,
                        (int(center[0] - szX / 2), int(center[1] - szY / 2)),
                        (int(center[0] + szX / 2), int(center[1] + szY / 2)),
                        (255, 0, 0), 1)

                    # Showing the output Image
                    image = cv2.resize(image,
                                       dimensions,
                                       interpolation=cv2.INTER_AREA)

                    # Write the frame into the file 'Prueba_Tello.avi'
                    result.write(image)

                    # Draw the distance choosen
                    cv2.putText(image, show, (32, 664),
                                cv2.FONT_HERSHEY_SIMPLEX, 1, dCol, 2)

                    # Display the resulting frame
                    cv2.imshow(f'Tello Tracking...', image)
                else:
                    break

        # On exit, print the battery
        self.tello.get_battery()

        # When everything done, release the capture
        cv2.destroyAllWindows()
        result.release()

        # Call it always before finishing. I deallocate resources.
        self.tello.end()

    def battery(self):
        # First two characters of the battery response (presumably the
        # percentage digits — TODO confirm).
        return self.tello.get_battery()[:2]

    def update(self):
        """ Update routine. Send velocities to Tello."""
        if self.send_rc_control:
            self.tello.send_rc_control(self.left_right_velocity,
                                       self.for_back_velocity,
                                       self.up_down_velocity,
                                       self.yaw_velocity)

    def Action_OVERRRIDE(self, k):
        """Map manual-override keys to velocity components (scaled by
        self.oSpeed)."""
        # S & W to fly forward & back
        if k == ord('w'):
            # Fixed: previously used the undefined bare name `oSpeed`,
            # which raised NameError the first time 'w'/'s' was pressed.
            self.for_back_velocity = int(S * self.oSpeed)
        elif k == ord('s'):
            self.for_back_velocity = -int(S * self.oSpeed)
        else:
            self.for_back_velocity = 0

        # a & d to pan left & right
        if k == ord('d'):
            self.yaw_velocity = int(S * self.oSpeed)
        elif k == ord('a'):
            self.yaw_velocity = -int(S * self.oSpeed)
        else:
            self.yaw_velocity = 0

        # Q & E to fly up & down
        if k == ord('e'):
            self.up_down_velocity = int(S * self.oSpeed)
        elif k == ord('q'):
            self.up_down_velocity = -int(S * self.oSpeed)
        else:
            self.up_down_velocity = 0

        # c & z to fly left & right
        if k == ord('c'):
            self.left_right_velocity = int(S * self.oSpeed)
        elif k == ord('z'):
            self.left_right_velocity = -int(S * self.oSpeed)
        else:
            self.left_right_velocity = 0
        return

    def Set_Action(self, k, OVERRIDE):
        """Handle non-movement keys (safe zone 0-6, takeoff, land,
        override toggle).

        Returns the updated OVERRIDE flag. Previously the Backspace toggle
        only rebound the local parameter and was silently lost; callers
        must now use the return value.
        """
        # Press 0 to set distance to 0
        if k == ord('0'):
            if not OVERRIDE:
                print("Distance = 0")
                self.safe_zone = 0

        # Press 1 to set distance to 1
        if k == ord('1'):
            if OVERRIDE:
                self.oSpeed = 1
            else:
                print("Distance = 1")
                self.safe_zone = 1

        # Press 2 to set distance to 2
        if k == ord('2'):
            if OVERRIDE:
                self.oSpeed = 2
            else:
                print("Distance = 2")
                self.safe_zone = 2

        # Press 3 to set distance to 3
        if k == ord('3'):
            if OVERRIDE:
                self.oSpeed = 3
            else:
                print("Distance = 3")
                self.safe_zone = 3

        # Press 4 to set distance to 4
        if k == ord('4'):
            if not OVERRIDE:
                print("Distance = 4")
                self.safe_zone = 4

        # Press 5 to set distance to 5
        if k == ord('5'):
            if not OVERRIDE:
                print("Distance = 5")
                self.safe_zone = 5

        # Press 6 to set distance to 6
        if k == ord('6'):
            if not OVERRIDE:
                print("Distance = 6")
                self.safe_zone = 6

        # Press T to take off
        if k == ord('t'):
            if not args.debug:
                print("Taking Off")
                self.tello.takeoff()
                self.tello.get_battery()
            self.send_rc_control = True

        # Press L to land
        if k == ord('l'):
            if not args.debug:
                print("Landing")
                self.tello.land()
            self.send_rc_control = False

        # Press Backspace for controls override
        if k == 8:
            if not OVERRIDE:
                OVERRIDE = True
                print("OVERRIDE ENABLED")
            else:
                OVERRIDE = False
                print("OVERRIDE DISABLED")

        return OVERRIDE
class Tello_GCP(object):
    """Keyboard-piloted Tello front end that can snapshot frames, run them
    through a vision pipeline (`main_func`) and upload both images to a GCS
    bucket via `upload_blob` (both defined elsewhere in this file/project).
    """

    def __init__(self):
        # Init Tello object that interacts with the Tello drone
        self.tello = Tello()
        # Drone velocities between -100~100
        self.for_back_velocity = 0
        self.left_right_velocity = 0
        self.up_down_velocity = 0
        self.yaw_velocity = 0
        self.speed = 10
        self.send_rc_control = False

    def run(self):
        """Main control loop: connect, stream video, react to key presses.

        Keys: t=take off, l=land, p=capture+upload frame, w/s/a/d/q/e/c/z
        manual axis control, Esc=quit.
        """
        if not self.tello.connect():
            print("Tello not connected")
            return
        if not self.tello.set_speed(self.speed):
            print("Not set speed to lowest possible")
            return
        # In case streaming is on. This happens when we quit this program
        # without the escape key.
        if not self.tello.streamoff():
            print("Could not stop video stream")
            return
        if not self.tello.streamon():
            print("Could not start video stream")
            return
        frame_read = self.tello.get_frame_read()
        should_stop = False
        imgCount = 0           # sequence number for captured images
        oSpeed = 3             # manual-control speed multiplier
        self.tello.get_battery()
        while not should_stop:
            # Push current velocities to the drone once per frame.
            self.update()
            if frame_read.stopped:
                frame_read.stop()
                break
            # Timestamp with filename-safe separators (currently unused).
            theTime = str(datetime.datetime.now()).replace(':', '-').replace('.', '_')
            frame = cv2.cvtColor(frame_read.frame, cv2.COLOR_BGR2RGB)
            frameRet = frame_read.frame
            vid = self.tello.get_video_capture()
            time.sleep(1 / FPS)
            # Listen for key presses
            k = cv2.waitKey(20)
            # Press T to take off
            if k == ord('t'):
                print("Taking Off")
                self.tello.takeoff()
                self.tello.get_battery()
                self.send_rc_control = True
            # Press L to land
            if k == ord('l'):
                print("Landing")
                self.tello.land()
                self.send_rc_control = False
            # Press P to capture the frame, run the vision pipeline on it and
            # upload the original + annotated images to the GCS bucket.
            if k == ord('p'):
                print("Captured")
                cv2.imwrite("{}/tellocap{}.jpg".format(ddir, imgCount), frameRet)
                main_func("{}/tellocap{}.jpg".format(ddir, imgCount),
                          "{}/visionapi{}.jpg".format(ddir, imgCount), 50)
                upload_blob("tello-images",
                            "{}/tellocap{}.jpg".format(ddir, imgCount),
                            "original_image/tellocap{}.jpg".format(imgCount))
                upload_blob("tello-images",
                            "{}/visionapi{}.jpg".format(ddir, imgCount),
                            "vision_image/visionapi{}.jpg".format(imgCount))
                imgCount += 1
            # NOTE(review): rotated copy is never displayed or saved.
            frame = np.rot90(frame)
            # S & W to fly forward & back
            if k == ord('w'):
                self.for_back_velocity = int(S * oSpeed)
            elif k == ord('s'):
                self.for_back_velocity = -int(S * oSpeed)
            else:
                self.for_back_velocity = 0
            # a & d to pan left & right
            if k == ord('d'):
                self.yaw_velocity = int(S * oSpeed)
            elif k == ord('a'):
                self.yaw_velocity = -int(S * oSpeed)
            else:
                self.yaw_velocity = 0
            # Q & E to fly up & down
            if k == ord('e'):
                self.up_down_velocity = int(S * oSpeed)
            elif k == ord('q'):
                self.up_down_velocity = -int(S * oSpeed)
            else:
                self.up_down_velocity = 0
            # c & z to fly left & right
            if k == ord('c'):
                self.left_right_velocity = int(S * oSpeed)
            elif k == ord('z'):
                self.left_right_velocity = -int(S * oSpeed)
            else:
                self.left_right_velocity = 0
            # Quit the software
            if k == 27:
                should_stop = True
                break
            gray = cv2.cvtColor(frameRet, cv2.COLOR_BGR2GRAY)
            show = "Speed: {}".format(oSpeed)
            dCol = (255, 255, 255)
            # Draw the distance choosen
            cv2.putText(frameRet, show, (32, 664), cv2.FONT_HERSHEY_SIMPLEX, 1, dCol, 2)
            # Display the resulting frame
            cv2.imshow('Tello Vision', frameRet)
        # On exit, print the battery
        self.tello.get_battery()
        # When everything done, release the capture
        cv2.destroyAllWindows()
        # Call it always before finishing. I deallocate resources.
        self.tello.end()

    def battery(self):
        # First two characters of the raw battery response (a string).
        return self.tello.get_battery()[:2]

    def update(self):
        """ Update routine. Send velocities to Tello."""
        if self.send_rc_control:
            self.tello.send_rc_control(self.left_right_velocity, self.for_back_velocity,
                                       self.up_down_velocity, self.yaw_velocity)
class TelloDrone():
    """Autonomous-leaning Tello wrapper: keeps cached axis velocities, rate-
    limits battery polling and scans (rotates) while no target is held.
    Relies on module-level constants defined elsewhere in this file:
    TIME_BTW_BATTERY_CHECKS, SCAN_VELOCITY, ESC_KEY, NO_KEY, FPS, and the
    print_status/print_warning/print_error helpers.
    """

    def __init__(self):
        """ Get the DJITelloPy Tello object to use for communication with the drone. speed: general speed for non rc control calls (10-100) """
        self.tello = Tello()
        # Cached axis velocities sent by update_drone_velocities*().
        self.left_right_velocity = 0
        self.forward_backward_velocity = 0
        self.up_down_velocity = 0
        self.yaw_velocity = 0
        self.has_target = False
        self.battery_level = 0
        self.last_battery_check = 0   # epoch seconds of last battery poll
        self.speed = 20  # Lowest

    def cleanup(self):
        """Call this function if the script has to be stopped for whatever
        reason to make sure everything is exited cleanly"""
        self.end_drone()
        cv2.destroyAllWindows()

    def end_drone(self):
        """Lands drone and ends drone interaction"""
        self.stop_moving()
        self.tello.streamoff()
        if self.tello.is_flying:
            self.tello.land()
        self.tello.end()

    def update_battery_level(self):
        # Poll the battery at most once per TIME_BTW_BATTERY_CHECKS seconds.
        if time.time() - self.last_battery_check >= TIME_BTW_BATTERY_CHECKS:
            self.battery_level = self.tello.get_battery()
            self.last_battery_check = time.time()

    def update_drone_velocities_if_flying(self):
        """Move drone based on velocities only if drone is flying"""
        if not self.tello.is_flying:
            return
        self.tello.send_rc_control(self.left_right_velocity,
                                   self.forward_backward_velocity,
                                   self.up_down_velocity,
                                   self.yaw_velocity)

    def update_drone_velocities(self):
        """Move drone based on velocities"""
        self.tello.send_rc_control(self.left_right_velocity,
                                   self.forward_backward_velocity,
                                   self.up_down_velocity,
                                   self.yaw_velocity)

    def stop_moving(self):
        """Freeze in place"""
        # Delay makes sure the previous rc command is sent before we zero out.
        time.sleep(self.tello.TIME_BTW_RC_CONTROL_COMMANDS)
        self.left_right_velocity = 0
        self.forward_backward_velocity = 0
        self.up_down_velocity = 0
        self.yaw_velocity = 0
        self.update_drone_velocities()

    def scan_surroundings(self):
        """Start looking around"""
        # Rotate in place at SCAN_VELOCITY until a target shows up.
        self.yaw_velocity = SCAN_VELOCITY
        self.update_drone_velocities_if_flying()

    def find_target(self):
        # Placeholder for target detection. Returns False for now.
        return False

    def handle_user_keypress(self):
        """Check for pressed keys.
        't': Take off
        'l': Land
        'Esc': Exit
        Returns:
            key (int): unicode value of key that was pressed, or -1 if none
            were pressed"""
        key = cv2.waitKey(20)
        if key == ord('t'):
            print_warning("Drone taking off...")
            self.tello.takeoff()
        elif key == ord('l'):
            print_warning("Landing drone...")
            self.stop_moving()
            self.tello.land()
        elif key == ESC_KEY:
            # Caller (run) breaks out of the loop on this return value.
            print_warning("Stopping drone...")
        elif key == NO_KEY:
            # Idle: keep scanning until a target is acquired.
            if not self.has_target:
                self.scan_surroundings()
        return key

    def initialize_before_run(self):
        """Set up drone before we bring it to life"""
        if not self.tello.connect():
            print_error("Failed to set drone to SDK mode")
            sys.exit(-1)
        print_status("Connected to Drone")
        if not self.tello.set_speed(self.speed):
            print_error("Failed to set initial drone speed")
        print_status(f"Drone speed set to {self.speed} cm/s ({self.speed}%)")
        # Make sure stream is off first
        self.tello.streamoff()
        self.tello.streamon()
        # Not sure why DJITelloPy doesn't return here
        self.battery_level = self.tello.get_battery()
        print_status(f"Drone battery is at {self.battery_level}%")
        # Reset velocities
        self.update_drone_velocities()
        print_status(f"All drone velocities initialized to 0")

    def run(self, user_interface=True):
        """Bring an autonomous being to life"""
        self.initialize_before_run()
        frame_read = self.tello.get_frame_read()
        while True:
            self.update_drone_velocities_if_flying()
            if frame_read.stopped:
                frame_read.stop()
                break
            frame = cv2.cvtColor(frame_read.frame, cv2.COLOR_BGR2RGB)
            frame_ret = frame_read.frame
            tello_cam = self.tello.get_video_capture()
            time.sleep(1 / FPS)
            key_pressed = self.handle_user_keypress()
            if key_pressed == ESC_KEY:
                break
            # Show battery level
            self.update_battery_level()
            cv2.putText(frame_ret, f"Battery: {self.battery_level}%", (32, 664),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), thickness=2)
            cv2.imshow(f'Tello Tracking...', frame_ret)
        self.cleanup()  # Clean exit
from djitellopy import Tello
import cv2
import numpy as np
import imutils
import time
import threading

# Font used for the face-width overlay text.
font = cv2.FONT_HERSHEY_COMPLEX

# Connect to the drone and start its video stream.
tello = Tello()
tello.connect()
tello.streamon()
cap = tello.get_video_capture()

# Haar cascade face detector; the XML must sit next to this script.
faceCascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
# FIX: CascadeClassifier silently yields an empty detector when the file is
# missing; fail loudly instead of detecting nothing forever.
if faceCascade.empty():
    raise IOError("Could not load haarcascade_frontalface_default.xml")

while True:
    # FIX: the original did `(cap.read())[1]`, ignoring the success flag —
    # a dropped/None frame then crashes cv2.cvtColor. Check both.
    ok, img_rgb = cap.read()
    if not ok or img_rgb is None:
        time.sleep(1 / 30)  # brief back-off before retrying the stream
        continue
    gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(gray, scaleFactor=1.3)
    if len(faces) > 0:
        print("Found {0} faces!".format(len(faces)))
        for (x, y, w, h) in faces:
            # Box each face and label it with its pixel width (a rough
            # proxy for distance to the camera).
            cv2.rectangle(img_rgb, (x, y), (x + w, y + h), (0, 255, 0), 2)
            cv2.putText(img_rgb, str(w), (x, y), font, 1, (0, 255, 0))
    # if (w < 200 and airborne):
    #     print("move to face")
    #     tello.move_forward(10)
class DroneThread(QThread):
    """Qt worker thread that streams Tello video, tracks the largest detected
    faces and emits the annotated frame plus velocity readouts as signals for
    the GUI. Depends on module-level face_cascade, faceSizes, dimensions,
    UDOffset, S, S2 and acc defined elsewhere in this file.
    """
    # Signals consumed by the GUI: the rendered frame and per-direction
    # velocity readouts.
    pixSignal = pyqtSignal(Qt.QPixmap, name="pixSignal")
    upSignal = pyqtSignal(int, name="upSignal")
    downSignal = pyqtSignal(int, name="downSignal")
    leftSignal = pyqtSignal(int, name="leftSignal")
    rightSignal = pyqtSignal(int, name="rightSignal")

    def __init__(self):
        self.tello = Tello()
        # Cached axis velocities pushed to the drone by update().
        self.for_back_velocity = 0
        self.left_right_velocity = 0
        self.up_down_velocity = 0
        self.yaw_velocity = 0
        QThread.__init__(self)

    def run(self):
        """Thread body: enter SDK mode, then track faces until the stream
        stops. Runs at ~60 iterations/second."""
        # "command" switches the drone into SDK mode.
        self.tello.send_command_without_return("command")
        self.tello.streamon()
        while self.tello.stream_on:
            frame_read = self.tello.get_frame_read()
            # setting Face Box properties
            fbCol = (255, 0, 0)  # BGR 0-255
            fbStroke = 2
            frameRet = frame_read.frame
            frameRet = cv2.cvtColor(frameRet, cv2.COLOR_BGR2RGB)
            faces = face_cascade.detectMultiScale(frameRet, scaleFactor=1.5, minNeighbors=2)
            # Target size
            tSize = faceSizes[3]
            # These are our center dimensions
            cWidth = int(dimensions[0] / 2)
            cHeight = int(dimensions[1] / 2)
            # end coords are the end of the bounding box x & y
            for (x, y, w, h) in faces:
                end_cord_x = x + w
                end_cord_y = y + h
                end_size = w * 2
                # these are our target coordinates
                targ_cord_x = int((end_cord_x + x) / 2)
                targ_cord_y = int((end_cord_y + y) / 2) + UDOffset
                # Draw the face bounding box
                cv2.rectangle(frameRet, (x, y), (end_cord_x, end_cord_y), fbCol, fbStroke)
                # Draw the target as a circle
                cv2.circle(frameRet, (targ_cord_x, targ_cord_y), 10, (0, 255, 0), 2)
                # This calculates the vector from your face to the center of the screen
                vTrue = np.array((cWidth, cHeight, tSize))
                vTarget = np.array((targ_cord_x, targ_cord_y, end_size))
                vDistance = vTrue - vTarget
                # Horizontal: yaw + strafe toward the face.
                # NOTE(review): the else-branch zeroes only yaw_velocity, so
                # left_right_velocity keeps its last value — looks like a bug;
                # compare the FrontEnd variant where strafe is commented out.
                if vDistance[0] < -100:
                    self.yaw_velocity = S
                    self.left_right_velocity = S2
                elif vDistance[0] > 100:
                    self.yaw_velocity = -S
                    self.left_right_velocity = -S2
                else:
                    self.yaw_velocity = 0
                # for up & down
                if vDistance[1] > 100:
                    self.up_down_velocity = S
                elif vDistance[1] < -100:
                    self.up_down_velocity = -S
                else:
                    self.up_down_velocity = 0
                # Extra forward/back boost when far outside the sweet spot.
                F = 0
                if abs(vDistance[2]) > acc[3]:
                    F = S
                # for forward back
                if vDistance[2] > 0:
                    self.for_back_velocity = S + F
                elif vDistance[2] < 0:
                    self.for_back_velocity = -S - F
                else:
                    self.for_back_velocity = 0
            # Hand the annotated frame to the GUI as a QPixmap.
            img = QtGui.QImage(frameRet, frameRet.shape[1], frameRet.shape[0],
                               QtGui.QImage.Format_RGB888)
            pix = QtGui.QPixmap.fromImage(img)
            self.pixSignal.emit(pix)
            # Reset the direction readouts, then re-emit current values.
            self.rightSignal.emit(0)
            self.upSignal.emit(0)
            self.downSignal.emit(0)
            self.leftSignal.emit(0)
            self.update()
            self.updateCoordsTxt()
            vid = self.tello.get_video_capture()
            time.sleep(1 / 60)

    def update(self):
        """ Update routine. Send velocities to Tello."""
        self.tello.send_rc_control(self.left_right_velocity, self.for_back_velocity,
                                   self.up_down_velocity, self.yaw_velocity)

    def updateCoordsTxt(self):
        # Emit only the directions that are currently active; the GUI shows
        # the signed velocity value per direction.
        if self.left_right_velocity > 0:
            self.rightSignal.emit(self.left_right_velocity)
        if self.left_right_velocity < 0:
            self.leftSignal.emit(self.left_right_velocity)
        if self.up_down_velocity > 0:
            self.upSignal.emit(self.up_down_velocity)
        if self.up_down_velocity < 0:
            self.downSignal.emit(self.up_down_velocity)

    def get_drone(self):
        # Accessor used by the GUI to issue direct drone commands.
        return self.tello
class FrontEnd(object):
    """Tello front end that combines face tracking with on-frame emotion
    recognition (emotion_classifier / EMOTIONS / img_to_array are defined
    elsewhere in this file). Comments translated from Korean.
    """

    def __init__(self):
        # Tello object that interacts with the drone
        self.tello = Tello()
        # Drone velocities (-100~100): vertical / horizontal speeds
        self.for_back_velocity = 0
        self.left_right_velocity = 0
        self.up_down_velocity = 0
        self.yaw_velocity = 0
        self.speed = 10
        self.send_rc_control = False

    # Main loop
    def run(self):
        # Abort if the drone is not connected
        if not self.tello.connect():
            print("Tello not connected")
            return
        # Abort if the drone's speed limit cannot be applied
        if not self.tello.set_speed(self.speed):
            print("Not set speed to lowest possible")
            return
        # Abort if a previous abnormal exit left the video stream running
        if not self.tello.streamoff():
            print("Could not stop video stream")
            return
        # Abort if the video stream does not start
        if not self.tello.streamon():
            print("Could not start video stream")
            return
        # Per-frame reader
        frame_read = self.tello.get_frame_read()
        should_stop = False
        imgCount = 0
        OVERRIDE = False
        oSpeed = args.override_speed
        tDistance = args.distance
        self.tello.get_battery()
        # Safety margin on the X axis
        szX = args.saftey_x
        # Safety margin on the Y axis
        szY = args.saftey_y
        # Debug mode
        if args.debug:
            print("DEBUG MODE ENABLED!")
        # Loop until a stop condition is raised
        while not should_stop:
            self.update()
            # Leave the loop when frames stop arriving
            if frame_read.stopped:
                frame_read.stop()
                break
            theTime = str(datetime.datetime.now()).replace(':', '-').replace(
                '.', '_')
            frame = cv2.cvtColor(frame_read.frame, cv2.COLOR_BGR2RGB)
            frameRet = frame_read.frame
            vid = self.tello.get_video_capture()
            # Wait for keyboard input
            k = cv2.waitKey(20)
            # Quit the program
            if k == 27:
                should_stop = True
                break
            gray = cv2.cvtColor(frameRet, cv2.COLOR_BGR2GRAY)
            faces = face_cascade.detectMultiScale(gray, scaleFactor=1.5,
                                                  minNeighbors=2)
            # Blank canvas for the emotion probability bars
            canvas = np.zeros((250, 300, 3), dtype="uint8")
            # Target size
            tSize = faceSizes[tDistance]
            # Center dimensions
            cWidth = int(dimensions[0] / 2)
            cHeight = int(dimensions[1] / 2)
            noFaces = len(faces) == 0
            # Run emotion recognition only when a face was found
            if len(faces) > 0:
                # Use the largest detected face
                face = sorted(faces, reverse=True,
                              key=lambda x: (x[2] - x[0]) * (x[3] - x[1]))[0]
                (fX, fY, fW, fH) = face
                # Resize the crop to 48x48 (input size of the neural network)
                roi = gray[fY:fY + fH, fX:fX + fW]
                roi = cv2.resize(roi, (48, 48))
                roi = roi.astype("float") / 255.0
                roi = img_to_array(roi)
                roi = np.expand_dims(roi, axis=0)
                # Predict the emotion
                preds = emotion_classifier.predict(roi)[0]
                emotion_probability = np.max(preds)
                label = EMOTIONS[preds.argmax()]
                # Draw the label (2020.08.11: frame -> frameRet)
                cv2.putText(frameRet, label, (fX, fY - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)
                cv2.rectangle(frameRet, (fX, fY), (fX + fW, fY + fH),
                              (0, 0, 255), 2)
                # Render one probability bar per emotion
                for (i, (emotion, prob)) in enumerate(zip(EMOTIONS, preds)):
                    text = "{}: {:.2f}%".format(emotion, prob * 100)
                    w = int(prob * 300)
                    cv2.rectangle(canvas, (7, (i * 35) + 5),
                                  (w, (i * 35) + 35), (0, 0, 255), -1)
                    cv2.putText(canvas, text, (10, (i * 35) + 23),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.45,
                                (255, 255, 255), 2)
            cv2.imshow('Emotion Recognition', frameRet)
            cv2.imshow("Probabilities", canvas)
            # With RC control active (and no manual override), track the face
            if self.send_rc_control and not OVERRIDE:
                for (x, y, w, h) in faces:
                    # roi_gray = gray[y:y + h, x:x + w]  #(ycord_start, ycord_end)
                    roi_color = frameRet[y:y + h, x:x + w]
                    # Face box properties
                    fbCol = (255, 0, 0)  # BGR 0-255
                    fbStroke = 2
                    # End coords are the far corner of the bounding box
                    end_cord_x = x + w
                    end_cord_y = y + h
                    end_size = w * 2
                    # Target coordinates
                    targ_cord_x = int((end_cord_x + x) / 2)
                    targ_cord_y = int((end_cord_y + y) / 2) + UDOffset
                    # Vector from the face to the center of the screen
                    vTrue = np.array((cWidth, cHeight, tSize))
                    vTarget = np.array((targ_cord_x, targ_cord_y, end_size))
                    vDistance = vTrue - vTarget
                    # Outside debug mode, move the drone to center the face
                    if not args.debug:
                        # Rotation
                        if vDistance[0] < -szX:
                            self.yaw_velocity = S
                            # self.left_right_velocity = S2
                        elif vDistance[0] > szX:
                            self.yaw_velocity = -S
                            # self.left_right_velocity = -S2
                        else:
                            self.yaw_velocity = 0
                        # Up & down (climb / descend)
                        if vDistance[1] > szY:
                            self.up_down_velocity = S
                        elif vDistance[1] < -szY:
                            self.up_down_velocity = -S
                        else:
                            self.up_down_velocity = 0
                        # Extra forward/back boost when far off target
                        F = 0
                        if abs(vDistance[2]) > acc[tDistance]:
                            F = S
                        # Forward, back
                        if vDistance[2] > 0:
                            self.for_back_velocity = S + F
                        elif vDistance[2] < 0:
                            self.for_back_velocity = -S - F
                        else:
                            self.for_back_velocity = 0
                    # Draw the face bounding box
                    cv2.rectangle(frameRet, (x, y), (end_cord_x, end_cord_y),
                                  fbCol, fbStroke)
                    # Draw the target as a circle
                    cv2.circle(frameRet, (targ_cord_x, targ_cord_y), 10,
                               (0, 255, 0), 2)
                    # Draw the safety zone
                    cv2.rectangle(frameRet, (targ_cord_x - szX, targ_cord_y - szY),
                                  (targ_cord_x + szX, targ_cord_y + szY),
                                  (0, 255, 0), fbStroke)
                    # Show the drone's relative vector position to the face box
                    cv2.putText(frameRet, str(vDistance), (0, 64),
                                cv2.FONT_HERSHEY_SIMPLEX, 1,
                                (255, 255, 255), 2)
                # If no face is detected, do nothing
                if noFaces:
                    self.yaw_velocity = 0
                    self.up_down_velocity = 0
                    self.for_back_velocity = 0
                    print("NO TARGET")
            # Draw the screen center the drone tries to align the target with
            cv2.circle(frameRet, (cWidth, cHeight), 10, (0, 0, 255), 2)
            # NOTE(review): operator precedence makes this tDistance + (1/7);
            # presumably (tDistance + 1) / 7 was intended — TODO confirm.
            dCol = lerp(np.array((0, 0, 255)), np.array((255, 255, 255)),
                        tDistance + 1 / 7)
            if OVERRIDE:
                show = "OVERRIDE: {}".format(oSpeed)
                dCol = (255, 255, 255)
            else:
                show = "AI: {}".format(str(tDistance))
            # Draw the chosen distance
            cv2.putText(frameRet, show, (32, 664), cv2.FONT_HERSHEY_SIMPLEX,
                        1, dCol, 2)
        # On exit, print the battery
        self.tello.get_battery()
        # When everything is done, release the capture
        cv2.destroyAllWindows()
        # Always call before finishing; releases resources
        self.tello.end()

    def battery(self):
        # First two characters of the raw battery response (a string).
        return self.tello.get_battery()[:2]

    def update(self):
        """ Update routine. Send velocities to Tello."""
        if self.send_rc_control:
            self.tello.send_rc_control(self.left_right_velocity,
                                       self.for_back_velocity,
                                       self.up_down_velocity,
                                       self.yaw_velocity)
# Softmax over the network's final layer (model.fc8) to get class scores.
score = tf.nn.softmax(model.fc8)
# NOTE(review): `max` shadows the builtin; the name is kept so any later
# references in this script still resolve. tf.arg_max is the deprecated
# spelling of tf.argmax — switched to the supported alias (same op).
max = tf.argmax(score, 1)

with tf.Session() as sess:
    # FIX: run the initializers BEFORE restoring the checkpoint. The original
    # code did saver.restore() first and then ran
    # tf.global_variables_initializer(), which overwrites every restored
    # weight with its fresh initial value — the prediction then comes from an
    # untrained network.
    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())
    saver.restore(sess, checkpoint_path)
    # Forward pass with dropout disabled (keep_prob = 1).
    pred = sess.run(score, feed_dict={keep_prob: 1.})
    print("prediction: ", pred)
    prob = sess.run(max, feed_dict={keep_prob: 1.})[0]
    print(prob)
    class_name = class_names[np.argmax(pred)]
    print("Categoria: ", class_name)

vid = mdrone.get_video_capture()
if args.save_session:
    cv2.imwrite("{}/tellocap{}.jpg".format(ddir, imgCount), image)

# Listen to any key press
k = cv2.waitKey(20)
# Press L to land
if k == ord('l'):
    if not args.debug:
        print("Landing")
        mdrone.land()
        mdrone.send_rc_control = False

# Visualization
font = cv2.FONT_HERSHEY_SIMPLEX
def __init__(self, drone: Tello):
    """Bind to the drone's video capture and cache its frame dimensions.

    Args:
        drone: connected Tello whose capture device we attach to.

    Raises:
        ValueError: if the video source cannot be opened.
    """
    capture = drone.get_video_capture()
    self.vid = capture
    if not capture.isOpened():
        raise ValueError("Unable to open video source")
    # Cache frame geometry for the UI layer.
    self.width = capture.get(cv2.CAP_PROP_FRAME_WIDTH)
    self.height = capture.get(cv2.CAP_PROP_FRAME_HEIGHT)