def main():
    """Create a Tello handle on local port 9999 and hand it to the video loop."""
    tello_drone = tello.Tello("", 9999)
    videoLoop(tello_drone)
def main():
    """Live HSV threshold tuner for the Tello camera stream.

    Shows the camera feed masked by an HSV range chosen with trackbars,
    while forwarding simple keyboard flight commands to the drone.
    Loops until ESC or Ctrl+C.
    """
    # Create the drone instance (very short command timeout keeps the loop responsive).
    drone = tello.Tello('', 8889, command_timeout=.01)

    current_time = time.time()  # current-time holder
    pre_time = current_time     # timestamp of the last keep-alive 'command'

    time.sleep(0.5)  # give the connection a moment to stabilize

    # The window must exist before trackbars can be attached to it.
    cv2.namedWindow("OpenCV Window")

    # Trackbar callback does nothing; values are polled each frame instead.
    def nothing(x):
        pass

    # HSV range trackbars (OpenCV Hue tops out at 179, S/V at 255).
    cv2.createTrackbar("H_min", "OpenCV Window", 0, 179, nothing)
    cv2.createTrackbar("H_max", "OpenCV Window", 128, 179, nothing)
    cv2.createTrackbar("S_min", "OpenCV Window", 128, 255, nothing)
    cv2.createTrackbar("S_max", "OpenCV Window", 255, 255, nothing)
    cv2.createTrackbar("V_min", "OpenCV Window", 128, 255, nothing)
    cv2.createTrackbar("V_max", "OpenCV Window", 255, 255, nothing)

    # Loop until Ctrl+C.
    try:
        while True:
            # (A) Grab one video frame; skip bad/empty frames.
            frame = drone.read()
            if frame is None or frame.size == 0:
                continue

            # (B) Image processing: the stream is RGB, OpenCV wants BGR.
            image = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
            bgr_image = cv2.resize(image, dsize=(480, 360))  # halve the frame size
            hsv_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2HSV)

            # Poll the current trackbar positions.
            h_min = cv2.getTrackbarPos("H_min", "OpenCV Window")
            h_max = cv2.getTrackbarPos("H_max", "OpenCV Window")
            s_min = cv2.getTrackbarPos("S_min", "OpenCV Window")
            s_max = cv2.getTrackbarPos("S_max", "OpenCV Window")
            v_min = cv2.getTrackbarPos("V_min", "OpenCV Window")
            v_max = cv2.getTrackbarPos("V_max", "OpenCV Window")

            # Range-threshold in HSV space; the bounds tuples are (H, S, V).
            mask_image = cv2.inRange(
                hsv_image, (h_min, s_min, v_min), (h_max, s_max, v_max))

            # Keep only the pixels inside the mask.
            result_image = cv2.bitwise_and(hsv_image, hsv_image, mask=mask_image)

            # (X) Display — swap the image here to inspect other stages.
            cv2.imshow('OpenCV Window', result_image)

            # (Y) Poll the OpenCV window for a key press (1 ms).
            key = cv2.waitKey(1)
            if key == 27:  # ESC quits the loop
                break
            elif key == ord('t'):
                drone.takeoff()            # take off
            elif key == ord('l'):
                drone.land()               # land
            elif key == ord('w'):
                drone.move_forward(0.3)    # forward
            elif key == ord('s'):
                drone.move_backward(0.3)   # backward
            elif key == ord('a'):
                drone.move_left(0.3)       # left
            elif key == ord('d'):
                drone.move_right(0.3)      # right
            elif key == ord('q'):
                drone.rotate_ccw(20)       # yaw left
            elif key == ord('e'):
                drone.rotate_cw(20)        # yaw right
            elif key == ord('r'):
                drone.move_up(0.3)         # ascend
            elif key == ord('f'):
                drone.move_down(0.3)       # descend

            # (Z) Send 'command' every 5 s as a keep-alive so the Tello
            # does not auto-land from inactivity.
            current_time = time.time()
            if current_time - pre_time > 5.0:
                drone.send_command('command')
                pre_time = current_time
    except (KeyboardInterrupt, SystemExit):  # leave the loop on Ctrl+C
        print("SIGINTを検知")

    # Drop the Tello instance (its destructor closes the connection).
    del drone
def main():
    """State-machine flight demo.

    State 0: climb once, then approach a detected "horse" target.
    States 1/2/4/5/6: follow ArUco markers via detect_marker().
    State 3: strafe left until a "traffic light" is seen, then spin and shift.

    The drone only starts acting after the operator presses '1'; other keys
    are forwarded to drone.keyboard().
    """
    drone = tello.Tello('', 8889)
    time.sleep(5)  # let the video stream come up

    detect_horse = False
    dist1 = 0.9  # lateral hop (m) used when switching out of state 0
    dist2 = 1.3

    # ArUco detector setup (6x6 dictionary, 250 ids).
    dictionary = cv2.aruco.Dictionary_get(cv2.aruco.DICT_6X6_250)
    parameters = cv2.aruco.DetectorParameters_create()

    detect_mark = False
    firstin = False       # becomes True once the operator presses '1'
    adjust_height = True  # one-shot initial climb in state 0
    state = 0             # FIX: was never initialized -> NameError on first use
    finish = False        # FIX: was never initialized -> NameError at loop end

    while (1):
        frame = drone.read()
        frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
        cv2.imshow("frame", frame)
        key = cv2.waitKey(1)

        # Arm the mission on '1'; until then just show video.
        if key == ord('1'):
            firstin = True
        if not firstin:
            continue

        # Forward any other key press to the manual-control handler.
        if key != -1:
            drone.keyboard(key)

        if state == 0:
            # One-time altitude adjustment before searching.
            if adjust_height:
                drone.move_up(0.4)
                adjust_height = False
                show_frame(frame)
                continue
            # Approach the horse target until it is close enough.
            frame, dist = detectObj(frame, "horse", 20.0, 21.0)
            if dist < 55:
                state = 1
                drone.move_right(dist1)
                time.sleep(2)
            else:  # FIX: original `else` was missing its colon (SyntaxError)
                drone.move_forward(0.3)
            show_frame(frame)
        elif state in (1, 2, 4, 5, 6):
            # Marker-following states: hand detections to detect_marker().
            markerCorners, markerIds, rejectedCandidates = cv2.aruco.detectMarkers(
                frame, dictionary, parameters=parameters)
            if len(markerCorners) > 0 and len(markerIds) != 0:
                detect_marker(drone, frame, markerCorners, markerIds)
        elif state == 3:
            # Strafe left while looking for the traffic light;
            # detectObj returns 999 when nothing is found.
            drone.move_left(0.5)
            frame, dist = detectObj(frame, "traffic light", 20.0, 21.0)
            if dist != 999:
                drone.rotate_ccw(360)
                time.sleep(2)
                drone.move_left(0.3)
            show_frame(frame)

        if finish:
            break
import tello
import logging

# Verbose logging so every thread's activity is visible while flying.
logging.basicConfig(
    level=logging.DEBUG,
    format='%(asctime)s %(levelname)s %(threadName)s %(message)s')
log = logging.getLogger('Drone app')
log.info('Starting')

# Five-second command timeout: flips take a while to acknowledge.
t = tello.Tello(command_timeout=5)

# Attempt the takeoff + front flip; any failure is logged and we land anyway.
try:
    t.take_off()
    t.flip('f')
except Exception as exc:
    log.error(exc)
t.land()
path = os.path.dirname(file) camera_info_msg = yaml_to_CameraInfo(str(path + "/cam_calibration.yaml")) bridge = CvBridge() rospy.init_node('tello_node', anonymous=False) image_pub = rospy.Publisher("tello_node/image_raw", Image, queue_size=10) caminfo_pub = rospy.Publisher("tello_node/camera_info", CameraInfo, queue_size=10) rospy.Service('tello_node/send_command', SendCommand, command_handler) rospy.Subscriber("tello_node/takeoff", String, takeoff) # just for quick take off rospy.Subscriber("tello_node/land", String, land) # just for quick land try: drone = tello.Tello('', 8889) time.sleep(0.5) sending_command_thread = threading.Thread(target=sendingCommand) sending_command_thread.daemon = True sending_command_thread.start() while not rospy.is_shutdown(): img = drone.read() if img is None: continue img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR) ROS_Image = bridge.cv2_to_imgmsg(img, "bgr8") ROS_Image.header.frame_id = 'image_rect' camera_info_msg.header.frame_id = 'image_rect' image_pub.publish(ROS_Image) caminfo_pub.publish(camera_info_msg)
import tello
import time

# Billy is our Tello handle.
billy = tello.Tello()

# Routes between the charging base and checkpoint 0, as [command, arg, ...] pairs.
frombase = ["forward", 50, "ccw", 150]
tobase = ["ccw", 150, "forward", 50]

# Sweep pattern: each entry is a checkpoint id followed by the turn/advance
# commands that reach it (checkpoints 1-5, then back to 0).
checkpoint = [[1, "cw", 90, "forward", 100],
              [2, "ccw", 90, "forward", 80],
              [3, "ccw", 90, "forward", 40],
              [4, "ccw", 90, "forward", 40],
              [5, "cw", 90, "forward", 60],
              [0, "ccw", 90, "forward", 40]]

# Enter SDK command mode, then take off (second arg is the response wait, in s).
billy.send("command", 3)
billy.send("takeoff", 7)
print("\n")

# Fly from the charging base to the sweep's starting checkpoint.
print("From the charging base to the starting checkpoint of sweep pattern.\n")
billy.send("%s %s" % (frombase[0], str(frombase[1])), 4)
billy.send("%s %s" % (frombase[2], str(frombase[3])), 4)
print("Current location: Checkpoint 0 " + "\n")
def main():
    """Rope-following demo: track a colored line with HSV masking + labeling.

    The bottom strip of the frame is thresholded in HSV (tuned live with
    trackbars), the largest connected blob is taken as the rope, and in
    tracking mode ('1') the drone yaws to keep the blob centered while
    flying forward at a keyboard-adjustable speed.
    """
    # Create the drone instance (very short command timeout keeps the loop responsive).
    drone = tello.Tello('', 8889, command_timeout=.01)

    current_time = time.time()  # current-time holder
    pre_time = current_time     # timestamp of the last keep-alive 'command'

    time.sleep(0.5)  # give the connection a moment to stabilize

    # The window must exist before trackbars can be attached to it.
    cv2.namedWindow("OpenCV Window")

    # Trackbar callback does nothing; values are polled each frame instead.
    def nothing(x):
        pass

    # HSV range trackbars (OpenCV Hue tops out at 179, S/V at 255).
    cv2.createTrackbar("H_min", "OpenCV Window", 0, 179, nothing)
    cv2.createTrackbar("H_max", "OpenCV Window", 128, 179, nothing)
    cv2.createTrackbar("S_min", "OpenCV Window", 128, 255, nothing)
    cv2.createTrackbar("S_max", "OpenCV Window", 255, 255, nothing)
    cv2.createTrackbar("V_min", "OpenCV Window", 128, 255, nothing)
    cv2.createTrackbar("V_max", "OpenCV Window", 255, 255, nothing)

    # rc channels: a=roll, b=pitch(forward), c=throttle, d=yaw — presumably;
    # TODO confirm against the Tello SDK 'rc' command docs.
    a = b = c = d = 0
    b = 40    # default forward speed
    flag = 0  # 1 = autonomous tracking enabled

    # Loop until Ctrl+C.
    try:
        while True:
            # (A) Grab one video frame; skip bad/empty frames.
            frame = drone.read()
            if frame is None or frame.size == 0:
                continue

            # (B) Image processing: convert to BGR, shrink, crop the bottom strip.
            image = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
            small_image = cv2.resize(image, dsize=(480,360) )
            # Region of interest: rows 250-358, full width (the ground area).
            bgr_image = small_image[250:359,0:479]
            hsv_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2HSV)

            # Poll the current trackbar positions.
            h_min = cv2.getTrackbarPos("H_min", "OpenCV Window")
            h_max = cv2.getTrackbarPos("H_max", "OpenCV Window")
            s_min = cv2.getTrackbarPos("S_min", "OpenCV Window")
            s_max = cv2.getTrackbarPos("S_max", "OpenCV Window")
            v_min = cv2.getTrackbarPos("V_min", "OpenCV Window")
            v_max = cv2.getTrackbarPos("V_max", "OpenCV Window")

            # Range-threshold in HSV space (bounds tuples are H, S, V).
            bin_image = cv2.inRange(hsv_image, (h_min, s_min, v_min), (h_max, s_max, v_max))

            # Dilate with a 15x15 kernel so broken rope segments merge into one blob.
            kernel = np.ones((15,15),np.uint8)
            dilation_image = cv2.dilate(bin_image,kernel,iterations = 1)
            #erosion_image = cv2.erode(dilation_image,kernel,iterations = 1)

            # Keep only masked pixels for display.
            masked_image = cv2.bitwise_and(hsv_image, hsv_image, mask=dilation_image)

            # Output canvas for the labeling overlay.
            out_image = masked_image

            # Connected-component labeling with per-blob area and centroid.
            num_labels, label_image, stats, center = cv2.connectedComponentsWithStats(dilation_image)

            # Label 0 is the background covering the whole frame — drop it.
            num_labels = num_labels - 1
            stats = np.delete(stats, 0, 0)
            center = np.delete(center, 0, 0)

            if num_labels >= 1:
                # Pick the largest blob (stats column 4 is the pixel area).
                max_index = np.argmax(stats[:,4])

                # Bounding box, area, and centroid of the chosen blob.
                x = stats[max_index][0]
                y = stats[max_index][1]
                w = stats[max_index][2]
                h = stats[max_index][3]
                s = stats[max_index][4]
                mx = int(center[max_index][0])
                my = int(center[max_index][1])

                # Draw the bounding box and the blob area.
                cv2.rectangle(out_image, (x, y), (x+w, y+h), (255, 0, 255))
                cv2.putText(out_image, "%d"%(s), (x, y+h+15), cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 0))

                if flag == 1:
                    # Autonomous mode: a=c=0, b=forward speed; only yaw (d)
                    # is driven from the blob's horizontal offset.
                    dx = 1.0 * (240 - mx)  # offset from frame center (480/2)

                    # Dead zone: ignore offsets under ±50 px.
                    d = 0.0 if abs(dx) < 50.0 else dx
                    d = -d  # sign flip so the drone turns toward the rope

                    # Software limiter: clamp yaw command to ±100.
                    d = 100 if d > 100.0 else d
                    d = -100 if d < -100.0 else d

                    print('dx=%f'%(dx) )
                    drone.send_command('rc %s %s %s %s'%(int(a), int(b), int(c), int(d)) )

            # (X) Display — swap the image here to inspect other stages.
            cv2.imshow('OpenCV Window', out_image)

            # (Y) Poll the OpenCV window for a key press (1 ms).
            key = cv2.waitKey(1)
            if key == 27:  # ESC quits
                break
            elif key == ord('t'):
                drone.takeoff()            # take off
            elif key == ord('l'):
                drone.land()               # land
            elif key == ord('w'):
                drone.move_forward(0.3)    # forward
            elif key == ord('s'):
                drone.move_backward(0.3)   # backward
            elif key == ord('a'):
                drone.move_left(0.3)       # left
            elif key == ord('d'):
                drone.move_right(0.3)      # right
            elif key == ord('q'):
                drone.rotate_ccw(20)       # yaw left
            elif key == ord('e'):
                drone.rotate_cw(20)        # yaw right
            elif key == ord('r'):
                drone.move_up(0.3)         # ascend
            elif key == ord('f'):
                drone.move_down(0.3)       # descend
            elif key == ord('1'):
                flag = 1                   # tracking mode ON
            elif key == ord('2'):
                flag = 0                   # tracking mode OFF — stop all sticks
                drone.send_command('rc 0 0 0 0')
            elif key == ord('y'):
                # Raise forward speed in steps of 10, capped at 100.
                b = b + 10
                if b > 100:
                    b = 100
            elif key == ord('h'):
                # Lower forward speed in steps of 10, floored at 0.
                b = b - 10
                if b < 0:
                    b = 0

            # (Z) Send 'command' every 5 s as a keep-alive.
            current_time = time.time()
            if current_time - pre_time > 5.0 :
                drone.send_command('command')
                pre_time = current_time
    except( KeyboardInterrupt, SystemExit):  # leave the loop on Ctrl+C
        print( "SIGINTを検知" )
        drone.send_command('streamoff')

    # Drop the Tello instance (its destructor closes the connection).
    del drone
def main():
    """Start the Tello GUI and hand control to the Tk main loop."""
    handle = tello.Tello('', 8889)
    player = TelloUI(handle)
    player.root.mainloop()
RECORD_SECONDS = 5 #16000*5 # fixed chunk size # initialize portaudio p = pyaudio.PyAudio() stream = p.open(format=pyaudio.paInt16, channels=1, rate=fs, input=True, frames_per_buffer=CHUNK) start_time = time.time() connection = tello.Tello() connection.send("command") print(connection.send("battery?")) try: while True: # do this as long as you want fresh samples numpydata = [] text = input("Press [ENTER] to continue...") print(connection.send("battery?")) stream.start_stream() for i in range(0, int(fs / CHUNK * RECORD_SECONDS)): data = stream.read(CHUNK) numpydata.append(np.fromstring(data, dtype=np.int16)) stream.stop_stream()
def main():
    """Launch the Tello GUI (snapshots go to ./img/) and run Tk's main loop."""
    handle = tello.Tello('', 8889)
    player = TelloUI(handle, "./img/")
    # Enter the Tkinter event loop; returns when the window closes.
    player.root.mainloop()
""" Example Takeoff and Land Program Created for Tech Garage Contributors: Dexter Dixon, Danny Dasilva """ #This imports all the necessary packages for the program import pygame import tello import time #This variable allows us to use "drone" instead of the long line drone = tello.Tello('192.168.10.2', 8888, imperial=False) #Initializes Pygame and one joystick pygame.init() joystick = pygame.joystick.Joystick(0) joystick.init() #Takeoff functoin drone.takeoff() #Function that stops the program before running the next command for (x) amount of seconds time.sleep(3) #Landing function drone.land()
def main(name, number):
    """Python 2 mission script: find a "CM" target, read a 3-digit number
    from it via an external recognizer, then search for the person `name`
    and land near them.

    Talks to two local socket servers: a YOLO detector (port 8888) and a
    digit recognizer (port 9999). Helper semantics (find_CM, control_drone,
    get_frame, get_client_data) are defined elsewhere — behavior notes
    below are inferred from call sites; TODO confirm against their modules.
    """
    drone = tello.Tello('', 8889)

    # Video loop runs in its own thread so this loop can block freely.
    thread = threading.Thread(target=videoLoop, args=(drone, ))
    thread.start()
    time.sleep(10)

    # Socket servers for the YOLO detector and the digit (CM) recognizer.
    server_yolo = get_socket_server(HOST="127.0.0.1", PORT=8888)
    server_CM = get_socket_server(HOST="127.0.0.1", PORT=9999)

    isTakeOff = raw_input("Do you want to take off the drone? (yes / no)")
    if isTakeOff == "yes":
        drone.takeoff()
        time.sleep(10)
        drone.move_up(0.5)
        time.sleep(7)

    # Phase 1: rotate in 30-degree steps until the CM target (class 3) is found,
    # then steer toward it until control_drone reports "So closed".
    isFindCM = False
    isExtract = False
    total_rotate = 0
    while True:
        if not isFindCM:
            isFindCM = find_CM(drone, server_yolo, 3)
            total_rotate += 30
            if isFindCM:
                print "\n\nEnd find CM\n\n"
            # A full revolution without a hit: climb a little and sweep again.
            if total_rotate == 360:
                total_rotate = 0
                drone.move_up(0.3)
        else:
            total_rotate = 0
            isFindCM, isExtract = control_drone(drone, server_yolo, 3, True)
            if isExtract == "So closed":
                print "\nisExtract!!!!!!!!!!!!!\n"
                # Grab a frame and flush the detector, then read the digits.
                _ = get_frame(drone)
                _ = get_client_data(server_yolo, False)
                time.sleep(1)
                client_data = get_client_data(server_CM, False)
                print "Input number: {}".format(number)
                print "Predicted number: {}".format(client_data)
                # Split the expected number into its individual digits.
                numbers = []
                for i in str(number):
                    numbers.append(int(i))
                break

    # Phase 2: keep re-reading until the recognizer output matches all three
    # expected digits; jitter the heading on failures to change the view.
    while True:
        print "\nisExtract!!!!!!!!!!!!!\n"
        _ = get_frame(drone)
        _ = get_client_data(server_yolo, False)
        time.sleep(1)
        client_data = get_client_data(server_CM, False)
        print "Input number: {}".format(number)
        print "Predicted number: {}".format(client_data)
        if len(client_data) != 3:
            print "Invaild prediction result"
            # Random +/-2 degree nudge to get a different viewpoint.
            magic = np.random.randint(2)
            if magic == 1:
                drone.rotate_cw(2)
            else:
                drone.rotate_ccw(2)
        elif numbers[0] == client_data[0] and numbers[1] == client_data[
                1] and numbers[2] == client_data[2]:
            print "\n\nGood prediction!\n\n"
            break
        else:
            # Wrong digits: wiggle in place and retry.
            print "\n\nWhat are you doing now???????\n\n"
            drone.rotate_cw(2)
            drone.rotate_ccw(2)

    # Clear the segmentation scratch directory between phases.
    files = os.listdir("./Results_segmentation")
    for file in files:
        os.remove(os.path.join("./Results_segmentation", file))

    # Phase 3: same sweep pattern, now searching for the person `name`,
    # and land once close enough.
    isFindPeter = False
    isBack = False
    total_rotate = 0
    while True:
        if not isFindPeter:
            isFindPeter = find_CM(drone, server_yolo, name)
            total_rotate += 30
            if isFindPeter:
                print "\n\nEnd find Peter\n\n"
            if total_rotate == 360:
                total_rotate = 0
                drone.move_up(0.3)
        else:
            total_rotate = 0
            isFindPeter, isBack = control_drone(drone, server_yolo, name, False)
            if isBack == "So closed":
                print "\n\n\nFind Costumer\n\n\n"
                time.sleep(5)
                drone.land()
                time.sleep(10)
                break

    # NOTE: an earlier commented-out rotate/translate search loop lived here;
    # it was superseded by the phase-3 loop above.
    print("Program end !!!")
def initTello():
    """Create the shared Tello handle and store it in the module-global mDrone."""
    global mDrone
    mDrone = tello.Tello()
def main():
    """QR-code viewer: scan each Tello frame with zbar, outline any detected
    QR codes, and print their contents; 't'/'l' take off and land."""
    # Create the drone instance.
    drone = tello.Tello('', 8889, command_timeout=1.0)

    time.sleep(0.5)  # give the connection a moment to stabilize

    # zbar scanner set up for QR recognition.
    scanner = zbar.ImageScanner()
    scanner.parse_config('enable')

    current_time = time.time()  # current-time holder
    pre_time = current_time     # timestamp of the last keep-alive 'command'

    # Loop until Ctrl+C.
    try:
        while True:
            # (A) Grab one video frame; skip bad/empty frames.
            frame = drone.read()
            if frame is None or frame.size == 0:
                continue

            # (B) Convert to BGR for OpenCV and shrink to 480x360.
            image = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
            small_image = cv2.resize(image, dsize=(480, 360))

            # zbar wants a grayscale buffer ('Y800' = 8-bit luma).
            gray_image = cv2.cvtColor(
                small_image, cv2.COLOR_BGR2GRAY)
            rows, cols = gray_image.shape[:2]
            # NOTE: rebinds `image` from the BGR frame to the zbar image.
            image = zbar.Image(cols, rows, 'Y800', gray_image.tostring())
            scanner.scan(image)

            # The scan result holds zero or more symbols; iterate them all.
            for symbol in image:
                qr_type = symbol.type           # symbol kind (e.g. QRCODE)
                qr_msg = symbol.data            # decoded payload text
                qr_positions = symbol.location  # corner points of the code
                print('QR code : %s, %s, %s' %
                      (qr_type, qr_msg, str(qr_positions)))
                # Outline the detected code on the color frame.
                pts = np.array(qr_positions)
                cv2.polylines(small_image, [pts], True, (0, 255, 0),
                              thickness=3)
            del image  # release the zbar image explicitly

            # (X) Display — swap the image here to inspect other stages.
            cv2.imshow('OpenCV Window', small_image)
            time.sleep(1)

            # (Y) Poll the OpenCV window for a key press (1 ms).
            key = cv2.waitKey(1)
            if key == 27:  # ESC quits
                break
            elif key == ord('t'):
                drone.takeoff()  # take off
            elif key == ord('l'):
                flag = 0  # NOTE(review): `flag` is never read in this function
                drone.land()  # land

            # (Z) Send 'command' every 14 s as a keep-alive so the Tello
            # does not auto-land (its idle timeout is 15 s).
            current_time = time.time()
            if current_time - pre_time > 14.0:
                drone.send_command('command')
                pre_time = current_time
    except (KeyboardInterrupt, SystemExit):  # leave the loop on Ctrl+C
        print("SIGINTを検知")

    # Drop the Tello instance (its destructor closes the connection).
    del drone
def main():
    """ArUco chase demo: estimate the pose of a detected marker every frame
    and steer the Tello toward it (rotate to face the marker, then close or
    open the distance along its z axis).

    Camera intrinsics are loaded from "data.txt" (written by a calibration
    run). Key presses in the OpenCV window are forwarded to drone.keyboard().
    """
    drone = tello.Tello('', 8889)
    time.sleep(5)  # let the video stream come up

    # Load camera matrix and distortion coefficients from the calibration file.
    fs = cv2.FileStorage("data.txt", cv2.FILE_STORAGE_READ)
    cameraMatrix = fs.getNode("intrinsic").mat()
    distCoeffs = fs.getNode("distortion").mat()

    # 6x6/250 ArUco dictionary and default detector parameters.
    dictionary = cv2.aruco.Dictionary_get(cv2.aruco.DICT_6X6_250)
    parameters = cv2.aruco.DetectorParameters_create()

    while (1):
        frame = drone.read()
        frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)

        # Detect markers and estimate pose (marker side length 13.7 — units
        # must match the calibration; presumably centimeters, TODO confirm).
        markerCorners, markerIds, rejectedCandidates = cv2.aruco.detectMarkers(
            frame, dictionary, parameters=parameters)
        frame = cv2.aruco.drawDetectedMarkers(frame, markerCorners, markerIds)
        rvec, tvec, _objPoints = cv2.aruco.estimatePoseSingleMarkers(
            markerCorners, 13.7, cameraMatrix, distCoeffs)

        try:
            # FIX: moved inside try — when no marker is detected, tvec is not
            # indexable and this used to raise before reaching the handler.
            t_vec = tvec[0][0]
            string = "x: " + str(round(t_vec[0], 3)) + ", " + "y: " + str(
                round(t_vec[1], 3)) + " z: " + ", " + str(round(t_vec[2], 3))
            cv2.putText(frame, string, (10, 40), cv2.FONT_HERSHEY_SIMPLEX, 1,
                        (0, 255, 255), 1, cv2.LINE_AA)
            frame = cv2.aruco.drawAxis(frame, cameraMatrix, distCoeffs, rvec,
                                       tvec, 10)

            # Rotation matrix of the marker; project its -z axis into the
            # camera's xz plane to get the horizontal bearing to the marker.
            rtm = cv2.Rodrigues(rvec)
            z = [0, 0, 1]
            # FIX: this was assigned to `v_` but read below as `v`, so every
            # frame died with a NameError that the bare except swallowed.
            v = -np.dot(np.array(rtm[0]), np.array(z))
            v[1] = 0  # project onto the xz plane
            radis = math.atan2(v[0], v[2])
            angle = math.degrees(radis)

            distance = 0.3  # translation step per command
            if np.abs(angle / 2.0) > 20:
                # Facing error too large: rotate toward the marker first.
                if angle > 0:
                    drone.rotate_cw(np.abs(angle / 3))
                else:
                    drone.rotate_ccw(np.abs(angle / 3))
            else:
                # Roughly facing the marker: regulate distance along z.
                if t_vec[2] < 70:
                    drone.move_backward(distance)
                else:
                    drone.move_forward(distance)
        except Exception:
            # FIX: narrowed from a bare `except:` (which also ate Ctrl+C).
            # Typically reached when no marker is visible this frame.
            pass
        finally:
            cv2.imshow("frame", frame)
            key = cv2.waitKey(32)
            if key != -1:
                drone.keyboard(key)
def main():
    """Create the drone, wrap it in the Tk UI, and run the event loop."""
    handle = tello.Tello('', 8889)
    player = TelloUI(handle, "./img/")
    # Blocks here until the UI window is closed.
    player.root.mainloop()
import tello
import time

# Connect to the Tello at its access-point address. The flight sequence
# below is disabled, so this script only issues an immediate landing.
drone = tello.Tello('192.168.10.3', 8889)
# Disabled test sequence, kept for reference:
#time.sleep(5)
#drone.set_speed(2)
#time.sleep(1)
#drone.move_forward(10)
#time.sleep(10)
#drone.rotate_cw(180)
#time.sleep(5)
drone.land()
def main():
    """Python 2 QR-driven autopilot: a QR code seen steadily for 50 frames
    becomes a command string; each character is then executed in order
    ('T'=takeoff, 'L'=land, u/d/c/w/f/b/l/r = moves, 'W'=wait)."""
    # Create the drone instance (long timeout: moves block until done).
    drone = tello.Tello('', 8889, command_timeout=10.0)

    time.sleep(0.5)  # give the connection a moment to stabilize

    # zbar scanner set up for QR recognition.
    scanner = zbar.ImageScanner()
    scanner.parse_config('enable')

    pre_qr_msg = None  # text of the previously seen QR code
    count = 0          # consecutive frames the same text has been seen

    commands = None    # confirmed QR text, consumed as a command string
    command_index = 0  # index of the next command character to run
    flag = 0           # 0 = scanning mode, 1 = autonomous command mode

    current_time = time.time()  # current-time holder
    pre_time = current_time     # timestamp of the last keep-alive 'command'

    # Loop until Ctrl+C.
    try:
        while True:
            # (A) Grab one video frame; skip bad/empty frames.
            frame = drone.read()
            if frame is None or frame.size == 0:
                continue

            # (B) Convert to BGR for OpenCV and shrink to 480x360.
            image = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
            small_image = cv2.resize(image, dsize=(480, 360))

            # While NOT flying autonomously, look for QR codes.
            if flag == 0:
                # zbar wants a grayscale 'Y800' buffer.
                gray_image = cv2.cvtColor(
                    small_image, cv2.COLOR_BGR2GRAY)
                rows, cols = gray_image.shape[:2]
                image = zbar.Image(cols, rows, 'Y800',
                                   gray_image.tostring())
                scanner.scan(image)

                # Showing more than one QR code at a time is unsupported:
                # this keeps only the last symbol's text.
                for symbol in image:
                    qr_msg = symbol.data

                # Debounce: the same text must be seen 50 frames in a row
                # before it is accepted as the command string.
                try:
                    if qr_msg != None:  # nothing recognized -> do nothing
                        if qr_msg == pre_qr_msg:
                            count += 1  # same code still visible
                            if count > 50:
                                print('QR code 認識 : %s' % (qr_msg))
                                commands = qr_msg
                                command_index = 0
                                flag = 1   # switch to autonomous mode
                                count = 0  # reset after confirming
                        else:
                            count = 0
                        pre_qr_msg = qr_msg  # remember for next frame
                    else:
                        count = 0  # lost sight of any code
                except ValueError, e:
                    # Occasional ValueError from the comparison above is
                    # caught and ignored (legacy workaround).
                    print("ValueError")
                del image

            # Autonomous mode: execute one command character per frame.
            if flag == 1:
                print commands[command_index]
                key = commands[
                    command_index]  # one of 'TLfblrudwcW'
                if key == 'T':
                    drone.takeoff()           # take off
                    time.sleep(5)
                elif key == 'L':
                    flag = 0                  # back to scanning mode
                    drone.land()              # land
                    time.sleep(4)
                elif key == 'u':
                    drone.move_up(0.5)        # ascend
                elif key == 'd':
                    drone.move_down(0.5)      # descend
                elif key == 'c':
                    drone.rotate_ccw(20)      # yaw left
                elif key == 'w':
                    drone.rotate_cw(20)       # yaw right
                elif key == 'f':
                    drone.move_forward(0.5)   # forward
                elif key == 'b':
                    drone.move_backward(0.5)  # backward
                elif key == 'l':
                    drone.move_left(0.5)      # left
                elif key == 'r':
                    drone.move_right(0.5)     # right
                elif key == 'W':
                    time.sleep(5)             # wait
                command_index += 1
                pre_time = time.time()  # moves count as keep-alive traffic

            # (X) Display — swap the image here to inspect other stages.
            cv2.imshow('OpenCV Window', gray_image)

            # (Y) Poll the OpenCV window for a key press (1 ms).
            key = cv2.waitKey(1)
            if key == 27:  # ESC quits
                break
            elif key == ord('t'):
                drone.takeoff()  # manual take off
            elif key == ord('l'):
                flag = 0
                drone.land()     # manual land, also leaves autonomous mode

            # (Z) Send 'command' every 14 s as a keep-alive (the Tello's
            # idle timeout is 15 s).
            current_time = time.time()
            if current_time - pre_time > 14.0:
                drone.send_command('command')
                pre_time = current_time
    except (KeyboardInterrupt, SystemExit):  # leave the loop on Ctrl+C
        print("SIGINTを検知")

    # Drop the Tello instance (its destructor closes the connection).
    del drone
def main(name, number):
    """Python 2 mission: locate the "CM" target, verify a 3-digit number
    read from it, then search for the person `name` and land close by.

    Uses two local socket bridges: a YOLO detector and a digit recognizer.
    Helper semantics (check_flag, movement.control_drone, detect.get_frame,
    bridge.get_client_data) live in sibling modules — notes below are
    inferred from call sites; TODO confirm against those modules.
    """
    drone = tello.Tello('', DRONE_PORT)
    sleep(10)

    # Socket servers for the YOLO detector and the digit (CM) recognizer.
    server_yolo = bridge.get_socket_server(HOST, YOLO_PORT)
    server_cm = bridge.get_socket_server(HOST, CM_PORT)

    takeoff_flag = raw_input("Do you want to take off the drone? (y / n)")
    if takeoff_flag == "y":
        drone.takeoff()
        sleep(5)
        drone.move_up(DEFAULT_HEIGHT)

    # Phase 1: sweep until the CM target is found, then steer toward it
    # until the image has been extracted.
    find_cm_flag = False
    extract_flag = False
    total_rotate = .0
    while True:
        if not find_cm_flag:
            find_cm_flag, total_rotate = check_flag(drone, server_yolo,
                                                    CLASS_INDEX["CM"],
                                                    total_rotate)
        else:
            total_rotate = 0
            find_cm_flag, extract_flag = movement.control_drone(
                drone, server_yolo, CLASS_INDEX["CM"])
            if extract_flag:
                logging.info("Image been extracted!")
                break

    # Phase 2: keep re-reading until the recognizer's three digits match
    # the expected number; nudge the heading on failures.
    while True:
        _ = detect.get_frame(drone)
        _ = bridge.get_client_data(server_yolo, False)
        client_data = bridge.get_client_data(server_cm, False)
        logging.info("Input number: {}".format(number))
        logging.info("Predicted number: {}".format(client_data))
        numbers = get_number_from_string(number)
        if len(client_data) != 3:
            logging.info("Prediction failed")
            movement.random_rotate(drone)
        elif numbers[0] == client_data[0] and numbers[1] == client_data[1] and numbers[2] == client_data[2]:
            logging.info("\n\nPrediction succeeded!\n\n")
            break
        else:
            # Wrong digits: wiggle in place and retry.
            logging.info("\n\nWhat are you doing now???????\n\n")
            drone.rotate_cw(RANDOM_ROTATE)
            drone.rotate_ccw(RANDOM_ROTATE)

    # Clear the segmentation scratch directory between phases.
    empty_folder(SEGMENTATION_FOLDER)

    # Phase 3: same sweep, now searching for the person `name`; land once
    # close enough ("So closed" zone flag).
    find_customer_flag = False
    zone_flag = False
    total_rotate = 0
    while True:
        if not find_customer_flag:
            find_customer_flag, total_rotate = check_flag(drone, server_yolo,
                                                          name, total_rotate)
        else:
            total_rotate = 0
            find_customer_flag, zone_flag = movement.control_drone(
                drone, server_yolo, name)
            if zone_flag == "So closed":
                logging.info("\n\n\nFind Costumer!\n\n\n")
                drone.land()
                break

    # NOTE: an earlier commented-out rotate/translate search loop lived here;
    # it was superseded by the phase-3 loop above.
    logging.info("Program end !!!")
def __init__(self, Dialog, device, bObjectDetection):
    """Qt dialog controller for the Tello.

    Dialog            -- the Qt dialog this UI is installed into
    device            -- device identifier passed to the object detector
                         (presumably a torch/OpenCV device string; TODO confirm)
    bObjectDetection  -- when True, load the MobileNet-SSD detector
    """
    super().__init__()
    self.setupUi(Dialog)
    self.bindFuncs()

    # Car-detection capture state: render size, capture flag, last timestamp.
    self.carRenderW = 100
    self.carRenderH = 100
    self.bCaptureCar = False
    self.carDateTime = None
    # Dog-detection capture state, mirroring the car fields.
    self.dogRenderW = 100
    self.dogRenderH = 100
    self.bCaptureDog = False
    self.dogDateTime = None

    # Buffered log lines, flushed by a 10 ms Qt timer.
    self.logBuffer = []
    self.logWriteTimer = QtCore.QTimer()
    self.logWriteTimer.timeout.connect(self.TimerEvent10ms)
    self.logWriteTimer.start(10)

    # Connection flag and per-keypress movement step sizes.
    self.is_Connect=False
    self.delta_height=20
    self.delta_rotate=45
    self.delta_LR=20
    self.delta_FB=20
    self.delta_LD=20
    self.delta_RD=20

    # Drone handle; telemetry lands in stateReceive, log lines in log.
    self.tello = tello.Tello(self.log, self.stateReceive)
    self.updateIP()

    # Latest telemetry values as strings, keyed by the Tello state fields.
    self.stateDict = {
        "pitch": "0",
        "roll": "0",
        "yaw": "0",
        "vgx": "0",
        "vgy": "0",
        "vgz": "0",
        "templ": "0",
        "temph": "0",
        "tof": "0",
        "h": "0",
        "bat": "0",
        "baro": "0.0",
        "time": "0",
        "agx": '0.0',
        "agy": '0.0',
        "agz": '0.0',
        "wifi": '99'
    }

    # Most recent unprocessed camera frame.
    self.originFrame = None

    # Optionally bring up the MobileNet-SSD object detector (import deferred
    # so the dependency is only needed when detection is enabled).
    self.bObjectDetection = bObjectDetection
    if self.bObjectDetection:
        print('Object Detector initialize...')
        from ObjectDetector_Mobilenet_SSD import ObjectDetector_Mobilenet_SSD
        self.objDetector = ObjectDetector_Mobilenet_SSD(device, self.getOriginFrame)

    self.requestSNR = False
    self.button_on_off(False)
# # Created by: PyQt5 UI code generator 5.10.1 # # WARNING! All changes made in this file will be lost! import math import traceback import GlobalConfig import tello import numpy as np import time global drone PI = 3.14 sleepTime = 1 drone = tello.Tello(GlobalConfig.localIp, GlobalConfig.localPort, False, 20, "192.168.10.1", 8889) ''' tello的控制类 ''' class TelloController: ''' 根据打点计算飞行路线 ''' def fly(self, point, isDebug): global drone begin, end = False, False #是否截止点 step, total = 0, len(point) - 1 #计算飞行步数,当前步数 print("预计飞行[%d]次航程" % total) #起始向量,飞机头向下
def main():
    """Hand the raw drone to the DroneUI controller, then clean up windows."""
    raw_drone = tello.Tello('', 8889)
    #raw_drone._receive_video_thread()
    app = DroneUI(raw_drone)
    # DroneUI has returned: tear down any OpenCV windows it left open.
    cv2.destroyAllWindows()
# main ############################################################################### print('Tello Controller ') print('+------------------------------------+') print('| ESC(quit) 1(360) 2(bounce) |') print('+------------------------------------+') print('| |') print('| w up |') print('| a d left right |') print('| s down |') print('| |') print('| space(takeoff/land) |') print('| |') print('+------------------------------------+') mDrone = tello.Tello() keyboard.hook(handleKey) while True: if isKeyPressed(KEY_MASK_ESC): break; # Toggle Keys if isKeyToggled(KEY_MASK_SPC): if isKeyPressed(KEY_MASK_SPC): mDrone.takeOff() print('take off') else: mDrone.land() print('land') clearToggle()
#Made modifications to API. Prints test; does not detect false flag.
import tello
import time

drone = tello.Tello('', 9000, imperial=False)

# Scripted flight: each step is (command, arguments, pause-after in seconds).
# The command's response is printed so failures are visible in the console.
flight_steps = [
    (drone.takeoff, (), 6),
    (drone.move_up, (90,), 2),
    (drone.move_forward, (300,), 1),
    (drone.rotate_cw, (90,), 2),
    (drone.move_forward, (200,), 1),
    (drone.rotate_cw, (130,), 2),
    (drone.move_forward, (250,), 2),
]
for command, args, pause in flight_steps:
    print(command(*args))
    time.sleep(pause)

# Land and report the total flight time.
print(drone.land())
print('Flight time: %s' % drone.get_flight_time())
def main():
    """Build the DroneUI (snapshots in ./img/) and enter its Tk main loop."""
    handle = tello.Tello('', 8889)
    app = DroneUI(handle, "./img/")
    app.root.mainloop()
'''''' '''''' '''''' '''''' '''''' '''''' '''''' '''''' '''''' '''''' '''''' '''''' '''''' '''''' '''''' ''' Drone Project Authors: Blake Legge, Lyndon Loveys, Nicholas Hodder, Annette Clarke, Daniel Harris ''' '''''' '''''' '''''' '''''' '''''' '''''' '''''' '''''' '''''' '''''' '''''' '''''' '''''' '''''' '''''' # Use "pip install python-tk" if tkinter is not installed # Import tkinter module for creating gui import tkinter as tk # Import Tello module import tello as ta # Create handles for the Tello class in both tello.py and tello_abridged.py files t = ta.Tello() # creating the actual window window = tk.Tk() # Functions ''' DEFAULT_TELLO_COMMAND_IP - A constant representing the default IP address for commands on Tello per the documentation. ''' DEFAULT_TELLO_COMMAND_IP = "192.168.10.1" ''' DEFAULT_TELLO_COMMAND_PORT - A constant representing the default port for commands on Tello per the documentation. ''' DEFAULT_TELLO_COMMAND_PORT = 8889 # Create grid for everything
self.rotate_pid.init_last_time() def _pid_clear(self): self.foward_pid.clear() self.rotate_pid.clear() def _sendingCommand(self): """ start a while loop that sends 'command' to tello every 60 second """ while True: self.tello.get_battery() # self.tello.send_command('command') time.sleep(30) if __name__ == '__main__': # detect_model = darknet tracker = TelloTracker(darknet, tello.Tello('', 8889), output_video='test.mp4') tracker.track() # while True: # tracker.show_frame('frame') # key = cv2.waitKey(1) & 0xFF # if key == ord("q"): # tracker.close() # break
import tello
import time

# Connect, then report battery level and temperature before streaming.
my_drone = tello.Tello()
print("start")
print(my_drone.get_battery())
print(my_drone.get_temp())

# Stream video for two minutes, then stop and re-check the temperature.
my_drone.streamon()
# Disabled flight test, kept for reference:
# my_drone.takeoff()
# my_drone.up(40)
# my_drone.back(30)
# my_drone.forward(30)
# time.sleep(120)
# my_drone.land()
time.sleep(120)
my_drone.streamoff()
print(my_drone.get_temp())
# MicroPython controller: join the Tello's Wi-Fi AP, mirror progress on an
# SSD1306 OLED, then run a short scripted flight.
wifi = network.WLAN(network.STA_IF)
wifi.active(True)
wifi.connect(b'iPal_TELLO')

# OLED on I2C pins 4 (SCL) / 5 (SDA), 128x64 pixels.
i2c = I2C(scl=Pin(4), sda=Pin(5))
oled = SSD1306_I2C(128, 64, i2c)

# Show a connection counter until the Wi-Fi link is up.
attempt = 1
while not wifi.isconnected():
    oled.fill(0)
    oled.text('Connecting... {}'.format(attempt), 0, 0)
    oled.show()
    attempt += 1
    time.sleep(0.5)

# Bind to the Tello and enter SDK command mode.
drone = tello.Tello('192.168.10.2', 8888)
oled.fill(0)
drone.command('command')
oled.text('tello initialized!', 0, 0)
oled.show()
time.sleep(2)

# Take off, announcing each step on the display.
oled.fill(0)
oled.text('takeoff', 0, 0)
drone.takeoff()
oled.show()
time.sleep(5)

oled.fill(0)
oled.text('flip left', 0, 0)
def main():
    """Python 2 ArUco autopilot: an ArUco marker id seen steadily for 50
    frames triggers its mapped flight command (0=takeoff, 1=land, 2-9 =
    moves/turns); the keyboard still works for manual control."""
    # Use OpenCV's ArUco marker library.
    aruco = cv2.aruco
    # 4x4-dot dictionary with ids up to 50.
    dictionary = aruco.getPredefinedDictionary(
        aruco.DICT_4X4_50)

    # Create the drone instance (very short command timeout keeps the loop responsive).
    drone = tello.Tello('', 8889, command_timeout=.01)

    current_time = time.time()  # current-time holder
    pre_time = current_time     # timestamp of the last keep-alive 'command'

    time.sleep(0.5)  # give the connection a moment to stabilize

    pre_idno = None  # marker id seen on the previous frame
    count = 0        # consecutive frames the same id has been seen

    # Loop until Ctrl+C.
    try:
        while True:
            # (A) Grab one video frame; skip bad/empty frames.
            frame = drone.read()
            if frame is None or frame.size == 0:
                continue

            # (B) Convert to BGR for OpenCV and shrink to 480x360.
            image = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
            small_image = cv2.resize(image, dsize=(480, 360))

            # Detect ArUco markers and outline them on the frame.
            corners, ids, rejectedImgPoints = aruco.detectMarkers(
                small_image, dictionary)
            aruco.drawDetectedMarkers(small_image, corners, ids, (0, 255, 0))

            # Debounce: the same id must be seen 50 frames in a row before
            # its command fires.
            try:
                if ids != None:  # no markers recognized -> do nothing
                    idno = ids[0, 0]  # several may be visible; take the first
                    if idno == pre_idno:  # same id as last frame
                        count += 1
                        if count > 50:  # 50 consecutive frames: run the command
                            print("ID=%d" % (idno))
                            if idno == 0:
                                drone.takeoff()           # take off
                            elif idno == 1:
                                drone.land()              # land
                                time.sleep(3)
                            elif idno == 2:
                                drone.move_up(0.3)        # ascend
                            elif idno == 3:
                                drone.move_down(0.3)      # descend
                            elif idno == 4:
                                drone.rotate_ccw(20)      # yaw left
                            elif idno == 5:
                                drone.rotate_cw(20)       # yaw right
                            elif idno == 6:
                                drone.move_forward(0.3)   # forward
                            elif idno == 7:
                                drone.move_backward(0.3)  # backward
                            elif idno == 8:
                                drone.move_left(0.3)      # left
                            elif idno == 9:
                                drone.move_right(0.3)     # right
                            count = 0  # reset after sending the command
                    else:
                        count = 0
                    pre_idno = idno  # remember for next frame
                else:
                    count = 0  # lost sight of all markers
            except ValueError, e:
                # The `ids != None` comparison occasionally raises; catch
                # and ignore it (legacy workaround).
                print("ValueError")

            # (X) Display — swap the image here to inspect other stages.
            cv2.imshow('OpenCV Window', small_image)

            # (Y) Poll the OpenCV window for a key press (1 ms).
            key = cv2.waitKey(1)
            if key == 27:  # ESC quits
                break
            elif key == ord('t'):
                drone.takeoff()            # take off
            elif key == ord('l'):
                drone.land()               # land
            elif key == ord('w'):
                drone.move_forward(0.3)    # forward
            elif key == ord('s'):
                drone.move_backward(0.3)   # backward
            elif key == ord('a'):
                drone.move_left(0.3)       # left
            elif key == ord('d'):
                drone.move_right(0.3)      # right
            elif key == ord('q'):
                drone.rotate_ccw(20)       # yaw left
            elif key == ord('e'):
                drone.rotate_cw(20)        # yaw right
            elif key == ord('r'):
                drone.move_up(0.3)         # ascend
            elif key == ord('f'):
                drone.move_down(0.3)       # descend

            # (Z) Send 'command' every 5 s as a keep-alive.
            current_time = time.time()
            if current_time - pre_time > 5.0:
                drone.send_command('command')
                pre_time = current_time
    except (KeyboardInterrupt, SystemExit):  # leave the loop on Ctrl+C
        print("SIGINTを検知")

    # Drop the Tello instance (its destructor closes the connection).
    del drone