def detect(self):
    """Grab one frame from the capture stream and estimate marker poses.

    Looks for exactly two ArUco markers: id 0 (world marker) and id 1
    (tello marker).  On success sets ``self.is_detected``, updates
    ``self.pose_world`` / ``self.pose_tello`` and the relative pose
    ``self.pose_world_to_tello``, and stores the grey (axis-annotated)
    frame in ``self.image``.
    """
    self.is_detected = False
    ret, frame = self.capture.read()
    # Bug fix: a failed grab returns (False, None); passing None into
    # cv2.cvtColor raises, so bail out and keep the previous state.
    if not ret or frame is None:
        return
    grey = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # detection
    corners, ids, rejectedImgPoints = aruco.detectMarkers(grey, self.dictionary)
    if ids is not None and len(ids) == 2:
        self.is_detected = True
    if self.is_detected:
        # NOTE(review): len(ids) == 2 does not guarantee that ids 0 AND 1
        # are both present; a stale pose may be combined below — confirm
        # whether duplicate ids can occur in practice.
        for index in range(len(ids)):
            if ids[index][0] == 0:
                rvec, tvec, _ = aruco.estimatePoseSingleMarkers(
                    corners[index], self.marker_size_world,
                    camera_config_py.camera_matrix,
                    camera_config_py.distortion)
                self.pose_world = Pose(cv2.Rodrigues(rvec[0])[0], tvec[0].T)
                # draw axis for the aruco markers
                aruco.drawAxis(grey, camera_config_py.camera_matrix,
                               camera_config_py.distortion, rvec, tvec,
                               self.marker_size_world)
            if ids[index][0] == 1:
                rvec, tvec, _ = aruco.estimatePoseSingleMarkers(
                    corners[index], self.marker_size_tello,
                    camera_config_py.camera_matrix,
                    camera_config_py.distortion)
                self.pose_tello = Pose(cv2.Rodrigues(rvec[0])[0], tvec[0].T)
                # draw axis for the aruco markers
                aruco.drawAxis(grey, camera_config_py.camera_matrix,
                               camera_config_py.distortion, rvec, tvec,
                               self.marker_size_tello)
        # relative pose of the tello marker expressed in the world frame
        self.pose_world_to_tello = Pose(
            np.dot(self.pose_world.R.T, self.pose_tello.R),
            np.dot(self.pose_world.R.T, self.pose_tello.t - self.pose_world.t))
    self.image = grey
def __init__(self):
    """Open the remote MJPEG stream and initialise detection state."""
    # detection flag and last processed frame (placeholder until detect runs)
    self.is_detected = False
    self.image = np.zeros((1, 1), dtype=np.float32)
    # ArUco dictionary: 10 custom 6x6 markers
    self.dictionary = aruco.custom_dictionary(10, 6)
    # video source: mjpeg-streamer endpoint on the local network
    self.url = "http://192.168.3.22:8080/?action=stream"
    self.capture = cv2.VideoCapture(self.url)
    # estimated poses: world marker, tello marker, and their relative pose
    self.pose_world = Pose()
    self.pose_tello = Pose()
    self.pose_world_to_tello = Pose()
    # marker edge lengths used for pose estimation (units per calibration)
    self.marker_size_world = 0.04
    self.marker_size_tello = 0.02
def __init__(self, path=None):
    """Create a PDB container; if *path* is given, the file is parsed at once.

    :param path: optional path of a PDB file to load immediately.
    """
    # model number -> Pose; model 1 exists even before any file is read
    self.__models = {1: Pose.Pose()}
    self.__header = ''
    self.__inputFile = path
    # Bug fix: the original assigned __inputFile twice and unconditionally
    # called read_pdb(), which raises open(None) when no path was given.
    if path is not None:
        self.read_pdb()
def read_pdb(self):
    """Parse the PDB file at ``self.__inputFile`` into per-model poses.

    MODEL records open a new model, ATOM/HETATM records are added to the
    current model, TER/ENDMDL/END records are skipped, and every other
    line is accumulated into the header text.
    """
    model = 1
    self.__models[model] = Pose.Pose(model)
    # 'with' guarantees the file handle is closed (the original leaked it)
    with open(self.__inputFile, 'r') as pdb_file:
        for line in pdb_file:
            if "MODEL" == line[0:5]:
                # line.split() replaces the Python-2-only string.split(line)
                model = int(line.split()[1])
                self.__models[model] = Pose.Pose(model)
            elif ("ATOM" == line[0:4]) or ("HETATM" == line[0:6]):
                self.__models[model].add_atom(PDB_util.pdb2Atom(line))
            elif ("TER" == line[0:3]) or ("ENDMDL" == line[0:6]) or ("END" == line[0:3]):
                continue
            else:
                self.__header += line
def __init__(self):
    """Initialise detection state for the world marker.

    The tello-marker fields present in the sibling detector are commented
    out / disabled in this variant.
    """
    # ArUco dictionary: 10 custom 6x6 markers
    self.dictionary = aruco.custom_dictionary(10, 6)
    # detection flag and last annotated output frame
    self.is_detected = False
    self.image_out = np.zeros((1, 1), dtype=np.float32)
    # world marker edge length used for pose estimation
    self.marker_size_world = 0.2
    # latest estimated world-marker pose
    self.pose_world = Pose()
def converge(self):
    """
    If most particles are around the same location, compute the mean and publish
    :return: None
    """
    global root
    threshold_converge = 1
    n = len(self.particles)
    # Robustness fix: the original divided by n unconditionally and raised
    # ZeroDivisionError when the particle set was empty.
    if n == 0:
        self.converged = False
        return
    # accumulate the mean position and heading
    sumX, sumY, sumT = 0, 0, 0
    for p in self.particles:
        sumX += p.pose.x
        sumY += p.pose.y
        sumT += p.pose.theta
    center = Pose(0, 0, 0)
    center.x, center.y = sumX / n, sumY / n
    # any particle farther than the threshold from the mean => not converged
    for i, p in enumerate(self.particles):
        dist = math.hypot(p.pose.x - center.x, p.pose.y - center.y)
        if dist > threshold_converge:
            print(
                str(i) + ': (' + str(center.x) + ',' + str(center.y) + ')')
            self.converged = False
            return
    print(len(self.particles))
    # update particles as mean the particle
    self.particles = [Particle(center.x, center.y, sumT / n, 1)]
    self.converged = True
    print('Converged at: (' + str(center.x) + ',' + str(center.y) +
          ') , theta:' + str(sumT / n))
    print('Time: ' + str(time.time() - self.start_time))
    # publish the converged pose and shut the node down
    loc_x = self.particles[0].pose.x
    loc_y = self.particles[0].pose.y
    loc_t = self.particles[0].pose.theta
    self.pub_pose.publish(str(loc_x) + "," + str(loc_y) + "," + str(loc_t))
    root.quit()
    rospy.signal_shutdown("Shutting down Localization node...........")
def detect(self, image):
    """Detect the single world marker (id 2) in *image*.

    On success sets ``self.is_detected`` and updates ``self.pose_world``
    with the inverted, axis-rotated pose; the axis-annotated image is
    stored in ``self.image_out``.  (The previously commented-out dead
    code for a second tello marker has been removed.)
    """
    self.is_detected = False
    # detection
    corners, ids, rejectedImgPoints = aruco.detectMarkers(
        image, self.dictionary)
    if ids is not None:
        if len(ids) == 1:
            self.is_detected = True
    if self.is_detected:
        for index in range(len(ids)):
            if ids[index][0] == 2:
                rvec, tvec, _ = aruco.estimatePoseSingleMarkers(
                    corners[index], self.marker_size_world,
                    camera_config_tello.camera_matrix,
                    camera_config_tello.distortion)
                self.pose_world = Pose(
                    cv2.Rodrigues(rvec[0])[0], tvec[0].T)
                # transform: inverts the marker->camera pose and applies a
                # +/-90 deg rotation about x — presumably to re-express the
                # camera pose in the world frame; TODO confirm convention
                mat_t = np.dot(
                    Pose.rotational_x(-pi * 0.5).T, self.pose_world.R.T)
                self.pose_world.R = np.dot(mat_t, Pose.rotational_x(pi * 0.5))
                self.pose_world.t = -np.dot(mat_t, self.pose_world.t)
                # draw axis for the aruco markers
                aruco.drawAxis(image, camera_config_tello.camera_matrix,
                               camera_config_tello.distortion, rvec, tvec,
                               self.marker_size_world)
    self.image_out = image
# anglerpitch=degrees(atan(self.acc_x/self.acc_z)) anglerroll = roll anglerpitch = -pitch visual_state = self.visual_state return bat, is_fly, tftimer, height, wifi, anglerroll, anglerpitch, velz, velxy, posx, posy, posz, pitch, roll, yew, visual_state # 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 if __name__ == '__main__': ispose = 0 fps = FPS() tello = Tello() if ispose == 1: my_pose = Pose() frame_skip = 300 for frame in tello.container.decode(video=0): if 0 < frame_skip: frame_skip = frame_skip - 1 continue fps.update() start_time = time.time() image = cv2.cvtColor(numpy.array(frame.to_image()), cv2.COLOR_RGB2BGR) if ispose == 1: image = cv2.resize(image, (640, 480)) show = my_pose.get_kp(image) if show[2][0]: # 值判断一个就好 cv2.circle(image, (show[2][0], show[2][1]), 3, (0, 0, 255), -1)