def __call__(self, input):
    """Score either a whole pose or a single fragment index.

    Two modes:
      * scorefxn(pose):     assign a Residue object to every position of
                            the pose and return the rescored pose.
      * scorefxn(frag_idx): return the fragment score for that index.
    """
    assert isPose(input) or isFragIdx(input)
    if isPose(input):
        # Mode 1: a residual pose.  update_pose() pushes the fragment
        # indices of the input into self.__pose; the new ScoreTable is
        # then used to rescore them one residue at a time.
        self.update_pose(input)
        rescored = Pose()
        for frag_idx in self.__pose:
            rescored.update_residue(self.score_evaluator(frag_idx, True))
        return rescored
    elif isFragIdx(input):
        # Mode 2: score a single fragment index.
        return self.score_evaluator(input)
def detect(self):
    """Grab one frame and detect the two ArUco markers (ids 0 and 1).

    Side effects: updates self.is_detected, self.pose_world,
    self.pose_tello, self.pose_world_to_tello and self.image (the grey
    frame with marker axes drawn on it).
    """
    self.is_detected = False
    ret, frame = self.capture.read()
    if not ret or frame is None:
        # Bug fix: the original ignored the read() status flag, so a
        # dropped frame made cv2.cvtColor crash on None.
        return
    grey = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # detection
    corners, ids, rejectedImgPoints = aruco.detectMarkers(grey, self.dictionary)
    if ids is not None:
        if len(ids) == 2:
            self.is_detected = True
    if self.is_detected:
        for index in range(len(ids)):
            if ids[index][0] == 0:
                # world reference marker
                rvec, tvec, _ = aruco.estimatePoseSingleMarkers(
                    corners[index], self.marker_size_world,
                    camera_config_py.camera_matrix,
                    camera_config_py.distortion)
                self.pose_world = Pose(cv2.Rodrigues(rvec[0])[0], tvec[0].T)
                # draw axis for the aruco markers
                aruco.drawAxis(grey, camera_config_py.camera_matrix,
                               camera_config_py.distortion, rvec, tvec,
                               self.marker_size_world)
            if ids[index][0] == 1:
                # marker mounted on the tello
                rvec, tvec, _ = aruco.estimatePoseSingleMarkers(
                    corners[index], self.marker_size_tello,
                    camera_config_py.camera_matrix,
                    camera_config_py.distortion)
                self.pose_tello = Pose(cv2.Rodrigues(rvec[0])[0], tvec[0].T)
                # draw axis for the aruco markers
                aruco.drawAxis(grey, camera_config_py.camera_matrix,
                               camera_config_py.distortion, rvec, tvec,
                               self.marker_size_tello)
        # relative pose of the tello expressed in the world-marker frame
        self.pose_world_to_tello = Pose(
            np.dot(self.pose_world.R.T, self.pose_tello.R),
            np.dot(self.pose_world.R.T, self.pose_tello.t - self.pose_world.t))
    self.image = grey
def residualize_pose(self):
    """Build and return a Pose whose residues come from rescoring every
    fragment index currently held in self.__pose."""
    pose_out = Pose()
    for idx in self.__pose:
        residue = self.score_evaluator(idx, True)
        pose_out.update_residue(residue)
    return pose_out
def __init__(self):
    """Open the MJPEG camera stream and initialise ArUco detection state."""
    # video source (motion-jpeg HTTP stream)
    self.url = "http://192.168.3.22:8080/?action=stream"
    self.capture = cv2.VideoCapture(self.url)
    # marker dictionary: 10 custom 6x6 markers
    self.dictionary = aruco.custom_dictionary(10, 6)
    # detection outputs, refreshed by detect()
    self.is_detected = False
    self.image = np.zeros((1, 1), dtype=np.float32)
    # poses computed from the two markers
    self.pose_world = Pose()
    self.pose_tello = Pose()
    self.pose_world_to_tello = Pose()
    # physical marker edge lengths (presumably metres -- TODO confirm)
    self.marker_size_world = 0.04
    self.marker_size_tello = 0.02
def __init__(self, path=None):
    """Create a model container; if `path` is given, parse that PDB file
    immediately via read_pdb().

    :param path: optional path to a PDB file.
    """
    # model number -> Pose; model 1 always exists as a default
    self.__models = {1: Pose.Pose()}
    self.__header = ''
    # Fix: the original assigned self.__inputFile = path twice (once
    # unconditionally and again inside the if); assign it once.
    self.__inputFile = path
    if path is not None:
        self.read_pdb()
def read_pdb(self):
    """Parse self.__inputFile (PDB format) into self.__models.

    A new Pose is created per MODEL record (model 1 is created up front
    for files with no MODEL records).  ATOM/HETATM lines become atoms of
    the current model, TER/ENDMDL/END records are skipped, and every
    other line accumulates in self.__header.
    """
    model = 1
    self.__models[model] = Pose.Pose(model)
    # Fixes: use a context manager so the file handle is closed, and
    # line.split() instead of the Python-2-only string.split(line).
    with open(self.__inputFile, 'r') as pdb_file:
        for line in pdb_file:
            if "MODEL" == line[0:5]:
                model = int(line.split()[1])
                self.__models[model] = Pose.Pose(model)
            elif ("ATOM" == line[0:4]) or ("HETATM" == line[0:6]):
                self.__models[model].add_atom(PDB_util.pdb2Atom(line))
            elif ("TER" == line[0:3]) or ("ENDMDL" == line[0:6]) or ("END" == line[0:3]):
                continue
            else:
                self.__header += line
def __init__(self):
    """Initialise ArUco detection state for the single world marker."""
    # marker dictionary: 10 custom 6x6 markers
    self.dictionary = aruco.custom_dictionary(10, 6)
    # detection outputs, refreshed by detect()
    self.is_detected = False
    self.image_out = np.zeros((1, 1), dtype=np.float32)
    # world marker pose and its physical edge length
    self.pose_world = Pose()
    self.marker_size_world = 0.2
def converge(self):
    """If all particles lie within a fixed radius of their centroid,
    collapse them to one mean particle, publish the pose, and shut the
    node down.

    :return: None
    """
    global root
    threshold_converge = 1
    # NOTE(review): n == 0 would divide by zero below, exactly as in the
    # original -- callers apparently guarantee a non-empty particle set.
    n = len(self.particles)
    # centroid of the particle cloud (removed the unused sse and
    # x_lst/y_lst/t_lst locals from the original)
    sum_x = sum(p.pose.x for p in self.particles)
    sum_y = sum(p.pose.y for p in self.particles)
    # NOTE(review): a plain arithmetic mean of theta is wrong across the
    # +/-pi wrap-around (circular mean needed) -- kept as-is.
    sum_t = sum(p.pose.theta for p in self.particles)
    center = Pose(0, 0, 0)
    center.x, center.y = sum_x / n, sum_y / n
    # bail out as soon as any particle is too far from the centroid
    for i, p in enumerate(self.particles):
        dist = math.hypot(p.pose.x - center.x, p.pose.y - center.y)
        if dist > threshold_converge:
            print(
                str(i) + ': (' + str(center.x) + ',' + str(center.y) + ')')
            self.converged = False
            return
    print(len(self.particles))
    # update particles as mean the particle
    self.particles = [Particle(center.x, center.y, sum_t / n, 1)]
    self.converged = True
    print('Converged at: (' + str(center.x) + ',' + str(center.y) +
          ') , theta:' + str(sum_t / n))
    print('Time: ' + str(time.time() - self.start_time))
    loc_x = self.particles[0].pose.x
    loc_y = self.particles[0].pose.y
    loc_t = self.particles[0].pose.theta
    self.pub_pose.publish(str(loc_x) + "," + str(loc_y) + "," + str(loc_t))
    root.quit()
    rospy.signal_shutdown("Shutting down Localization node...........")
def detect(self, image):
    """Detect the single world marker (id 2) in `image`.

    Updates self.is_detected, self.pose_world (camera pose expressed in
    the marker's frame after the axis-swap transform) and stores the
    annotated frame in self.image_out.
    """
    self.is_detected = False
    corners, ids, _rejected = aruco.detectMarkers(image, self.dictionary)
    # exactly one marker must be visible
    self.is_detected = ids is not None and len(ids) == 1
    if self.is_detected:
        for idx in range(len(ids)):
            if ids[idx][0] != 2:
                continue
            rvec, tvec, _ = aruco.estimatePoseSingleMarkers(
                corners[idx], self.marker_size_world,
                camera_config_tello.camera_matrix,
                camera_config_tello.distortion)
            self.pose_world = Pose(cv2.Rodrigues(rvec[0])[0], tvec[0].T)
            # transform: invert the marker pose and re-orient the axes
            # with +/- 90 degree rotations about x
            mat_t = np.dot(Pose.rotational_x(-pi * 0.5).T,
                           self.pose_world.R.T)
            self.pose_world.R = np.dot(mat_t, Pose.rotational_x(pi * 0.5))
            self.pose_world.t = -np.dot(mat_t, self.pose_world.t)
            # draw axis for the aruco marker
            aruco.drawAxis(image, camera_config_tello.camera_matrix,
                           camera_config_tello.distortion, rvec, tvec,
                           self.marker_size_world)
    self.image_out = image
# specified method and parameters parser = argparse.ArgumentParser(description='Set the aircraft poses from flight data.') parser.add_argument('--project', required=True, help='project directory') parser.add_argument('--sentera', help='use the specified sentera image-metadata.txt file (lat,lon,alt,yaw,pitch,roll)') parser.add_argument('--pix4d', help='use the specified pix4d csv file (lat,lon,alt,roll,pitch,yaw)') args = parser.parse_args() proj = ProjectMgr.ProjectMgr(args.project) print "Loading image info..." proj.load_image_info() pose_set = False if args.sentera != None: Pose.setAircraftPoses(proj, args.sentera, order='ypr') pose_set = True elif args.pix4d != None: Pose.setAircraftPoses(proj, args.pix4d, order='rpy') pose_set = True if not pose_set: print "Error: no flight data specified or problem with flight data" print "No poses computed" exit # compute the project's NED reference location (based on average of # aircraft poses) proj.compute_ned_reference_lla() print "NED reference location:", proj.ned_reference_lla
#!/usr/bin/python3 import argparse import sys sys.path.append('../lib') import Pose import ProjectMgr # for all the images in the project image_dir, compute the camera poses from # the aircraft pose (and camera mounting transform) parser = argparse.ArgumentParser(description='Set the initial camera poses.') parser.add_argument('--project', required=True, help='project directory') args = parser.parse_args() proj = ProjectMgr.ProjectMgr(args.project) proj.load_images_info() Pose.compute_camera_poses(proj) proj.save_images_info()
    help=
    'use the specified sentera image-metadata.txt file (lat,lon,alt,yaw,pitch,roll)'
)
parser.add_argument(
    '--pix4d',
    help='use the specified pix4d csv file (lat,lon,alt,roll,pitch,yaw)')
args = parser.parse_args()

# load the project and its image metadata
proj = ProjectMgr.ProjectMgr(args.project)
print("Loading image info...")
proj.load_images_info()

# apply aircraft poses from whichever flight-data source was supplied
pose_set = False
if args.sentera != None:
    Pose.setAircraftPoses(proj, args.sentera, order='ypr')
    pose_set = True
elif args.pix4d != None:
    Pose.setAircraftPoses(proj, args.pix4d, order='rpy')
    pose_set = True
if not pose_set:
    print("Error: no flight data specified or problem with flight data")
    print("No poses computed")
    quit()

# compute the project's NED reference location (based on average of
# aircraft poses)
proj.compute_ned_reference_lla()
ned_node = getNode('/config/ned_reference', True)
print("NED reference location:")
# Set the initial camera poses (Python 2 variant): for all the images in
# the project image_dir, compute the camera poses from the aircraft pose
# (and camera mounting transform).

import sys
# must run before any site-packages import so this interpreter finds them
sys.path.insert(0, "/usr/local/lib/python2.7/site-packages/")
import argparse
import commands
import cv2
import fnmatch
import os.path

# make the project-local library importable
sys.path.append('../lib')
import Pose
import ProjectMgr

parser = argparse.ArgumentParser(description='Set the initial camera poses.')
parser.add_argument('--project', required=True, help='project directory')
args = parser.parse_args()

# load image metadata, recompute all camera poses, persist the result
proj = ProjectMgr.ProjectMgr(args.project)
proj.load_image_info()
Pose.compute_camera_poses(proj.image_list, proj.cam, proj.ned_reference_lla,
                          force=True)
proj.save_images_meta()
def ai(self, target, time):
    """Per-frame AI update: choose and apply an action against `target`.

    `time` is the frame time step used to scale movement distances.
    The personality in self.pose ("crafty" / "coward" / "aggressive")
    picks a decision: 1 = close in and attack, -1 = retreat and snipe.

    NOTE(review): this block's indentation was reconstructed from a
    whitespace-mangled source -- confirm nesting against the original.
    """
    # Force death pose when health reaches 0
    if self.health <= 0:
        self.image = self.died
    # Do nothing (breathe idle) when the target is dead or stunned
    if (target.health <= 0 or target.on_stun == True) and self.health > 0 and self.on_damage == False and self.on_attack == False and self.on_stun == False:
        self.doing_nothing = True
        if self.look_right:
            self.breathing_right.play()
            self.image = self.breathing_right.get_current_image()
        else:
            self.breathing_left.play()
            self.image = self.breathing_left.get_current_image()
    # Move / fight
    elif self.health > 0 and self.on_damage == False and self.on_cover == False and self.on_attack == False and self.on_stun == False and target.on_stun == False:
        self.doing_nothing = False
        # "locked": the target is currently facing us
        if (self.rect.centerx < target.rect.centerx and target.look_left == True) or (self.rect.centerx > target.rect.centerx and target.look_right == True):
            self.on_locked = True
        else:
            self.on_locked = False
        # far enough away to count as positional advantage
        in_advantage = abs(self.rect.centerx - target.rect.centerx) > 230
        # Make decision according to the configured personality
        # (variable name "decition" kept as in the original code)
        if self.pose == "crafty":
            decition = Pose.crafty([self.on_locked, target.on_attack, target.on_cover, target.on_air, in_advantage])
        elif self.pose == "coward":
            decition = Pose.coward([self.on_locked, target.on_attack, target.on_cover, target.on_air, in_advantage])
        elif self.pose == "aggressive":
            decition = Pose.aggressive([self.on_locked, target.on_attack, target.on_cover, target.on_air, in_advantage])
        if decition == 1:
            # Reach target
            # Up
            if self.rect.bottom>(self.HEIGHT - 70):
                if self.rect.centery > target.rect.centery:
                    if self.look_right:
                        self.looking_right.play()
                        self.image = self.looking_right.get_current_image()
                    elif self.look_left:
                        self.looking_left.play()
                        self.image = self.looking_left.get_current_image()
                    self.rect.centery -= self.speed_x * time
            # Down
            if self.rect.bottom<self.HEIGHT:
                if self.rect.centery < target.rect.centery:
                    if self.look_right:
                        self.looking_right.play()
                        self.image = self.looking_right.get_current_image()
                    elif self.look_left:
                        self.looking_left.play()
                        self.image = self.looking_left.get_current_image()
                    self.rect.centery += self.speed_x * time
            # Right
            if self.rect.right<self.WIDTH:
                if target.rect.centerx>self.rect.centerx:
                    self.look_left = False
                    self.look_right = True
                    self.looking_right.play()
                    self.image = self.looking_right.get_current_image()
                    self.rect.centerx += self.speed_x * time
            # Left
            if self.rect.left>0:
                if target.rect.centerx<self.rect.centerx:
                    self.look_left = True
                    self.look_right = False
                    self.looking_left.play()
                    self.image = self.looking_left.get_current_image()
                    self.rect.centerx -= self.speed_x * time
            # Attack
            if target.health > 0:
                if self.ready2attack:
                    # Close range: sword
                    if abs(self.rect.centerx - target.rect.centerx) < 80:
                        self.power = self.sword_power
                        self.swoosh.get_sfx().play()
                        if self.look_right:
                            self.image = self.attacking_right
                        else:
                            self.image = self.attacking_left
                        self.on_cover = False
                        self.attack_duration = 10
                        self.ready2attack = False
                        self.reload_time = 30
                        self.on_attack = True
                    # Far range: rifle (power decays with distance)
                    if abs(self.rect.centerx - target.rect.centerx) < self.rifle_range:
                        if abs(self.rect.centerx - target.rect.centerx) > 230:
                            self.power = self.rifle_power + (-1 * (abs(int(self.rect.centerx - target.rect.centerx)/100)))
                            self.shot.get_sfx().play()
                            if target.rect.centerx > self.rect.centerx:
                                self.look_right = True
                                self.look_left = False
                                self.image = self.attacking_right_far
                            else:
                                self.look_right = False
                                self.look_left = True
                                self.image = self.attacking_left_far
                            self.on_cover = False
                            self.attack_duration = 30
                            self.ready2attack = False
                            self.reload_time = 70
                            self.on_attack = True
        elif decition == -1:
            # Retreat
            if target.health > 0:
                # Attack when in rifle range (but not too close)
                if self.ready2attack == True and (abs(self.rect.centerx - target.rect.centerx) < self.rifle_range and abs(self.rect.centerx - target.rect.centerx) > 230):
                    self.power = self.rifle_power + (-1 * (abs(int(self.rect.centerx - target.rect.centerx)/100)))
                    self.shot.get_sfx().play()
                    if target.rect.centerx > self.rect.centerx:
                        self.look_right = True
                        self.look_left = False
                        self.image = self.attacking_right_far
                    else:
                        self.look_right = False
                        self.look_left = True
                        self.image = self.attacking_left_far
                    self.on_cover = False
                    self.attack_duration = 10
                    self.ready2attack = False
                    self.reload_time = 70
                    self.on_attack = True
                # Close attack
                elif self.ready2attack == True and abs(self.rect.centerx - target.rect.centerx) < 30:
                    self.power = self.sword_power
                    self.swoosh.get_sfx().play()
                    if self.look_right:
                        self.image = self.attacking_right
                    else:
                        self.image = self.attacking_left
                    self.on_cover = False
                    self.attack_duration = 10
                    self.ready2attack = False
                    self.reload_time = 30
                    self.on_attack = True
                # Go away if not attacking
                else:
                    # Up
                    if self.rect.bottom>(self.HEIGHT - 70):
                        if self.rect.centery < target.rect.centery:
                            if self.look_right:
                                self.looking_right.play()
                                self.image = self.looking_right.get_current_image()
                            elif self.look_left:
                                self.looking_left.play()
                                self.image = self.looking_left.get_current_image()
                            self.rect.centery -= self.speed_x * time
                    # Down
                    if self.rect.bottom<self.HEIGHT:
                        if self.rect.centery > target.rect.centery:
                            if self.look_right:
                                self.looking_right.play()
                                self.image = self.looking_right.get_current_image()
                            elif self.look_left:
                                self.looking_left.play()
                                self.image = self.looking_left.get_current_image()
                            self.rect.centery += self.speed_x * time
                    # Right (moving away from the target)
                    if self.rect.right<self.WIDTH:
                        if target.rect.centerx<self.rect.centerx:
                            self.look_left = False
                            self.look_right = True
                            self.looking_right.play()
                            self.image = self.looking_right.get_current_image()
                            self.rect.centerx += self.speed_x * time
                    # Left (moving away from the target)
                    if self.rect.left>0:
                        if target.rect.centerx>self.rect.centerx:
                            self.look_left = True
                            self.look_right = False
                            self.looking_left.play()
                            self.image = self.looking_left.get_current_image()
                            self.rect.centerx -= self.speed_x * time
# anglerpitch=degrees(atan(self.acc_x/self.acc_z)) anglerroll = roll anglerpitch = -pitch visual_state = self.visual_state return bat, is_fly, tftimer, height, wifi, anglerroll, anglerpitch, velz, velxy, posx, posy, posz, pitch, roll, yew, visual_state # 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 if __name__ == '__main__': ispose = 0 fps = FPS() tello = Tello() if ispose == 1: my_pose = Pose() frame_skip = 300 for frame in tello.container.decode(video=0): if 0 < frame_skip: frame_skip = frame_skip - 1 continue fps.update() start_time = time.time() image = cv2.cvtColor(numpy.array(frame.to_image()), cv2.COLOR_RGB2BGR) if ispose == 1: image = cv2.resize(image, (640, 480)) show = my_pose.get_kp(image) if show[2][0]: # 值判断一个就好 cv2.circle(image, (show[2][0], show[2][1]), 3, (0, 0, 255), -1)