def setup(self, mission):
    if mission is None:
        self.isSetup = False
        return
    np.random.seed(mission.planning.get('seed', None))

    # set the mean and covariance norm weights (used in the reachability check)
    cc = mission.planning['setup']['controller']
    SE2BeliefState.covNormWeight = cc['norm_covariance_weight']
    SE2BeliefState.meanNormWeight = cc['norm_mean_weight']
    SE2BeliefState.reachDist = cc['reach_dist']
    # set the state component norm weights
    SE2BeliefState.normWeights = np.array(cc['norm_state_weights'])

    # load environment
    self.env = mission.environment
    # load robot model
    assert mission.no_active_vehicles == 1
    self.robotModel = mission.vehicles.itervalues().next()
    # read the start pose
    x, y, yaw = self.robotModel['initial_state']
    cov = self.robotModel['initial_covariance']
    self.start = SE2BeliefState(x, y, yaw, cov)
    # read the specification
    self.spec = mission.specification
    # read planning time
    self.planningTime = mission.planning['planning_time']
    self.planningSteps = mission.planning['planning_steps']
    self.sat_prob_threshold = mission.planning['sat_probability_threshold']

    logging.info('Problem configuration is')
    logging.info("Start Pose: %s", str(self.start))
    logging.info("Specification: %s", self.spec)
    logging.info("Planning Time: %f seconds", self.planningTime)

    # create the observation model
    observationModel = CameraLocalization(self.robotModel, self.env)
    # create the motion model
    motionModel = UnicycleMotionModel(self.robotModel)
    # create planner
    self.planner = FIRM(self.env, motionModel, observationModel,
                        mission.planning['setup'])
    # set specification
    state_label = self.robotModel['motion_model']['state_label']
    cov_label = self.robotModel['motion_model']['covariance_label']
    data = dict()
    execfile(mission.predicates, data)
    predicates = data['predicates']
    self.planner.obstacles = data['obstacles']
    del data
    assert predicates is not None, \
        'You need to provide at least an empty dictionary!'
    self.planner.setSpecification(self.spec, state_label=state_label,
                                  cov_label=cov_label,
                                  predicates=predicates)
    # set initial state
    self.planner.addState(self.start, initial=True)
    # set sampler
    self.planner.sampler = SE2StateSampler(
        self.planner.bounds + np.array([[0.2, 0.2], [-0.2, -0.2]]))  # TODO: make this general

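# A minimal sketch of how the weights configured in `setup` are typically
# combined in a reachability test: a weighted norm over the mean difference
# plus a weighted covariance norm, compared against `reachDist`. The helper
# below is illustrative only; its name and the exact formula are assumptions,
# not the SE2BeliefState implementation.
def _belief_distance_sketch(conf_a, conf_b, cov_a,
                            norm_weights, mean_weight, cov_weight):
    """Weighted distance between two SE2 belief states (illustrative only)."""
    mean_term = np.linalg.norm(norm_weights * (np.asarray(conf_a) - np.asarray(conf_b)))
    cov_term = np.linalg.norm(cov_a)  # Frobenius norm of the covariance
    return mean_weight * mean_term + cov_weight * cov_term
# e.g., reached = _belief_distance_sketch(b.conf, goal.conf, b.cov,
#                     SE2BeliefState.normWeights, SE2BeliefState.meanNormWeight,
#                     SE2BeliefState.covNormWeight) < SE2BeliefState.reachDist
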
def test_observation_model():
    isSimulation = True
    if not isSimulation:
        rospy.init_node("ObservationModelTest", anonymous=True)

    # 1. load mission and environment
    mission = Mission.from_file(
        '/home/cristi/Dropbox/work/workspace_linux/PyFIRM/src/data/simple/mission.yaml')
    logging.info('\n' + str(mission))
    logging.info('Seed: %s', mission.simulation.get('seed', None))

    # load environment
    env = mission.environment
    # load robot model
    assert mission.no_active_vehicles == 1
    robotModel = mission.vehicles.itervalues().next()
    print robotModel['observation_model']

    cam = CameraLocalization(robotModel, env, isSimulation=isSimulation)

    if not isSimulation:
        rospy.spin()

def test_controller():
    # from optitrack import OptiTrackInterface
    from numpy.linalg import norm

    isSimulation = False
    if not isSimulation:
        rospy.init_node("ControllerTest", anonymous=True)

    # 1. load mission and environment
    mission = Mission.from_file(
        '/home/cristi/Dropbox/work/workspace_linux/PyFIRM/src/data/simple/mission.yaml')
    logging.info('\n' + str(mission))
    logging.info('Seed: %s', mission.simulation.get('seed', None))

    # load environment
    env = mission.environment
    # load robot model
    assert mission.no_active_vehicles == 1
    robotModel = mission.vehicles.itervalues().next()
    print 'Motion model:', robotModel['motion_model']

    cc = mission.planning['setup']['controller']
    SE2BeliefState.covNormWeight = 0 * cc['norm_covariance_weight']
    SE2BeliefState.meanNormWeight = 1  # cc['norm_mean_weight']
    SE2BeliefState.reachDist = cc['reach_dist']
    # set the state component norm weights
    SE2BeliefState.normWeights = np.array([1, 1, 0])  # cc['norm_state_weights']
    SE2BeliefState.normWeights[2] = 0

    robot = UnicycleMotionModel(robotModel, isSimulation)
    print 'Time step:', robot.dt
    robot.rate.sleep()

    print 'Observation model:', robotModel['observation_model']
    cam = CameraLocalization(robotModel, env, isSimulation=isSimulation)
    robot.rate.sleep()

    #---------------------------------------------------------------------------
    d_epsilon = 0.05            # error bound around waypoint
    a_epsilon = np.deg2rad(15)  # error bound for turning

    # rid = 1
    # opti = OptiTrackInterface()
    # body = opti.trackInfo()[rid]  # get data for rigid body w/ rigid body ID `rid'
    # opti._close()
    x, y, theta = cam.getObservation(None)  # body.z, body.x, np.deg2rad(body.yaw)
    print x, y, theta
    #---------------------------------------------------------------------------

    def save_trajectory():
        true_traj = []
        traj = []
        while True:
            state = yield traj, true_traj
            print 'save state:', state.conf
            traj.append(state.copy())
            true_traj.append(cam.getObservation(state))  # TODO: change this back
            # true_traj.append(cam.getObservationPrediction(state))

    # filter_time = 4  # [sec]
    # filter_stab = int(filter_time/robot.dt)
    # print 'filter steps:', filter_stab, 'dt:', robot.dt

    path = [(x, y), (x + 1, y + 1)]  # , (x+2, y+1), (x+1, y), (x, y)]
    for (xs, ys), (xg, yg) in zip(path[:-1], path[1:]):
        start = SE2BeliefState(xs, ys, theta)
        goal = SE2BeliefState(xg, yg, np.pi / 2)  # NOTE: end orientation does not matter
        print 'Start:', start.conf, 'Goal:', goal.conf

        tlqr = SwitchingController(robot, cam, goal,
                                   stateWeight=np.diag([1, 1, 200]),
                                   controlWeight=np.eye(2))
        ekf = ExtendedKF(cam, robot)
        controller = Controller(robot, cam, tlqr, ekf,
                                withNoise=True)  # FIXME: withNoise option is not ok

        process = save_trajectory()
        process.next()
        _, (trajectory, true_trajectory) = controller.execute(start, process)
        robot.execute([0, 0])
        print len(trajectory)

        # for _ in range(filter_stab):
        #     ekf
        #     robot.execute([0, 0])

        print
        print 'Trajectories:'
        for p, tp in zip(trajectory, true_trajectory):
            print 'C:', p.conf, 'T:', tp
        print
        print 'Start theta:', theta
        theta = trajectory[-1].conf[2]
        print 'End theta:', theta
        print
        print

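# The `save_trajectory` generator above is used as a coroutine: the controller
# primes it with .next() and then .send()s each new state, while the recorded
# lists remain reachable through the yielded tuple. A self-contained
# illustration of that pattern (independent of the Controller class, names
# chosen here for the example only):
def _coroutine_pattern_demo():
    def recorder():
        log = []
        while True:
            item = yield log   # receive a value, hand back the shared log
            log.append(item)

    rec = recorder()
    rec.next()                 # prime the coroutine up to the first yield
    for value in [1, 2, 3]:
        log = rec.send(value)  # push a value in, get the shared log back
    print log                  # -> [1, 2, 3]
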
def test_controller__():
    isSimulation = False
    if not isSimulation:
        rospy.init_node("ControllerTest", anonymous=True)

    # 1. load mission and environment
    mission = Mission.from_file(
        '/home/cristi/Dropbox/work/workspace_linux/PyFIRM/src/data/simple/mission.yaml')
    logging.info('\n' + str(mission))
    logging.info('Seed: %s', mission.simulation.get('seed', None))

    # load environment
    env = mission.environment
    # load robot model
    assert mission.no_active_vehicles == 1
    robotModel = mission.vehicles.itervalues().next()
    print 'Motion model:', robotModel['motion_model']

    cc = mission.planning['setup']['controller']
    SE2BeliefState.covNormWeight = cc['norm_covariance_weight']
    SE2BeliefState.meanNormWeight = cc['norm_mean_weight']
    SE2BeliefState.reachDist = cc['reach_dist']
    # set the state component norm weights
    SE2BeliefState.normWeights = np.array(cc['norm_state_weights'])
    SE2BeliefState.normWeights[2] = 0

    robot = UnicycleMotionModel(robotModel, isSimulation)
    print 'Time step:', robot.dt

    # from robots.X80Pro import X80Pro
    # import time
    # robot.rate.sleep()
    #
    # u = 0.25
    # t0 = time.time()
    # print 'Applying control', t0, 'control in pulse:', X80Pro.vel2pulse(u)
    # # robot.execute([0, 6.5])  # TODO: rotation -> np.pi
    # robot.execute([u, 0])  # linear
    # print 'Control sent', time.time() - t0
    # time.sleep(4-0.25)
    # print 'Stop robot', time.time() - t0
    # robot.execute([0, 0])
    # print 'Done', time.time() - t0
    # return

    print 'Observation model:', robotModel['observation_model']
    cam = CameraLocalization(robotModel, env, isSimulation=isSimulation)

    def save_trajectory():
        true_traj = []
        traj = []
        while True:
            state = yield traj, true_traj
            print 'save state:', state.conf
            traj.append(state.copy())
            true_traj.append(cam.getObservation(state))  # TODO: change this back
            # true_traj.append(cam.getObservationPrediction(state))

    start = SE2BeliefState(0.240, 1.070, 0)
    goal = SE2BeliefState(0.240, 1 + 1.070, np.pi / 2)  # NOTE: end orientation does not matter

    tlqr = SwitchingController(robot, cam, goal,
                               stateWeight=np.diag([1, 1, 200]),
                               controlWeight=np.eye(2))
    ekf = ExtendedKF(cam, robot)
    controller = Controller(robot, cam, tlqr, ekf,
                            withNoise=True)  # FIXME: withNoise option is not ok

    process = save_trajectory()
    process.next()
    _, (trajectory, true_trajectory) = controller.execute(start, process)
    robot.execute([0, 0])
    print len(trajectory)

    # plt.figure()
    # plt.axis('equal')
    # # for lss in tlqr.lss:
    # #     s = lss.x
    # #     plt.arrow(s.x, s.y, 0.1*np.cos(s.yaw), 0.1*np.sin(s.yaw), hold=True, color='g')
    # for x, y, yaw in true_trajectory:
    #     plt.arrow(x, y, 0.1*np.cos(yaw), 0.1*np.sin(yaw), hold=True, color='k')
    # for s in trajectory:
    #     plt.arrow(s.x, s.y, 0.1*np.cos(s.yaw), 0.1*np.sin(s.yaw), hold=True, color='b')
    # s = goal
    # plt.arrow(s.x, s.y, 0.1*np.cos(s.yaw), 0.1*np.sin(s.yaw), hold=True, color='g')
    #
    # # plt.xlim([0, 4.13])
    # # plt.ylim([0, 3.54])
    # plt.xlim([-0.7, 1.2])
    # plt.ylim([1.1, 2.7])
    # plt.show()

    print

def test_filter():
    # from optitrack import OptiTrackInterface
    from numpy.linalg import norm
    import time

    isSimulation = False
    if not isSimulation:
        rospy.init_node("ControllerTest", anonymous=True)

    # 1. load mission and environment
    mission = Mission.from_file(
        '/home/cristi/Dropbox/work/workspace_linux/PyFIRM/src/data/simple/mission.yaml')
    logging.info('\n' + str(mission))
    logging.info('Seed: %s', mission.simulation.get('seed', None))

    # load environment
    env = mission.environment
    # load robot model
    assert mission.no_active_vehicles == 1
    robotModel = mission.vehicles.itervalues().next()
    print 'Motion model:', robotModel['motion_model']

    robot = UnicycleMotionModel(robotModel, isSimulation)
    print 'Time step:', robot.dt
    robot.rate.sleep()

    print 'Observation model:', robotModel['observation_model']
    cam = CameraLocalization(robotModel, env, isSimulation=isSimulation)
    robot.rate.sleep()

    #---------------------------------------------------------------------------
    x, y, theta = cam.getObservation(None)  # body.z, body.x, np.deg2rad(body.yaw)
    print x, y, theta
    #---------------------------------------------------------------------------

    def save_trajectory():
        true_traj = []
        traj = []
        while True:
            state = yield traj, true_traj
            print 'save state:', state.conf
            traj.append(state.copy())
            true_traj.append(cam.getObservation(state))  # TODO: change this back
            # true_traj.append(cam.getObservationPrediction(state))

    goal = SE2BeliefState(x, y, 0)
    tlqr = SwitchingController(robot, cam, goal,
                               stateWeight=np.diag([1, 1, 200]),
                               controlWeight=np.eye(2))
    ekf = ExtendedKF(cam, robot)

    updated_state = SE2BeliefState(x, y, theta, cov=np.eye(3))
    print 'Before filtering', updated_state.conf, \
          'True:', cam.getObservation(updated_state)
    control = np.array([0, 0])
    for _ in it.count():
        z = cam.getObservation(updated_state)
        # apply filter and get state estimate
        currentLS = tlqr.getCurrentLS(0)
        nextLS = tlqr.getNextLS(1)
        updated_state = ekf.evolve(updated_state, control, z, currentLS, nextLS)
        print 'Filter:', updated_state.conf, 'True:', z
        time.sleep(0.25)
        if rospy.is_shutdown():
            break

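# `test_filter` repeatedly calls ekf.evolve(...) with zero control, so the
# measurement update is what drives the estimate toward the camera readings.
# For reference, a generic Kalman-style measurement update (for a linearized
# observation model z = H x + noise) has the textbook form sketched below;
# this is illustrative only and is not the ExtendedKF interface used above.
def _kf_measurement_update_sketch(mean, cov, z, H, R):
    """One measurement update; H is the observation Jacobian, R its noise covariance."""
    S = H.dot(cov).dot(H.T) + R                    # innovation covariance
    K = cov.dot(H.T).dot(np.linalg.inv(S))         # Kalman gain
    mean = mean + K.dot(z - H.dot(mean))           # corrected mean
    cov = (np.eye(len(mean)) - K.dot(H)).dot(cov)  # corrected covariance
    return mean, cov
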
def test_controller():
    # from optitrack import OptiTrackInterface
    from numpy.linalg import norm
    import time, datetime

    isSimulation = False
    if not isSimulation:
        rospy.init_node("ControllerTest", anonymous=True)

    # 1. load mission and environment
    mission = Mission.from_file(
        '/home/cristi/Dropbox/work/workspace_linux/PyFIRM/src/data/simple/mission.yaml')
    logging.info('\n' + str(mission))
    logging.info('Seed: %s', mission.simulation.get('seed', None))

    # load environment
    env = mission.environment
    # load robot model
    assert mission.no_active_vehicles == 1
    robotModel = mission.vehicles.itervalues().next()
    print 'Motion model:', robotModel['motion_model']

    cc = mission.planning['setup']['controller']
    SE2BeliefState.covNormWeight = 0 * cc['norm_covariance_weight']
    SE2BeliefState.meanNormWeight = 1  # cc['norm_mean_weight']
    SE2BeliefState.reachDist = cc['reach_dist']
    # set the state component norm weights
    SE2BeliefState.normWeights = np.array([1, 1, 0])  # cc['norm_state_weights']
    SE2BeliefState.normWeights[2] = 0

    robot = UnicycleMotionModel(robotModel, isSimulation)
    print 'Time step:', robot.dt
    robot.rate.sleep()

    print 'Observation model:', robotModel['observation_model']
    cam = CameraLocalization(robotModel, env, isSimulation=isSimulation)
    opti = OptitrackLocalization(isSimulation=isSimulation)
    quad = QuadCamPose(isSimulation=isSimulation)
    robot.rate.sleep()

    #---------------------------------------------------------------------------
    d_epsilon = 0.05            # error bound around waypoint
    a_epsilon = np.deg2rad(15)  # error bound for turning

    # rid = 1
    # opti = OptiTrackInterface()
    # body = opti.trackInfo()[rid]  # get data for rigid body w/ rigid body ID `rid'
    # opti._close()
    x, y, theta = cam.getObservation(None)  # body.z, body.x, np.deg2rad(body.yaw)
    print x, y, theta
    #---------------------------------------------------------------------------

    def save_trajectory():
        true_traj = []
        traj = []
        optitrack_traj = []
        quad_traj = []
        while True:
            state = yield traj, true_traj, optitrack_traj, quad_traj
            print 'save state:', state.conf
            traj.append(state.copy())
            true_traj.append(cam.getObservation(state))  # TODO: change this back
            optitrack_traj.append(opti.getObservation(state))
            quad_traj.append(quad.getObservation(state))
            # true_traj.append(cam.getObservationPrediction(state))

    filter_time = 5  # [sec]
    filter_stab = int(filter_time / robot.dt)
    print 'filter steps:', filter_stab, 'dt:', robot.dt

    xs, ys = x, y
    covs = np.eye(3)

    # path = np.array([  # larger transition system
    #     [1.0, 2.5, 0.0],
    #     [1.227090539137495, 1.5261366178286393, 0],
    #     [0.7814101593838313, 0.6309556309870786, 0],
    #     [1.3450599380271273, 0.880746156908613, 0],
    #     [2.063849180607642, 0.9785829893820421, 0],
    #     [2.8594440505001724, 1.354833717086771, 0],
    #     [3.187859804988132, 1.976122679525189, 0],
    #     [3.6840077044823207, 2.8443491833405843, 0],
    #     [2.925239900621002, 2.52353894593588, 0],
    #     [2.288019681537405, 2.549151027045657, 0],
    #     [1.9702269162937254, 3.102135677048742, 0],
    #     [1.306980698970712, 3.091532230704365, 0],
    #     [1.0, 2.5, 0.0],
    # ])
    path = np.array([  # smaller transition system
        # [1.0, 2.5, 0.0],
        [1.227090539137495, 1.5261366178286393, 0],
        [0.6683476856850583, 0.6968077181658885, 0],
        [1.3618722811762958, 0.7142604793362379, 0],
        [2.059680461093005, 0.9722647716781803, 0],
        [2.935911437513193, 0.49039417072630315, 0],
        [3.5244797253701616, 1.0671446704710397, 0],
        [3.4171462977012945, 1.8636962642083317, 0],
        [3.2481431339499154, 2.580396624830811, 0],
        [2.6199028579323596, 2.5050497919480206, 0],
        [2.143319573133728, 3.0814698610803655, 0],
        [1.3410947476176465, 3.1958386839694723, 0],
        [1.0, 2.5, 0.0],
    ] * 10)

    # origin = np.array([-0.08, 0.696, 0])
    origin = np.array([0, 0, 0])
    path = [p - origin for p in path]
    print 'Init:', path[0]
    # return

    # figure = plt.figure()
    # figure.add_subplot('111')
    # axes = figure.axes[0]
    # axes.axis('equal')  # sets aspect ratio to 1
    #
    # b = np.array(env['boundary']['limits'])
    # plt.xlim(b.T[0] - origin[0])
    # plt.ylim(b.T[1] - origin[1])
    #
    # mission.draw_environment(axes, figure, origin=origin[:2])
    #
    # px, py, _ = np.array(path).T
    # plt.plot(px, py, 'ko-')
    #
    # plt.show()
    # return

    # path = [(x, y), (x+1, y+1), (x+2, y+1), (x+1, y), (x, y)]
    experiment = 70
    with open('experiment_data_{}.txt'.format(experiment), 'a') as fout:
        t = time.time()
        print >> fout, '#', experiment, '#', t, '#', str(datetime.datetime.fromtimestamp(t))

    for xg, yg, _ in path:
        start = SE2BeliefState(xs, ys, theta, cov=covs)
        goal = SE2BeliefState(xg, yg, np.pi / 2)  # NOTE: end orientation does not matter
        print 'Start:', start.conf, 'Goal:', goal.conf

        tlqr = SwitchingController(robot, cam, goal,
                                   stateWeight=np.diag([1, 1, 200]),
                                   controlWeight=np.eye(2))
        ekf = ExtendedKF(cam, robot)
        controller = Controller(robot, cam, tlqr, ekf,
                                withNoise=True)  # FIXME: withNoise option is not ok

        process = save_trajectory()
        process.next()
        _, (trajectory, true_trajectory,
            opti_trajectory, quad_traj) = controller.execute(start, process)
        robot.execute([0, 0])
        print len(trajectory)

        print
        print 'Trajectories:'
        with open('experiment_data_{}.txt'.format(experiment), 'a') as fout:
            for p, tp, op, qp in zip(trajectory, true_trajectory,
                                     opti_trajectory, quad_traj):
                print 'C:', p.conf, 'T:', tp, 'O:', op
                print >> fout, '{', \
                    '"x": {}, "cov": {}, "filter": {}, "opti": {}, "quad": {}'.format(
                        list(p.conf), list(np.diag(p.cov)), list(tp),
                        list(op), list(qp)), '}'
            print
            print >> fout

            print 'Start:', xs, ys, theta
            updated_state = trajectory[-1]
            print 'Before filtering', updated_state.conf, \
                  'True:', cam.getObservation(updated_state)
            for _ in range(filter_stab):
                updated_state = controller.evolve(updated_state, 0, nocontrol=True)
                print >> fout, '{', '"x": {}, "cov": {}'.format(
                    list(updated_state.conf), list(np.diag(updated_state.cov))), '}'
            xs, ys, theta = updated_state.conf
            covs = updated_state.cov
            print 'End:', xs, ys, theta, 'True:', cam.getObservation(updated_state)
            print
            print
            print >> fout
            print >> fout

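# The experiment logs written above are line-oriented: '#'-prefixed header
# lines, blank separator lines, and one record per controller step. Because
# Python formats the float lists with brackets and commas, each record line
# should parse with the json module. A hedged reader sketch (the filename and
# this helper are assumptions for illustration, not part of the original code):
def _load_experiment_log_sketch(filename='experiment_data_70.txt'):
    import json
    records = []
    with open(filename) as fin:
        for line in fin:
            line = line.strip()
            if not line or line.startswith('#'):
                continue  # skip headers and blank separators
            records.append(json.loads(line))
    return records
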