def get_traj_set(self):
    """Parse ``self.reader`` (CSV rows of obj_id, x, y, t) into trajectories.

    The first row is treated as a header and skipped.  Consecutive rows
    sharing the same obj_id are grouped into one ``Trajectory`` whose
    ``points`` list holds an ``STPoint`` per row.

    :return: list of ``Trajectory`` objects (possibly partial if a parse
        error occurs mid-file; the error is printed, not raised).
    """
    result = []
    prev_id = None
    try:
        tmp = None
        for index, line in enumerate(self.reader):
            if index == 0:
                # skip the header row
                continue
            obj_id = int(line[0])
            if tmp is None:
                # first data row: open the first trajectory
                tmp = Trajectory(obj_id)
            elif obj_id != prev_id:
                # id changed: close the current trajectory, open a new one
                result.append(tmp)
                tmp = Trajectory(obj_id)
            prev_id = obj_id
            point = STPoint(obj_id, float(line[1]), float(line[2]),
                            float(line[3]))
            tmp.points.append(point)
        # BUGFIX: the original never appended the final trajectory.
        if tmp is not None:
            result.append(tmp)
    except Exception as e:
        # BUGFIX: Exception has no ``.message`` attribute in Python 3;
        # printing the exception itself works on both 2 and 3.
        print(e)
    return result
def addPlayer(self, player):
    """Adds a player in this room.

    Finds the first free racket slot, resolves pseudo collisions with the
    opposing player, and restarts the ball trajectory (the rules depend on
    the number of players).
    """
    # We look for the first free racket to give to the player: if there were
    # players and one left, the new player takes that vacated position.
    # TODO: it would be better to search for the None value and insert there
    # instead of for-break!  Check: no regression!
    for axisID in self.players.keys():
        # BUGFIX(idiom): identity check against None instead of ``==``.
        if self.players[axisID] is None:
            # free racket found
            self.players[axisID] = player
            newPseudo = False
            # If the new player uses a name which is already in use, we add a
            # random digit at the end and tell him.
            # NOTE(review): assumes the opposite slot (axisID ^ 1) is already
            # occupied; a None there would raise AttributeError -- confirm
            # that callers only invoke addPlayer on rooms with an opponent.
            if player.name == self.players[axisID ^ 1].name:
                newPseudo = True
                player.name += str(random.randint(1, 9))
            if player.name != "" and newPseudo:
                player.msgNewPseudo(player.name)
            # Restart the trajectory: we must, because the number of players
            # has changed and so have the rules.
            self.trajectory.stop()
            del self.trajectory
            self.trajectory = Trajectory(self)
            break
def crd2traj(self):
    """
    Convert coordinates into a Trajectory object.

    @return: trajectory object
    @rtype: Trajectory
    """
    ## the first line of the crd file is empty -- discard it
    self.crd.readline()

    frames = []
    n_read = 0

    if self.verbose:
        self.log.write("Reading frames ..")

    # nextFrame() signals end-of-file by raising EOFError.
    try:
        while True:
            frames.append(self.nextFrame())
            n_read += 1
            if self.verbose and n_read % 100 == 0:
                self.log.write('#')
    except EOFError:
        if self.verbose:
            self.log.add("Read %i frames." % n_read)

    traj = Trajectory(refpdb=self.ref)
    traj.frames = N.array(frames).astype(N.Float32)
    traj.setRef(self.ref)
    traj.ref.disconnect()

    return traj
def __init__(self, parent, machine):
    """Trajectory workspace widget: set up state, register with the
    machine, then build and refresh the UI."""
    super(TrajectoryWidget, self).__init__(parent, machine)

    self.name = "TrajectoryWidget"
    icon_dir = os.path.realpath(os.path.dirname(__file__))
    self.icon = os.path.join(icon_dir, 'resources', 'icons',
                             'format-text-direction-ltr.png')
    self.tooltip = "Trajectory workspace"

    self.machine.add_listener(self)
    self.trajectory = Trajectory()
    self.trajectory_controller = TrajectoryControllerThread()
    self.limits = None

    # References to widgets -- all start unset and are filled in by loadUI().
    for widget_attr in ('calibrationFileButton', 'calibrationFileLineEdit',
                        'pointsFileButton', 'pointsFileLineEdit',
                        'loadButton', 'graphicsView', 'trajectoryComboBox',
                        'indexComboBox', 'stepSpinBox', 'xScaleSpinBox',
                        'yScaleSpinBox', 'xOffsetSpinBox', 'yOffsetSpinBox',
                        'stopButton', 'runButton'):
        setattr(self, widget_attr, None)

    # Image-related
    self.pixmap = QtGui.QPixmap()
    self.original_rect = None

    self.loadUI()
    self.update()
def run(self): env = gym.make("CartPole-v0") # Create the environment episode_done = True trajectory = Trajectory() next_state = [] episode_num = 0 episode_reward = 1 # TODO: check for alternative init rewards while True: if self.stop: # Check if we need to stop the current thread print("Worker:",self.worker_id, "STOP") break if episode_done: # Check if the episode is done state = env.reset() # Reset the environment and recover initial state episode_done = False template = "in worker {}, episode {} done after {} steps" # Template for print print(template.format(self.worker_id, episode_num, episode_reward)) self.res_queue.put(episode_reward) episode_num += 1 episode_reward = 1 for i in range(LOOKAHEAD): #print("I=",i) if self.is_eval(): # Render curretn state of the environement env.render() state = tf.convert_to_tensor(state) # Convert state to tensor state = tf.expand_dims(state, 0) action = self.act(state) # Select action based on the current policy. trajectory.store(s=state, a=action) state, reward, episode_done, _ = env.step(action) # Recover next state, reward and if the episode is done. #print("|New state\n",state,"\n|reward: \n",reward," \n|episode done \n",episode_done) if episode_done: # If the agent fails the task reward = -1 # Store the reward trajectory.store(r=reward) episode_reward += reward next_state = state # update current state if episode_done: next_state = [] break if episode_done and self.is_eval(): if self.is_eval(): env.close() template = "in worker {}, episode {} done after {} steps" # Template for print print(template.format(self.worker_id, episode_num, episode_reward)) break if not self.is_eval(): self.train(trajectory, next_state)# Update network using the trajectory trajectory.clear()
def getUpTrajectory(self, coords):
    """Return a one-element list holding a single straight trajectory
    (heading 270, length 800, step 10) starting at ``coords``."""
    movement = LineMovement(270, 800)
    up_trajectory = Trajectory(self.gameParams, [movement], coords, incr=10)
    return [up_trajectory]
def update(self, trajectories_key):
    """Refresh per-robot trajectories and discrete paths.

    :param trajectories_key: iterable of dicts; each may carry a
        "trajectory" and/or a "discrete_path" payload keyed to a robot
        via its "robot_id" entry.
    """
    for el in trajectories_key:
        # BUGFIX: dict.has_key() was removed in Python 3; the ``in``
        # operator is equivalent and works on Python 2 as well.
        if "trajectory" in el:
            rid = el["robot_id"]
            self.trajectories[rid] = Trajectory(json_file=el["trajectory"])
        if "discrete_path" in el:
            rid = el["robot_id"]
            self.discrete_paths[rid] = DiscretePath(
                json=el["discrete_path"])
def shrinkUniverse(universe, temperature=300.*Units.K, trajectory=None, scale_factor=0.95): """Shrinks |universe|, which must have been scaled up by Function:MMTK.Solvation.addSolvent, back to its original size. The compression is performed in small steps, in between which some energy minimization and molecular dynamics steps are executed. The molecular dynamics is run at the given |temperature|, and an optional |trajectory| (a MMTK.Trajectory.Trajectory object or a string, interpreted as a file name) can be specified in which intermediate configurations are stored. """ # Set velocities and initialize trajectory output universe.initializeVelocitiesToTemperature(temperature) if trajectory is not None: if type(trajectory) == type(''): trajectory = Trajectory(universe, trajectory, "w", "solvation protocol") close_trajectory = 1 else: close_trajectory = 0 actions = [TrajectoryOutput(trajectory, ["configuration"], 0, None, 1)] snapshot = SnapshotGenerator(universe, actions=actions) snapshot() # Do some minimization and equilibration minimizer = SteepestDescentMinimizer(universe, step_size = 0.05*Units.Ang) actions = [VelocityScaler(temperature, 0.01*temperature, 0, None, 1), TranslationRemover(0, None, 20)] integrator = VelocityVerletIntegrator(universe, delta_t = 0.5*Units.fs, actions = actions) for i in range(5): minimizer(steps = 40) integrator(steps = 200) # Scale down the system in small steps i = 0 while universe.scale_factor > 1.: if trajectory is not None and i % 1 == 0: snapshot() i = i + 1 step_factor = max(scale_factor, 1./universe.scale_factor) for object in universe: object.translateTo(step_factor*object.position()) universe.scaleSize(step_factor) universe.scale_factor = universe.scale_factor*step_factor for i in range(3): minimizer(steps = 10) integrator(steps = 50) del universe.scale_factor if trajectory is not None: snapshot() if close_trajectory: trajectory.close()
def simulate_trajectory(self, robot, vx, vtheta):
    """Forward-simulate constant velocities (vx, vtheta) from the robot's
    current pose over ``self.time`` seconds in ``self.timestep`` steps.

    Returns a zero-cost Trajectory containing one Pose per step.
    """
    x, y, theta = robot.pose.x, robot.pose.y, robot.pose.theta
    n_steps = int(self.time / self.timestep)
    poses = []
    for _ in range(n_steps):
        # Euler integration: heading is updated after the position so the
        # displacement uses the heading at the start of the step.
        x += (vx * math.cos(theta)) * self.timestep
        y += (vx * math.sin(theta)) * self.timestep
        theta += vtheta * self.timestep
        poses.append(Pose(x, y, theta))
    return Trajectory(poses=poses, velocity=Velocity(vx, vtheta), cost=0)
def test_discretize_path(self):
    """The fixture SVG holds two paths; discretizing the first at step 1
    must yield 377 two-dimensional points."""
    trajectory = Trajectory()
    trajectory.load_paths_from_svg(self.svg_filepath)
    self.assertEqual(len(trajectory.paths), 2)
    samples = trajectory.discretize(trajectory.paths[0], 1)
    self.assertEqual(len(samples), 377)
    for sample in samples:
        self.assertEqual(len(sample), 2)
def getTrajectory(self, stepselection=[], usealigned=True, framestep=1,
                  frameselection=[]):
    """
    Get Trajectory object for current replica. If stepselection is given,
    only files corresponding to those steps will be returned. If usealigned
    is True, will try to use aligned trajectory if it exists.

    :arg list stepselection: List of ints identifying steps to return as
        trajectory. Will return all if empty.
    :arg bool usealigned: Use aligned trajectory if possible.

    :return: :class:`Trajectory.Trajectory` object
    """
    from Trajectory import Trajectory

    # NOTE(review): mutable default arguments, and ``stepselection`` is
    # sorted in place below -- a caller-supplied list gets reordered.
    # Confirm callers do not depend on their list's order.
    if not stepselection:
        stepselection = range(1, self.ntrajfiles + 1)
    else:
        self.log.info("Trajectory selected steps: %s" % stepselection)

    # Fall back to the raw trajectory when no aligned one exists.
    if usealigned:
        if not self.isAligned(stepselection):
            usealigned = False

    if usealigned:
        path = self.alignpath
        checkext = self.checkAlignExtension
        self.log.debug("Using aligned trajectory")
    else:
        # Raw trajectories are only usable once production has finished.
        if not self.isProductionFinished(stepselection):
            raise ReplicaError, "Cannot retrieve trajectory for non-finished steps: %s" % stepselection
        path = self.mdpath
        checkext = self.checkProductionExtension
        self.log.debug("Using not aligned trajectory")

    # Build file list to parse; steps with no file are skipped with a warning.
    flist = []
    stepselection.sort()
    self.log.debug("Selected steps for trajectory: %s" % stepselection)
    for step in stepselection:
        extension = checkext(step)[step]
        if not extension:
            self.log.warn("File for step %i not found" % step)
            continue
        f = self.mdoutfiletemplate.format(step=step, extension=extension)
        flist.append(osp.join(path, f))
    self.log.debug("Filelist for trajectory: %s" % flist)

    # Build Trajectory object
    self.log.debug(
        "Replica %s Trajectory: stepselection - %s ; framestep - %i " %
        (self.name, stepselection, framestep))
    return Trajectory(flist, self.getPDB(), step=framestep,
                      frameselection=frameselection)
def test_path_is_homogeneously_sampled(self):
    """A 5-unit horizontal segment sampled at step 1 must yield points at
    integer x positions along y == 0."""
    horizontal = Path(Line(start=(0 + 0j), end=(5 + 0j)))
    expected = [(0, 0), (1, 0), (2, 0), (3, 0), (4, 0), (5, 0)]
    traj = Trajectory()
    traj.paths.append(horizontal)
    sampled = traj.get_path(0, 0, 1)
    for (got_x, got_y), (exp_x, exp_y) in zip(sampled, expected):
        self.assertAlmostEqual(got_x, exp_x, delta=0.01)
        self.assertAlmostEqual(got_y, exp_y, delta=0.01)
def test_extract_paths_from_svg(self):
    """The fixture SVG must yield exactly two path strings, matched
    verbatim against the expected 'd' attribute data below."""
    traj = Trajectory()
    path_strings = traj._extract_paths_from_svg(self.svg_filepath)
    self.assertEqual(len(path_strings), 2)
    self.assertEqual(
        path_strings[0],
        "M 99.75,67.46875 C 71.718268,67.468752 73.46875,79.625 73.46875,79.625 L 73.5,92.21875 L 100.25,92.21875 L 100.25,96 L 62.875,96 C 62.875,96 44.9375,93.965724 44.9375,122.25 C 44.937498,150.53427 60.59375,149.53125 60.59375,149.53125 L 69.9375,149.53125 L 69.9375,136.40625 C 69.9375,136.40625 69.433848,120.75 85.34375,120.75 C 101.25365,120.75 111.875,120.75 111.875,120.75 C 111.875,120.75 126.78125,120.99096 126.78125,106.34375 C 126.78125,91.696544 126.78125,82.125 126.78125,82.125 C 126.78125,82.124998 129.04443,67.46875 99.75,67.46875 z M 85,75.9375 C 87.661429,75.937498 89.8125,78.088571 89.8125,80.75 C 89.812502,83.411429 87.661429,85.5625 85,85.5625 C 82.338571,85.562502 80.1875,83.411429 80.1875,80.75 C 80.187498,78.088571 82.338571,75.9375 85,75.9375 z "
    )
    self.assertEqual(
        path_strings[1],
        "M 100.5461,177.31485 C 128.57784,177.31485 126.82735,165.1586 126.82735,165.1586 L 126.7961,152.56485 L 100.0461,152.56485 L 100.0461,148.7836 L 137.4211,148.7836 C 137.4211,148.7836 155.3586,150.81787 155.3586,122.53359 C 155.35861,94.249323 139.70235,95.252343 139.70235,95.252343 L 130.3586,95.252343 L 130.3586,108.37734 C 130.3586,108.37734 130.86226,124.03359 114.95235,124.03359 C 99.042448,124.03359 88.421098,124.03359 88.421098,124.03359 C 88.421098,124.03359 73.514848,123.79263 73.514848,138.43985 C 73.514848,153.08705 73.514848,162.6586 73.514848,162.6586 C 73.514848,162.6586 71.251668,177.31485 100.5461,177.31485 z M 115.2961,168.8461 C 112.63467,168.8461 110.4836,166.69503 110.4836,164.0336 C 110.4836,161.37217 112.63467,159.2211 115.2961,159.2211 C 117.95753,159.2211 120.1086,161.37217 120.1086,164.0336 C 120.10861,166.69503 117.95753,168.8461 115.2961,168.8461 z "
    )
def generate(self):
    """Build a single-sample steady-state trajectory at the configured
    elevation and travel angles, with rotor inputs derived via the
    flatness-based computation."""
    times = array([0.0])
    pitch = array([[0.0, 0.0, 0.0]])
    elevation = array([[self.elevation * deg, 0.0, 0.0, 0.0, 0.0]])
    travel = array([[self.travel * deg, 0.0, 0.0, 0.0, 0.0]])
    # First return value (pitch) is discarded; only the inputs are kept.
    _, inputs = compute_pitch_and_inputs_flatness_centripetal(
        elevation[0], travel[0])
    front = array([[inputs[0]]])
    back = array([[inputs[1]]])
    return Trajectory(times, pitch, elevation, travel, front, back)
def main():
    """Demo: build two polyline trajectories from point lists and print
    their distance with respect to the query points Q."""
    pts1 = [[1, 2], [3, 3], [2, 1], [3, 2], [5, 5], [14, 4]]
    pts2 = [[0, -2], [1, 1], [4, 1], [3, 5], [7, 5], [10, 9]]

    # Consecutive point pairs become line segments.
    lines1 = [Line(np.asarray(a), np.asarray(b))
              for a, b in zip(pts1, pts1[1:])]
    lines2 = [Line(np.asarray(a), np.asarray(b))
              for a, b in zip(pts2, pts2[1:])]

    t1 = Trajectory(lines1)
    t2 = Trajectory(lines2)

    Q = [
        np.array([0, 0]),
        np.array([1, -2]),
        np.array([4, 0]),
        np.array([12, -3])
    ]
    metric = DistanceMetric(Q)
    print(metric.calc_trajectorydst(Q, t1, t2))
def test_get_path_not_starting_at_0(self):
    """Requesting a closed triangular path from segment 1 must return the
    same points as discretizing the rotated segment order."""
    triangle = Path(Line(start=(0 + 0j), end=(2 + 2j)),
                    Line(start=(2 + 2j), end=(7 + 1j)),
                    Line(start=(7 + 1j), end=(0 + 0j)))
    rotated = Path(Line(start=(2 + 2j), end=(7 + 1j)),
                   Line(start=(7 + 1j), end=(0 + 0j)),
                   Line(start=(0 + 0j), end=(2 + 2j)))
    expected = Trajectory.discretize(rotated, 1)
    traj = Trajectory()
    traj.paths.append(triangle)
    self.assertEqual(traj.get_path(0, 1, 1), expected)
def __init__(self, dir):
    """Load the stored network weights twice -- once for the network being
    trained and once for the frozen copy driving the search tree -- and
    set up the environment, replay trajectory and optimizer.

    :param dir: path to the saved state_dict loaded into both networks.
    """
    self.dir = dir
    # Network being trained.
    self.learning_network = Network(49, 128)
    self.learning_network.load_state_dict(torch.load(self.dir))
    self.learning_network.eval()
    # Frozen snapshot used by the tree search (same initial weights).
    self.curr_network = Network(49, 128)
    self.curr_network.load_state_dict(torch.load(self.dir))
    self.curr_network.eval()
    self.env = Environment()
    # Replay buffer capacity.
    self.trajectory_size = 5000
    self.trajectory = Trajectory(self.trajectory_size)
    self.optimizer = torch.optim.SGD(self.learning_network.parameters(),
                                     lr=1e-3, weight_decay=10e-4,
                                     momentum=0.9)
    self.tree = Tree(self.curr_network)
def removePlayer(self, player):
    """Delete a player from the room.

    Afterwards the room is either deleted (0 players left), merged with
    another solo room (1 player left and a partner room exists), or reset
    for solo play.
    """
    self.players[player.axis] = None
    player.msgGstat()  # tells the players that there is one player less
    if self.player_nb() == 0:
        # empty room => deleted room
        self.trajectory.stop()
        del self.trajectory
        self.rooms.remove(self)
        # tells the player that there is one room less
        self.site.msgTotalNumberOfRooms()
    elif self.player_nb() == 1:
        # we try to pair the player left alone with an other solo player
        for room in self.rooms[:]:
            if room.player_nb() == 1 and room != self:
                # we have found a game "room" with 1 player ("room != self"
                # avoids choosing the previous game of the player)
                # player.axis ^ 1 gives the other player of the room
                playerToMove = self.players[player.axis ^ 1]
                self.trajectory.stop()
                del self.trajectory
                self.rooms.remove(self)
                #playerToMove.reset()  # use it if we want the coming player to begin with score = 0
                playerToMove.room = room
                playerToMove.axis = room.players.values().index(None)
                room.addPlayer(playerToMove)
                playerToMove.msgGstat()
                playerToMove.msgSyncJ()
                # move the freshly completed room to the end of the list
                self.rooms.remove(room)
                self.rooms.append(room)
                self.site.msgTotalNumberOfRooms()
                break
        else:
            # no solo partner found: the remaining player keeps this room;
            # restart the trajectory since the rules change with one player
            otherPlayer = self.players[player.axis ^ 1]
            self.trajectory.stop()
            del self.trajectory
            self.trajectory = Trajectory(self)
            #if otherPlayer.axis == 1:
            #    self.players[1] = None
            #    otherPlayer.axis = 0
            #    self.players[0] = otherPlayer
            otherPlayer.msgGstat()
            otherPlayer.msgSyncJ()
            self.site.msgTotalNumberOfRooms(otherPlayer)
def getLeftTrajectories(self):
    """Return a one-element list with a randomized four-leg trajectory
    starting above the right half of the screen."""
    # Headings are drawn in this exact order to keep the RNG sequence
    # identical to the original implementation.
    # NOTE(review): 1240 looks out of place next to the other <360
    # headings -- possibly a typo for 240; confirm before changing.
    headings = (random.randrange(75, 100),
                random.randrange(115, 150),
                random.randrange(165, 195),
                random.randrange(210, 1240))
    movements = [LineMovement(heading, 250) for heading in headings]
    start = (random.randrange(400, self.gameParams.getWidth() - 150), -50)
    return [Trajectory(self.gameParams, movements, start, incr=4)]
def getRightTrajectories(self):
    """Return a one-element list with a randomized four-leg trajectory
    starting above the left half of the screen (mirror of the left case)."""
    # Headings are drawn in this exact order to keep the RNG sequence
    # identical to the original implementation.
    # NOTE(review): 1330 looks out of place next to the other <360
    # headings -- possibly a typo for 330; confirm before changing.
    headings = (random.randrange(75, 100),
                random.randrange(30, 60),
                random.randrange(0, 15),
                random.randrange(300, 1330))
    movements = [LineMovement(heading, 250) for heading in headings]
    start = (random.randrange(150, self.gameParams.getWidth() - 400), -50)
    return [Trajectory(self.gameParams, movements, start, incr=4)]
def align_trajectory(trajectory0: Trajectory, trajectory1: Trajectory) -> Trajectory:
    """Return trajectory0 transformed by the similarity aligning its
    samples onto trajectory1's samples (Umeyama alignment over the
    overlapping time window).

    An empty Trajectory is returned when the two do not overlap in time.
    """
    assert isinstance(trajectory0, Trajectory)
    assert isinstance(trajectory1, Trajectory)

    start = max(trajectory0.begin_time(), trajectory1.begin_time())
    end = min(trajectory0.end_time(), trajectory1.end_time())
    if start >= end:
        # no temporal overlap -> nothing to align
        return Trajectory()

    # Sample both trajectories at 100 common timestamps.
    sample_times = np.linspace(start, end, 100).tolist()
    source_points = [trajectory0[t].x() for t in sample_times]
    target_points = [trajectory1[t].x() for t in sample_times]

    transform = umeyama(source_points, target_points).to_SE3()
    return transform * trajectory0
def test_get_normalized_path(self):
    """Normalizing path 0 must match discretizing the same path and scaling
    by 1/width (7) and 1/height (2)."""
    traj = Trajectory()
    triangle = Path(Line(start=(0 + 0j), end=(2 + 2j)),
                    Line(start=(2 + 2j), end=(7 + 1j)),
                    Line(start=(7 + 1j), end=(0 + 0j)))
    reference = Path(Line(start=(0 + 0j), end=(2 + 2j)),
                     Line(start=(2 + 2j), end=(7 + 1j)),
                     Line(start=(7 + 1j), end=(0 + 0j)))
    expected = Trajectory.scale(Trajectory.discretize(reference, 1),
                                1 / 7.0, 1 / 2.0)
    traj.paths.append(triangle)
    normalized = traj.get_normalized_path(0)
    self.assertGreater(len(normalized), 3)
    for (got_x, got_y), (exp_x, exp_y) in zip(normalized, expected):
        self.assertAlmostEqual(got_x, exp_x, delta=0.01)
        self.assertAlmostEqual(got_y, exp_y, delta=0.01)
def getPilotContent(pygame, gameDisplay, gameParams):
    """Build the pilot-stage content: one Enemy_1 flying a fixed four-leg
    pattern (headings 90, 45, 270, 0; 300 units per leg) from top-center."""
    enemy = Enemy_1(pygame, gameDisplay, gameParams)
    movements = [LineMovement(heading, 300) for heading in (90, 45, 270, 0)]
    start = (gameParams.getWidth() // 2, -50)
    trajectories = [Trajectory(gameParams, movements, start, incr=3)]
    conduct = Conduct(trajectories)
    return [Content(enemy, conduct, 5)]
def run(self):
    """Worker loop: roll out CartPole episodes in LOOKAHEAD-step chunks,
    storing (state, action, reward) into a Trajectory and training on
    each chunk. Runs until the hosting thread/process is killed."""
    env = gym.make("CartPole-v0")
    episode_done = True
    trajectory = Trajectory()
    next_state = []
    episode_num = 0
    episode_reward = 1
    while True:
        if episode_done:
            # Start a new episode and report the finished one.
            state = env.reset()
            episode_done = False
            template = "in worker {}, episode {} done after {} steps"
            print(
                template.format(self.worker_id, episode_num,
                                episode_reward))
            self.res_queue.put(episode_reward)
            episode_num += 1
            episode_reward = 1
        for i in range(LOOKAHEAD):
            # env.render()
            state = tf.convert_to_tensor(state)
            state = tf.expand_dims(state, 0)
            # Select an action under the current policy and record it.
            action = self.act(state)
            trajectory.store(s=state, a=action)
            state, reward, episode_done, _ = env.step(action)
            if episode_done:
                # Penalize terminating the episode (pole fell over).
                reward = -1
            trajectory.store(r=reward)
            episode_reward += reward
            next_state = state
            if episode_done:
                next_state = []
                break
        # Train on the collected chunk, then reset the buffer.
        self.train(trajectory, next_state)
        trajectory.clear()
def extract_traj(fname: str, config: dict) -> Trajectory:
    """Extract trajectory from filename and create Trajectory instance.

    Args:
        fname: Filename from which to load trajectory.
        config: Dictionary containing parameters of javascript simulation.

    Returns:
        Trajectory with mapped states and actions.
    """
    data = load_json(fname)
    states = [step['data']['state'] for step in data]
    actions = [step['data']['action'] for step in data]
    traj = Trajectory(states, actions, config, fname)
    traj.remap_states()
    traj.as_numpy()
    return traj
def addPlayerToARoom(self, player):
    """Adds a player into the first not full (1 player) game room, or
    creates a fresh room when no such room exists."""
    # [:] creates a temp copy of "rooms"; it allows modifying "rooms"
    # during the loop.  We loop over the existing rooms.
    for room in self.rooms[:]:
        if room.player_nb() < 2:
            player.room = room
            # id of the player's racket's axis (0 means left racket,
            # 1 means right racket).  At this point the new player hasn't
            # been added to room.players yet => the free (None) slot is his.
            player.axis = room.players.values().index(None)
            room.addPlayer(player)
            # This room has just been given a new player, so we move it to
            # the end of the list; it's important if we want to pair the new
            # player with the solo player who has been waiting the longest.
            self.rooms.remove(room)
            self.rooms.append(room)
            self.msgTotalNumberOfRooms(player)
            break
    else:
        # Strange Pythonic construction: this "else" is paired with "for"
        # and runs only if the for wasn't "break"-ed.  It means no suitable
        # room (solo player) was found, so we create a new one!
        room = Room(self)
        player.room = room
        # TODO: this is a new game so the axis is obviously #1, right?
        player.axis = room.players.values().index(None)
        #room.addPlayer(player)
        room.players[0] = player
        room.trajectory = Trajectory(room)
        #player.msgGstat()  # TODO: remove?
        self.rooms.append(room)
        # tell all the players that a new room is born
        self.msgTotalNumberOfRooms()
def generate(self):
    """Plan a smooth elevation/travel transition between the configured
    start and end angles using polynomial planners, deriving pitch and
    rotor inputs for every sample via the flatness-based computation."""
    order = self.derivative_order

    def boundary(angle):
        # Derivative vector [angle*deg, 0, ..., 0] of length order + 1.
        d = zeros(order + 1)
        d[0] = angle * deg
        return d

    elevation_planner = PolynomialPlanner(boundary(self.elevation_start),
                                          boundary(self.elevation_end),
                                          0.0, self.t_end, order)
    travel_planner = PolynomialPlanner(boundary(self.travel_start),
                                       boundary(self.travel_end),
                                       0.0, self.t_end, order)

    num_samples = int(self.t_end * self.sample_rate) + 1
    t = linspace(0.0, self.t_end, num_samples)
    eps = elevation_planner.eval_vec(t)
    lamb = travel_planner.eval_vec(t)

    phi = empty((num_samples, 3))
    vf = empty((num_samples, 1))
    vb = empty((num_samples, 1))
    for k in range(num_samples):
        pitch_k, inputs_k = compute_pitch_and_inputs_flatness_centripetal(
            eps[k], lamb[k])
        phi[k] = pitch_k
        vf[k, 0] = inputs_k[0]
        vb[k, 0] = inputs_k[1]

    return Trajectory(t, phi, eps, lamb, vf, vb)
def __init__(self, initialPos, simtype="UAS_ROTOR", vehicleID=0,
             fasttime=True, verbose=0, callsign="SPEEDBIRD",
             monitor="DAIDALUS", daaConfig="data/DaidalusQuadConfig.txt"):
    """Set up one simulated vehicle: ownship dynamics, cognition /
    guidance / geofence / traffic-monitor modules, merging support,
    and all logging state.

    :param initialPos: home position as a 3-element sequence
        (presumably lat, lon, alt -- confirm with callers).
    :param simtype: "UAM_VTOL" selects VehicleSim dynamics; anything
        else selects the QuadSim quadrotor model.
    :param vehicleID: numeric id for this vehicle.
    :param fasttime: if True, the simulated clock starts at 0 and is
        stepped manually; otherwise wall-clock time is used.
    :param verbose: verbosity level.
    :param callsign: callsign passed to cognition/trajectory/monitor modules.
    :param monitor: DAA monitor flavor (e.g. "DAIDALUS").
    :param daaConfig: path to the DAA configuration file.
    """
    self.fasttime = fasttime
    self.callsign = callsign
    self.verbose = verbose
    self.home_pos = [initialPos[0], initialPos[1], initialPos[2]]
    self.traffic = []
    # Choose the dynamics model for the ownship.
    if simtype == "UAM_VTOL":
        self.ownship = VehicleSim(vehicleID, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
    else:
        # deferred import: only needed for the quadrotor model
        from quadsim import QuadSim
        self.ownship = QuadSim()
    self.vehicleID = vehicleID

    # Core autonomy modules
    self.Cog = Cognition(callsign)
    self.Guidance = Guidance(GuidanceParam())
    self.Geofence = GeofenceMonitor([3, 2, 2, 20, 20])
    self.Trajectory = Trajectory(callsign)
    self.tfMonitor = TrafficMonitor(callsign, daaConfig, False, monitor)
    self.Merger = Merger(callsign, vehicleID)

    # Aircraft data
    self.flightplan1 = []
    self.etaFP1 = False
    self.etaFP2 = False
    self.flightplan2 = []
    self.controlInput = [0.0, 0.0, 0.0]
    self.fenceList = []

    self.guidanceMode = GuidanceMode.NOOP

    # Merger
    self.arrTime = None
    self.logLatency = 0
    self.prevLogUpdate = 0
    self.mergerLog = LogData()

    # Kinematic state
    self.position = self.home_pos
    self.velocity = [0.0, 0.0, 0.0]
    self.trkgsvs = [0.0, 0.0, 0.0]

    self.localPos = []

    # Per-tick logging buffers
    self.ownshipLog = {"t": [], "position": [], "velocityNED": [],
                       "positionNED": [], "trkbands": [], "gsbands": [],
                       "altbands": [], "vsbands": [], "localPlans": [],
                       "localFences": [], "commandedVelocityNED": []}
    self.trafficLog = {}
    self.emergbreak = False
    # Simulated clock: start at 0 for fasttime, else wall clock.
    if self.fasttime:
        self.currTime = 0
    else:
        self.currTime = time.time()
    self.numSecPlan = 0
    self.plans = []
    self.fences = []
    self.mergeFixes = []
    self.localPlans = []
    self.localFences = []
    self.localMergeFixes = []
    self.daa_radius = 0
    self.startSent = False
    self.nextWP1 = 1
    self.nextWP2 = 1
    self.numFences = 0
    self.resSpeed = 0
    self.defaultWPSpeed = 1
    self.missionComplete = False
    self.fphases = -1
    self.land = False
    self.activePlan = "Plan0"
    self.windFrom = 0
    self.windSpeed = 0
def createTrajectories(self, hwynet, storefile, inkey, outkey): """ Takes the sequence of points, and converts each into a trajectory object. storefile - HDF datastore with GPS points in it. """ # open the data store store = pd.HDFStore(storefile) # get the list of dates and cab_ids to process dates = store.select_column(inkey, 'date').unique() dates.sort() print('Retrieved a total of %i days to process' % len(dates)) # loop through the dates rowsWritten = 0 for date in dates: print('Processing ', date) # get the data and sort gps_df = store.select(inkey, where='date==Timestamp(date)') # loop through each trip last_cab_id = 0 groups = gps_df.groupby(['cab_id', 'trip_id', 'status']) for group in groups: (cab_id, trip_id, status) = group[0] if (cab_id != last_cab_id): print(' Processing cab_id: ', cab_id) # group[0] is the index, group[1] is the records traj = Trajectory(hwynet, group[1]) # check for empty set if (len(traj.candidatePoints) == 0): continue # determine most likely paths and points traj.calculateMostLikely() # for debugging if (cab_id, trip_id) in self.debugCabTripIds: traj.printDebugInfo(self.debugFile, ids=(cab_id, trip_id)) # allocate trajectory travel times to links (link_ids, traversalRatios, startTimes, travelTimes) = \ self.allocateTrajectoryTravelTimeToLinks(hwynet, traj) # create a dataframe data = { 'link_id': link_ids, 'traversal_ratio': traversalRatios, 'start_time': startTimes, 'travel_time': travelTimes } link_df = pd.DataFrame(data) link_df['date'] = date link_df['cab_id'] = cab_id link_df['trip_id'] = trip_id link_df['status'] = status last_cab_id = cab_id # set the index link_df.index = rowsWritten + pd.Series(range(0, len(link_df))) rowsWritten += len(link_df) # write the data store.append(outkey, link_df, data_columns=True) # all done store.close()
import json
import bezier
import numpy as np
import csv

from Trajectory import Trajectory

if __name__ == "__main__":
    # Load the sample data and pull out the first recorded trajectory.
    # BUGFIX: use a context manager so the file handle is always closed
    # (the original opened the file and never closed it).
    with open("sample.json", 'r') as fl:
        t = json.load(fl)
    x = t["original_trajectories"][0]
    pieces = x["trajectory"]
    # Build one trajectory from a 2-D CSV and one from the JSON pieces.
    tre = Trajectory(csv_file_2d="example2d.csv")
    tre2 = Trajectory(json_file=x["trajectory"])
    tre.to_marker(90)