def scenario_deadend_test(self):
    """Dead-end test scenario: a straight 100 m path blocked by a wall.

    Sets a zero-velocity current and builds a hemispherical shell of small
    obstacles (shell radius 25, centred at x=30 on the path) so the vehicle
    runs into a dead end while tracking the path.

    Returns:
        np.ndarray: initial 6-element state — position [0, 0, 0] stacked
        with the path-aligned initial attitude.
    """
    waypoints = [(0, 0, 0), (50, 0, 0), (100, 0, 0)]
    self.path = QPMI(waypoints)
    # Zero-velocity current: the dead end is the only challenge here.
    self.current = Current(mu=0, Vmin=0, Vmax=0, Vc_init=0,
                           alpha_init=0, beta_init=0, t_step=0)
    radius = 25
    angles = np.linspace(-90, 90, 10) * np.pi / 180
    # Size each small obstacle to half the angular spacing times the shell
    # radius so adjacent obstacles touch and the wall is closed.
    obstacle_radius = (angles[1] - angles[0]) * radius / 2  # fixed typo: was "obstalce_radius"
    # Start from a clean list so obstacles left over from a previously
    # generated scenario do not leak into this one (consistent with
    # scenario_vertical_test, which also resets the list).
    self.obstacles = []
    for ang1 in angles:
        for ang2 in angles:
            # Spherical-coordinate point on the shell; z is negated
            # (z presumably points down — NED convention, TODO confirm).
            x = 30 + radius * np.cos(ang1) * np.cos(ang2)
            y = radius * np.cos(ang1) * np.sin(ang2)
            z = -radius * np.sin(ang1)
            self.obstacles.append(Obstacle(obstacle_radius, [x, y, z]))
    init_pos = [0, 0, 0]
    # Align the initial attitude with the path tangent at s=0; indices 1 and 0
    # of get_direction_angles are presumably (elevation, azimuth) — TODO confirm.
    init_attitude = np.array([
        0,
        self.path.get_direction_angles(0)[1],
        self.path.get_direction_angles(0)[0]
    ])
    initial_state = np.hstack([init_pos, init_attitude])
    return initial_state
def scenario_test_path_current(self):
    """Deterministic test path combined with a constant 0.75 m/s current.

    Delegates path/obstacle setup to scenario_test_path, then overrides the
    current with a fixed-magnitude, fixed-direction flow.

    Returns:
        np.ndarray: initial state produced by scenario_test_path.
    """
    state = self.scenario_test_path()
    # Constant current: Vmin == Vmax == Vc_init, so the magnitude never varies.
    self.current = Current(
        mu=0,
        Vmin=0.75,
        Vmax=0.75,
        Vc_init=0.75,
        alpha_init=np.pi / 4,
        beta_init=np.pi / 6,
        t_step=0,
    )
    return state
def scenario_test_current(self):
    """Standard test scenario with a constant current added.

    Reuses scenario_test for the layout, then installs a fixed 0.75 m/s
    current (constant velocity for reproducibility in the report).

    Returns:
        np.ndarray: initial state produced by scenario_test.
    """
    state = self.scenario_test()
    current_kwargs = dict(
        mu=0,
        Vmin=0.75,
        Vmax=0.75,
        Vc_init=0.75,
        alpha_init=np.pi / 4,
        beta_init=np.pi / 6,
        t_step=0,
    )
    self.current = Current(**current_kwargs)
    return state
def scenario_test_path(self):
    """Deterministic path-following scenario built from module-level test_waypoints.

    Installs the fixed test path with a zero-velocity current and places the
    vehicle at the origin, aligned with the path tangent.

    Returns:
        np.ndarray: initial 6-element state (position then attitude).
    """
    self.n_waypoints = len(test_waypoints)
    self.path = QPMI(test_waypoints)
    # No environmental disturbance for this deterministic test.
    self.current = Current(mu=0, Vmin=0, Vmax=0, Vc_init=0,
                           alpha_init=0, beta_init=0, t_step=0)
    start_position = [0, 0, 0]
    # Tangent direction at the start of the path; indices 1 and 0 are
    # presumably (elevation, azimuth) — TODO confirm against QPMI.
    direction = self.path.get_direction_angles(0)
    start_attitude = np.array([0, direction[1], direction[0]])
    return np.hstack([start_position, start_attitude])
def scenario_vertical_test(self):
    """Straight path obstructed by a vertical column of obstacles at x=50.

    Builds a 100 m straight path with zero current and replaces the obstacle
    list with seven radius-5 spheres stacked from z=-30 to z=+30 in steps
    of 10, forcing a lateral avoidance manoeuvre.

    Returns:
        np.ndarray: initial 6-element state (position then attitude).
    """
    waypoints = [(0, 0, 0), (50, 0, 0), (100, 0, 0)]
    self.path = QPMI(waypoints)
    # Still water — the vertical wall is the only challenge.
    self.current = Current(mu=0, Vmin=0, Vmax=0, Vc_init=0,
                           alpha_init=0, beta_init=0, t_step=0)
    # Fresh obstacle list: one sphere every 10 units of depth.
    self.obstacles = [
        Obstacle(radius=5, position=[50, 0, -30 + 10 * i])
        for i in range(7)
    ]
    start_position = [0, 0, 0]
    # Align attitude with the path tangent at s=0 (indices presumably
    # elevation then azimuth — TODO confirm).
    start_attitude = np.array([
        0,
        self.path.get_direction_angles(0)[1],
        self.path.get_direction_angles(0)[0]
    ])
    return np.hstack([start_position, start_attitude])
def scenario_beginner(self):
    """Easiest training scenario: a random path, no current, no obstacles.

    Generates random waypoints, fits the path, and spawns the vehicle near
    the path origin with randomized position and path-aligned attitude.

    Returns:
        np.ndarray: initial 6-element state (position then attitude).
    """
    # Note: the original also did `initial_state = np.zeros(6)` here — a dead
    # store that was unconditionally overwritten below; removed.
    # Current object with zero velocity.
    self.current = Current(mu=0, Vmin=0, Vmax=0, Vc_init=0,
                           alpha_init=0, beta_init=0, t_step=0)
    waypoints = generate_random_waypoints(self.n_waypoints)
    self.path = QPMI(waypoints)
    # Random start near the origin: up to 10 units behind the path start in x,
    # Gaussian jitter (std 5) in y and z.
    init_pos = [
        np.random.uniform(0, 2) * (-5),
        np.random.normal(0, 1) * 5,
        np.random.normal(0, 1) * 5,
    ]
    # Align attitude with the path tangent at s=0; indices 1 and 0 are
    # presumably (elevation, azimuth) from get_direction_angles — TODO confirm.
    init_attitude = np.array([
        0,
        self.path.get_direction_angles(0)[1],
        self.path.get_direction_angles(0)[0]
    ])
    initial_state = np.hstack([init_pos, init_attitude])
    return initial_state
def scenario_expert(self):
    """Hardest training scenario: advanced layout plus a random varying current.

    Reuses scenario_advanced for the path and obstacles, then adds a
    randomized time-varying current (mu=0.2, speed in [0.5, 1.0]) and
    enables control-effort penalization.

    Returns:
        np.ndarray: initial state produced by scenario_advanced.
    """
    state = self.scenario_advanced()
    # Randomized current: initial speed and direction drawn per episode,
    # evolving with the simulation step size.
    self.current = Current(
        mu=0.2,
        Vmin=0.5,
        Vmax=1.0,
        Vc_init=np.random.uniform(0.5, 1),
        alpha_init=np.random.uniform(-np.pi, np.pi),
        beta_init=np.random.uniform(-np.pi / 4, np.pi / 4),
        t_step=self.step_size,
    )
    self.penalize_control = 1.0
    return state