Example #1
    def test_waypoint_visualization_with_ngon_generator_predicted_human(self):
        # initialize the drone state
        waypoint_generator = NGonWaypointGenerator(n=6, radius=4)
        gamma = 0.9
        cinematic_controller = CinematicController(
            waypoint_generator=waypoint_generator, bb_filter_gamma=gamma)
        cinematic_controller.update_latest_drone_state(DroneState(x=2, y=-3))
        cinematic_controller.person_predictor = PolynomialPredictor(
            poly_degree=2, weight_decay_factor=1)
        cinematic_controller.update_latest_bbs([BoundingBox((10, 10), (0, 0))])
        time.sleep(.001)
        cinematic_controller.update_latest_bbs([BoundingBox((5, 5), (50, 0))])
        time.sleep(.001)
        cinematic_controller.update_latest_bbs([BoundingBox((2, 2), (50, 0))])
        time.sleep(.001)
        cinematic_controller.update_latest_bbs([BoundingBox((1, 1), (50, 0))])

        # initialize the drone waypoints
        waypoints = cinematic_controller.generate_waypoints()

        # initialize the person state
        person_states = cinematic_controller.person_predictor.predict_next_person_state(
            [2, 4, 6, 8, 10, 15, 20, 25, 30])

        # initialize the environment
        environment = Environment()
        environment.add_obstacles([[(6, 0), (4, 2), (6, 2)],
                                   [(8, -4), (9, -3), (10, -3), (10, -5),
                                    (9, -3.5)], [(-6, -2), (-1, -5),
                                                 (-1, -2)]])

        # initialize visualization object and plot
        viz = WaypointVisualization(waypoints, person_states, environment)
        viz.plot('plots/moving_target.png')
Example #2
    def test_waypoint_visualization_with_ngon_generator(self):
        # initialize the drone state
        waypoint_generator = NGonWaypointGenerator(n=6, radius=4)
        gamma = 0.9
        cinematic_controller = CinematicController(
            waypoint_generator=waypoint_generator, bb_filter_gamma=gamma)
        # origin_drone_state and bb_10_10_0_0 are fixtures defined elsewhere in
        # the test module.
        cinematic_controller.update_latest_drone_state(origin_drone_state)
        cinematic_controller.update_latest_bbs([bb_10_10_0_0])

        # initialize the drone waypoints
        waypoints = cinematic_controller.generate_waypoints()

        # initialize the person state
        person_state = PersonState(4, 0)

        # initialize the environment
        environment = Environment()
        environment.add_obstacles([[(6, 0), (4, 2), (6, 2)],
                                   [(7, -4), (8, -3), (9, -3), (9, -5),
                                    (8, -3.5)], [(-5, -2), (0, -5), (0, -2)]])

        # initialize visualization object and plot
        viz = WaypointVisualization(waypoints, [person_state], environment)
        viz.plot('plots/test_waypoint_visualization_with_ngon_generator.png')

        # check that file was saved properly
        assert os.path.isfile(
            'plots/test_waypoint_visualization_with_ngon_generator.png')
Example #3
    def create_environment(self):

        # Create the maze
        self.env = Environment(n=self.ground_dimension,
                               mine_density=self.mine_density,
                               visual=self.visual,
                               end_game_on_mine_hit=self.end_game_on_mine_hit)
        self.env.generate_environment()
Example #4
	def create_environment(self, new_maze = None):

		# Create the maze
		self.env = Environment(algorithm = self.algorithm, n = self.maze_dimension, p = self.probability_of_obstacles, fire = self.fire)
		self.env.generate_maze(new_maze = new_maze)

		# Generate graph from the maze
		self.env.create_graph_from_maze()
Example #5
def generate_traj_set(feat_list):
    # Before calling this function, you need to decide what features you care
    # about, from a choice of table, coffee, human, origin, and laptop.
    pick = [104.2, 151.6, 183.8, 101.8, 224.2, 216.9, 310.8]
    place = [210.8, 101.6, 192.0, 114.7, 222.2, 246.1, 322.0]
    start = np.array(pick) * (math.pi / 180.0)
    goal = np.array(place) * (math.pi / 180.0)
    goal_pose = None
    T = 20.0
    timestep = 0.5
    WEIGHTS_DICT = {
        "table": [0.0, 0.2, 0.4, 0.6, 0.8, 1.0, 7.0, 8.0],
        "coffee": [0.0, 0.2, 0.4, 0.6, 0.8, 1.0],
        "laptop": [0.0, 20.0, 21.0, 22.0, 24.0, 26.0, 30.0, 40.0],
        "human": [0.0, 20.0, 21.0, 22.0, 24.0, 26.0, 30.0, 40.0],
        "efficiency": [1.0]
    }

    # Openrave parameters for the environment.
    model_filename = "jaco_dynamics"
    object_centers = {
        'HUMAN_CENTER': [-0.6, -0.55, 0.0],
        'LAPTOP_CENTER': [-0.7929, -0.1, 0.0]
    }
    environment = Environment(model_filename, object_centers)

    # Planner Setup
    max_iter = 50
    num_waypts = 5
    feat_list = [x.strip() for x in feat_list.split(',')]
    num_features = len(feat_list)
    planner = TrajoptPlanner(feat_list, max_iter, num_waypts, environment)

    # Construct set of weights of interest.
    weights_span = [None] * num_features
    for feat in range(0, num_features):
        weights_span[feat] = WEIGHTS_DICT[feat_list[feat]]
    weight_pairs = list(itertools.product(*weights_span))
    weight_pairs = [list(i) for i in weight_pairs]

    traj_rand = {}
    for (w_i, weights) in enumerate(weight_pairs):
        traj = planner.replan(start, goal, goal_pose, weights, T, timestep)
        Phi = environment.featurize(traj.waypts, feat_list)
        # Getting rid of bad, out-of-bounds trajectories
        if any(phi < 0.0 for phi in Phi):
            continue
        traj = traj.waypts.tolist()
        if repr(traj) not in traj_rand:
            traj_rand[repr(traj)] = weights

    here = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../'))
    savefile = "/data/traj_sets/traj_set_table_laptop.p"
    pickle.dump(traj_rand, open(here + savefile, "wb"))
    print "Saved in: ", savefile
    print "Used the following weight-combos: ", weight_pairs
Example #6
def action():
    env = Environment().get_environment_info()

    allure.environment(app=env.apk)
    allure.environment(app_activity=env.app_activity)
    allure.environment(device_name=env.devices[0].device_name)
    allure.environment(platform_name=env.devices[0].platform_name)
    allure.environment(platform_version=env.devices[0].platform_version)

    capabilities = {
        'platformName': env.devices[0].platform_name,
        'platformVersion': env.devices[0].platform_version,
        'deviceName': env.devices[0].device_name,
        'app': env.apk,
        'clearSystemFiles': True,
        'appActivity': env.app_activity,
        'appPackage': env.app_package,
        'automationName': 'UIAutomator2',
        'newCommandTimeout': "2000",
        'unicodeKeyboard': True,
        'resetKeyboard': True,
        'noSign': True,
        'autoGrantPermissions': True
    }
    host = "http://localhost:4724/wd/hub"
    driver = webdriver.Remote(host, capabilities)
    yield ElementActions(driver).reset(driver)
    driver.quit()
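
# A speculative sketch of how the generator above could be registered as a
# pytest fixture (the fixture name and scope are assumptions, not from the source):
import pytest

@pytest.fixture(scope="function")
def actions():
    # Delegate to action(): each test receives the ElementActions instance from
    # its yield, and driver.quit() runs when the fixture is finalized.
    yield from action()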
Example #7
    def __init__(self, config_path=""):
        super(ProjectMain, self).__init__()

        environment = Environment.get_instance()
        environment.init_by_file_name("/.", os.path.basename(os.getcwd()), 1)
        configure = Configure.get_instance(environment)
        self._project = Project()
Example #8
	def __init__(self, loadfile):
		with open(loadfile, 'r') as stream:
			params = yaml.load(stream)

		# ----- General Setup ----- #
		self.prefix = params["setup"]["prefix"]
		self.feat_list = params["setup"]["feat_list"]
		self.demo_spec = params["setup"]["demo_spec"]

		# Openrave parameters for the environment.
		model_filename = params["setup"]["model_filename"]
		object_centers = params["setup"]["object_centers"]
		self.environment = Environment(model_filename, object_centers)

		# Learner setup.
		constants = {}
		constants["trajs_path"] = params["learner"]["trajs_path"]
		constants["betas_list"] = params["learner"]["betas_list"]
		constants["weight_vals"] = params["learner"]["weight_vals"]
		constants["FEAT_RANGE"] = params["learner"]["FEAT_RANGE"]
		self.learner = DemoLearner(self.feat_list, self.environment, constants)

		if self.demo_spec == "simulate":
			# Task setup.
			pick = params["sim"]["task"]["start"]
			place = params["sim"]["task"]["goal"]
			self.start = np.array(pick)*(math.pi/180.0)
			self.goal = np.array(place)*(math.pi/180.0)
			self.goal_pose = None if params["sim"]["task"]["goal_pose"] == "None" else params["sim"]["task"]["goal_pose"]
			self.T = params["sim"]["task"]["T"]
			self.timestep = params["sim"]["task"]["timestep"]
			self.weights = params["sim"]["task"]["feat_weights"]
			
			# Planner Setup.
			planner_type = params["sim"]["planner"]["type"]
			if planner_type == "trajopt":
				max_iter = params["sim"]["planner"]["max_iter"]
				num_waypts = params["sim"]["planner"]["num_waypts"]
				
				# Initialize planner and compute trajectory simulation.
				self.planner = TrajoptPlanner(self.feat_list, max_iter, num_waypts, self.environment)
			else:
				raise Exception('Planner {} not implemented.'.format(planner_type))
			
			self.traj = [self.planner.replan(self.start, self.goal, self.goal_pose, self.weights, self.T, self.timestep)]
			plotTraj(self.environment.env, self.environment.robot, self.environment.bodies, self.traj[0].waypts, size=0.015,color=[0, 0, 1])
			plotCupTraj(self.environment.env, self.environment.robot, self.environment.bodies, [self.traj[0].waypts[-1]],color=[0,1,0])
		else:
			data_path = params["setup"]["demo_dir"]
			data_str = self.demo_spec.split("_")
			data_dir = os.path.abspath(os.path.join(os.path.dirname( __file__ ), '../')) + data_path
			if data_str[0] == "all":
				file_str = data_dir + '/*task{}*.p'.format(data_str[1])
			else:
				file_str = data_dir + "/demo_ID{}_task{}*.p".format(data_str[0], data_str[1])

			self.traj = [pickle.load(open(demo_file, "rb")) for demo_file in glob.glob(file_str)]
		
		self.learner.learn_weights(self.traj)
Example #9
def action():
    env = Environment().get_environment_info()
    capabilities = {'platformName': env.devices[0].platform_name,
                    'platformVersion': env.devices[0].platform_version,
                    'deviceName': env.devices[0].device_name,
                    'appPackage': 'in.togetu.video',
                    'clearSystemFiles': True,
                    'appActivity': env.app_activity,

                    'noReset':True,
                    'noSign': True
                    }
    host = "http://localhost:4723/wd/hub"
    driver = webdriver.Remote(host, capabilities)
    yield ElementActions(driver).reset(driver)
    driver.quit()
Example #10
	def load_parameters(self):
		"""
		Loading parameters and setting up variables from the ROS environment.
		"""

		# ----- General Setup ----- #
		self.prefix = rospy.get_param("setup/prefix")
		self.start = np.array(rospy.get_param("setup/start"))*(math.pi/180.0)
		self.T = rospy.get_param("setup/T")
		self.timestep = rospy.get_param("setup/timestep")
		self.save_dir = rospy.get_param("setup/save_dir")

		# Openrave parameters for the environment.
		model_filename = rospy.get_param("setup/model_filename")
		object_centers = rospy.get_param("setup/object_centers")
		self.environment = Environment(model_filename, object_centers)

		# ----- Controller Setup ----- #
		# Retrieve controller specific parameters.
		controller_type = rospy.get_param("controller/type")  
		if controller_type == "pid":
			# P, I, D gains.
			P = rospy.get_param("controller/p_gain") * np.eye(7)
			I = rospy.get_param("controller/i_gain") * np.eye(7)
			D = rospy.get_param("controller/d_gain") * np.eye(7)

			# Stores proximity threshold.
			epsilon = rospy.get_param("controller/epsilon")

			# Stores maximum COMMANDED joint torques.
			MAX_CMD = rospy.get_param("controller/max_cmd") * np.eye(7)

			self.controller = PIDController(P, I, D, epsilon, MAX_CMD)
		else:
			raise Exception('Controller {} not implemented.'.format(controller_type))

		# Tell controller to move to start.
		self.controller.set_trajectory(Trajectory([self.start], [0.0]))

		# Stores current COMMANDED joint torques.
		self.cmd = np.eye(7)

		# Utilities for recording data.
		self.expUtil = experiment_utils.ExperimentUtils(self.save_dir)
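
# Illustrative only: seeding the parameter server with the controller keys that
# load_parameters() reads above. In a real launch these values would come from
# a rosparam YAML file; the gains shown here are placeholders, not real tuning.
import rospy

rospy.set_param("controller/type", "pid")
rospy.set_param("controller/p_gain", 50.0)
rospy.set_param("controller/i_gain", 0.0)
rospy.set_param("controller/d_gain", 20.0)
rospy.set_param("controller/epsilon", 0.08)
rospy.set_param("controller/max_cmd", 40.0)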
Example #11
def action2():
    env = Environment().get_environment_info()
    capabilities = {
        'platformName': env.devices[0].platform_name,
        'platformVersion': env.devices[0].platform_version,
        'deviceName': env.devices[0].device_name,
        'app': env.apk,
        'clearSystemFiles': True,
        'appActivity': env.app_activity,
        'appPackage': env.app_package,
        #'automationName': 'UIAutomator2',
        'noSign': True,
        'noReset': True,
        'systemPort': random.randint(4700, 4900)
    }
    host = "http://localhost:4724/wd/hub"
    driver = webdriver.Remote(host, capabilities)
    yield ElementActions(driver).reset(driver)
    driver.quit()
Example #12
    def get_performance(self):
        performance_dict_1 = dict()
        performance_dict_2 = dict()
        for mine_density in np.arange(0.01, 0.2, 0.01):
            print(mine_density)
            performance_dict_1[mine_density] = dict()
            performance_dict_2[mine_density] = dict()

            final_scores_1 = list()
            mines_hit_1 = list()
            correct_mines_1 = list()
            incorrect_mines_1 = list()

            final_scores_2 = list()
            mines_hit_2 = list()
            correct_mines_2 = list()
            incorrect_mines_2 = list()
            for _ in range(10):
                env = Environment(n=15,
                                  mine_density=mine_density,
                                  end_game_on_mine_hit=False,
                                  visual=False)
                env.generate_environment()

                agent1 = CSPAgent(env=env, end_game_on_mine_hit=False)
                agent1.play()

                agent2 = BonusCSPAgent(env=env, end_game_on_mine_hit=False)
                agent2.play()

                # agent = ProbCSPAgent(env = env,
                #                       end_game_on_mine_hit = False,
                #                       use_probability_agent = self.use_probability_agent,
                #                       prob = 0.3)
                # agent.play()

                metrics_1 = agent1.get_gameplay_metrics()
                final_scores_1.append(metrics_1["final_score"])
                mines_hit_1.append(metrics_1["number_of_mines_hit"])

                metrics_2 = agent2.get_gameplay_metrics()
                final_scores_2.append(metrics_2["final_score"])
                mines_hit_2.append(metrics_2["number_of_mines_hit"])

            final_score_1 = np.mean(final_scores_1)
            num_mines_hit_1 = np.mean(mines_hit_1)

            final_score_2 = np.mean(final_scores_2)
            num_mines_hit_2 = np.mean(mines_hit_2)

            performance_dict_1[mine_density]["final_score"] = final_score_1
            performance_dict_1[mine_density][
                "number_of_mines_hit"] = num_mines_hit_1

            performance_dict_2[mine_density]["final_score"] = final_score_2
            performance_dict_2[mine_density][
                "number_of_mines_hit"] = num_mines_hit_2

        mine_densities_1 = performance_dict_1.keys()
        final_scores_1 = [
            performance_dict_1[density]["final_score"]
            for density in performance_dict_1
        ]
        mines_hit_1 = [
            performance_dict_1[density]["number_of_mines_hit"]
            for density in performance_dict_1
        ]

        mine_densities_2 = performance_dict_2.keys()
        final_scores_2 = [
            performance_dict_2[density]["final_score"]
            for density in performance_dict_2
        ]
        mines_hit_2 = [
            performance_dict_2[density]["number_of_mines_hit"]
            for density in performance_dict_2
        ]

        plt.plot(mine_densities_1,
                 final_scores_1,
                 marker='o',
                 label="Normal CSP Agent")
        plt.plot(mine_densities_2,
                 final_scores_2,
                 marker='o',
                 label="Bonus CSP Agent")
        plt.xlabel("Mine Density")
        plt.ylabel("Average Final Score")
        plt.legend()
        plt.savefig('avg_final_score_bonus.png')
        plt.close()

        plt.plot(mine_densities_1,
                 mines_hit_1,
                 marker='o',
                 label="Normal CSP Agent")
        plt.plot(mine_densities_2,
                 mines_hit_2,
                 marker='o',
                 label="Bonus CSP Agent")
        plt.xlabel("Mine Density")
        plt.ylabel("Average Density of Mines Hit")
        plt.legend()
        plt.savefig('avg_density_of_mines_hit_bonus.png')
        plt.close()

        return performance_dict_1, performance_dict_2
Example #13
from flask import Flask
from flask_restx import Api
from flask_bcrypt import Bcrypt
from flask_jwt_extended import JWTManager

# Initial Flask configuration
app = Flask(__name__)
app.config['JWT_SECRET_KEY'] = "12usuario12"
app.config['JWT_ALGORITHM'] = "HS256"

bcrypt = Bcrypt(app)
jwt = JWTManager(app)

api = Api(
    app,
    title='Usuarios corporativos',
    version='2.0',
    description='Creando servicio a usuarios corporativos',
)

from utils.environment import Environment

config = Environment().settingsGeneral()
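
# Hypothetical entrypoint (not shown in the source): running the service
# locally. Host, port and debug flag are illustrative.
if __name__ == '__main__':
    app.run(host='0.0.0.0', port=8080, debug=True)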
Example #14
from flask import Flask
from flask_restx import Api

from flask_bcrypt import Bcrypt  #JWT
from flask_jwt_extended import JWTManager  #JWT

app = Flask(__name__)
app.config['JWT_SECRET_KEY'] = "HolaMensajedePruebas"  #JWT
app.config['JWT_ALGORITHM'] = "HS256"  #JWT

bcrypt = Bcrypt(app)
jwt = JWTManager(app)

api = Api(
    app,
    title='Proyecto API CRUD Users',
    version='1.0',
    description='aplicacion CRUD',
)

from utils.environment import Environment
config = Environment().configGeneral()
Example #15
    def get_vin_box(self):
        vin = Environment().get_inited_config().vin_success
        box = Environment().get_inited_config().box_success
        L.d('VIN: %s  Box ID: %s' % (vin, box))
        return [vin, box]
Example #16
	def plot_performance_metrics(self):
		algo_performance = dict()

		env = Environment(n = 20,
						  p = self.probability_of_obstacles,
						  fire = self.fire)

		# env.generate_maze()
		# env.create_graph_from_maze()

		for n in np.arange(10, 100, 10):
			print(n)

			for algo in ["astar"]:
				for heuristic in ["euclid", "manhattan"]:
					print(algo, heuristic)

					if (algo + "_" + heuristic) not in algo_performance:
						algo_performance[(algo + "_" + heuristic)] = dict()
				
					if algo not in algo_performance:
						algo_performance[algo] = dict()
				
					env.algorithm = algo
					env.create_graph_from_maze()
				
					path_finder = PathFinderAlgorithm(environment = env,
													  algorithm = algo,
													  visual = self.visual,
													  heuristic = heuristic)
					path_finder.run_path_finder_algorithm()
					performances = path_finder.performance_dict
				
					if "path_length" not in algo_performance[(algo + "_" + heuristic)]:
						algo_performance[(algo + "_" + heuristic)]["path_length"] = []

					if "maximum_fringe_size" not in algo_performance[(algo + "_" + heuristic)]:
						algo_performance[(algo + "_" + heuristic)]["maximum_fringe_size"] = []

					if "number_of_nodes_expanded" not in algo_performance[(algo + "_" + heuristic)]:
						algo_performance[(algo + "_" + heuristic)]["number_of_nodes_expanded"] = []

					algo_performance[(algo + "_" + heuristic)]['path_length'].append(performances['path_length'])
					algo_performance[(algo + "_" + heuristic)]['maximum_fringe_size'].append(performances['maximum_fringe_size'])
					algo_performance[(algo + "_" + heuristic)]['number_of_nodes_expanded'].append(performances['number_of_nodes_expanded'])

			for algo in ["astar", "thin_astar"]:
				for heuristic in ["euclid", "manhattan"]:
					print(algo, heuristic)

					if (algo + "_" + heuristic) not in algo_performance:
						algo_performance[(algo + "_" + heuristic)] = dict()

					env.algorithm = algo

					path_finder = PathFinderAlgorithm(environment = env,
													  algorithm = algo,
													  q = q,
													  visual = self.visual,
													  heuristic = heuristic)
					path_finder.run_path_finder_algorithm()
					performances = path_finder.performance_dict

					if "path_length" not in algo_performance[(algo + "_" + heuristic)]:
						algo_performance[(algo + "_" + heuristic)]["path_length"] = []

					if "maximum_fringe_size" not in algo_performance[(algo + "_" + heuristic)]:
						algo_performance[(algo + "_" + heuristic)]["maximum_fringe_size"] = []

					if "number_of_nodes_expanded" not in algo_performance[(algo + "_" + heuristic)]:
						algo_performance[(algo + "_" + heuristic)]["number_of_nodes_expanded"] = []

					algo_performance[(algo + "_" + heuristic)]['path_length'].append(performances['path_length'])
					algo_performance[(algo + "_" + heuristic)]['maximum_fringe_size'].append(performances['maximum_fringe_size'])
					algo_performance[(algo + "_" + heuristic)]['number_of_nodes_expanded'].append(performances['number_of_nodes_expanded'])

		return algo_performance
Example #17
    def load_parameters(self):
        """
		Loading parameters and setting up variables from the ROS environment.
		"""
        # ----- General Setup ----- #
        self.prefix = rospy.get_param("setup/prefix")
        pick = rospy.get_param("setup/start")
        place = rospy.get_param("setup/goal")
        self.start = np.array(pick) * (math.pi / 180.0)
        self.goal = np.array(place) * (math.pi / 180.0)
        self.goal_pose = None if rospy.get_param(
            "setup/goal_pose") == "None" else rospy.get_param(
                "setup/goal_pose")
        self.T = rospy.get_param("setup/T")
        self.timestep = rospy.get_param("setup/timestep")
        self.save_dir = rospy.get_param("setup/save_dir")
        self.feat_list = rospy.get_param("setup/feat_list")
        self.weights = rospy.get_param("setup/feat_weights")
        self.INTERACTION_TORQUE_THRESHOLD = rospy.get_param(
            "setup/INTERACTION_TORQUE_THRESHOLD")
        self.INTERACTION_TORQUE_EPSILON = rospy.get_param(
            "setup/INTERACTION_TORQUE_EPSILON")

        # Openrave parameters for the environment.
        model_filename = rospy.get_param("setup/model_filename")
        object_centers = rospy.get_param("setup/object_centers")
        self.environment = Environment(model_filename, object_centers)

        # ----- Planner Setup ----- #
        # Retrieve the planner specific parameters.
        planner_type = rospy.get_param("planner/type")
        if planner_type == "trajopt":
            max_iter = rospy.get_param("planner/max_iter")
            num_waypts = rospy.get_param("planner/num_waypts")

            # Initialize planner and compute trajectory to track.
            self.planner = TrajoptPlanner(self.feat_list, max_iter, num_waypts,
                                          self.environment)
        else:
            raise Exception('Planner {} not implemented.'.format(planner_type))

        self.traj = self.planner.replan(self.start, self.goal, self.goal_pose,
                                        self.weights, self.T, self.timestep)
        self.traj_plan = self.traj.downsample(self.planner.num_waypts)

        # Track if you have reached the start/goal of the path.
        self.reached_start = False
        self.reached_goal = False

        # Save the intermediate target configuration.
        self.curr_pos = None

        # ----- Controller Setup ----- #
        # Retrieve controller specific parameters.
        controller_type = rospy.get_param("controller/type")
        if controller_type == "pid":
            # P, I, D gains.
            P = rospy.get_param("controller/p_gain") * np.eye(7)
            I = rospy.get_param("controller/i_gain") * np.eye(7)
            D = rospy.get_param("controller/d_gain") * np.eye(7)

            # Stores proximity threshold.
            epsilon = rospy.get_param("controller/epsilon")

            # Stores maximum COMMANDED joint torques.
            MAX_CMD = rospy.get_param("controller/max_cmd") * np.eye(7)

            self.controller = PIDController(P, I, D, epsilon, MAX_CMD)
        else:
            raise Exception(
                'Controller {} not implemented.'.format(controller_type))

        # Planner tells controller what plan to follow.
        self.controller.set_trajectory(self.traj)

        # Stores current COMMANDED joint torques.
        self.cmd = np.eye(7)

        # ----- Learner Setup ----- #
        constants = {}
        constants["UPDATE_GAINS"] = rospy.get_param("learner/UPDATE_GAINS")
        constants["MAX_WEIGHTS"] = rospy.get_param("learner/MAX_WEIGHTS")
        constants["FEAT_RANGE"] = rospy.get_param("learner/FEAT_RANGE")
        constants["P_beta"] = rospy.get_param("learner/P_beta")
        constants["alpha"] = rospy.get_param("learner/alpha")
        constants["n"] = rospy.get_param("learner/n")
        self.feat_method = rospy.get_param("learner/type")
        self.learner = PHRILearner(self.feat_method, self.feat_list,
                                   self.environment, constants)

        # ---- Experimental Utils ---- #
        self.expUtil = experiment_utils.ExperimentUtils(self.save_dir)
        # Update the list of replanned plans with new trajectory plan.
        self.expUtil.update_replanned_trajList(0.0, self.traj_plan.waypts)
        # Update the list of replanned waypoints with new waypoints.
        self.expUtil.update_replanned_wayptsList(0.0, self.traj.waypts)
Example #18
class MazeRunner():

	def __init__(self, maze_dimension, probability_of_obstacles, algorithm, visual, heuristic, fire):
		self.algorithm = algorithm
		self.maze_dimension = maze_dimension
		self.probability_of_obstacles = probability_of_obstacles
		self.visual = visual
		self.heuristic = heuristic
		self.fire = fire

	def create_environment(self, new_maze = None):

		# Create the maze
		self.env = Environment(algorithm = self.algorithm, n = self.maze_dimension, p = self.probability_of_obstacles, fire = self.fire)
		self.env.generate_maze(new_maze = new_maze)

		# Generate graph from the maze
		self.env.create_graph_from_maze()

	def run(self):

		# Run the path finding algorithm on the graph
		self.path_finder = PathFinderAlgorithm(environment = self.env,
											   algorithm = self.algorithm,
											   visual = self.visual,
											   heuristic = self.heuristic)
		self.path_finder.run_path_finder_algorithm()

	def find_solvable_map_size(self):
		dim_list = range(10, 250, 10)
		runtimes = dict()
		for dim in dim_list:
			print("Dim = " + str(dim))

			self.maze_dimension = dim
			self.create_environment()

			start = time()
			self.run()
			end = time() - start
			print(end)
			runtimes[dim] = end
			del self.path_finder

		plt.plot(dim_list, runtimes.values(), marker = "o")
		plt.figure()
		plt.show()

	def plot_performance_metrics(self):
		algo_performance = dict()

		env = Environment(n = 20,
						  p = self.probability_of_obstacles,
						  fire = self.fire)

		# env.generate_maze()
		# env.create_graph_from_maze()

		for n in np.arange(10, 100, 10):
			print(n)

			for algo in ["astar"]:
				for heuristic in ["euclid", "manhattan"]:
					print(algo, heuristic)

					if (algo + "_" + heuristic) not in algo_performance:
						algo_performance[(algo + "_" + heuristic)] = dict()
				
					if algo not in algo_performance:
						algo_performance[algo] = dict()
				
					env.algorithm = algo
					env.create_graph_from_maze()
				
					path_finder = PathFinderAlgorithm(environment = env,
													  algorithm = algo,
													  visual = self.visual,
													  heuristic = heuristic)
					path_finder.run_path_finder_algorithm()
					performances = path_finder.performance_dict
				
					if "path_length" not in algo_performance[(algo + "_" + heuristic)]:
						algo_performance[(algo + "_" + heuristic)]["path_length"] = []

					if "maximum_fringe_size" not in algo_performance[(algo + "_" + heuristic)]:
						algo_performance[(algo + "_" + heuristic)]["maximum_fringe_size"] = []

					if "number_of_nodes_expanded" not in algo_performance[(algo + "_" + heuristic)]:
						algo_performance[(algo + "_" + heuristic)]["number_of_nodes_expanded"] = []

					algo_performance[(algo + "_" + heuristic)]['path_length'].append(performances['path_length'])
					algo_performance[(algo + "_" + heuristic)]['maximum_fringe_size'].append(performances['maximum_fringe_size'])
					algo_performance[(algo + "_" + heuristic)]['number_of_nodes_expanded'].append(performances['number_of_nodes_expanded'])

			for algo in ["astar", "thin_astar"]:
				for heuristic in ["euclid", "manhattan"]:
					print(algo, heuristic)

					if (algo + "_" + heuristic) not in algo_performance:
						algo_performance[(algo + "_" + heuristic)] = dict()

					env.algorithm = algo

					path_finder = PathFinderAlgorithm(environment = env,
													  algorithm = algo,
													  q = q,
													  visual = self.visual,
													  heuristic = heuristic)
					path_finder.run_path_finder_algorithm()
					performances = path_finder.performance_dict

					if "path_length" not in algo_performance[(algo + "_" + heuristic)]:
						algo_performance[(algo + "_" + heuristic)]["path_length"] = []

					if "maximum_fringe_size" not in algo_performance[(algo + "_" + heuristic)]:
						algo_performance[(algo + "_" + heuristic)]["maximum_fringe_size"] = []

					if "number_of_nodes_expanded" not in algo_performance[(algo + "_" + heuristic)]:
						algo_performance[(algo + "_" + heuristic)]["number_of_nodes_expanded"] = []

					algo_performance[(algo + "_" + heuristic)]['path_length'].append(performances['path_length'])
					algo_performance[(algo + "_" + heuristic)]['maximum_fringe_size'].append(performances['maximum_fringe_size'])
					algo_performance[(algo + "_" + heuristic)]['number_of_nodes_expanded'].append(performances['number_of_nodes_expanded'])

		return algo_performance
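
# Hypothetical driver for the MazeRunner class above; argument values are
# illustrative, not taken from the original module.
if __name__ == "__main__":
	runner = MazeRunner(maze_dimension=20, probability_of_obstacles=0.2,
						algorithm="astar", visual=False,
						heuristic="euclid", fire=False)
	runner.create_environment()
	runner.run()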
Example #19
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Imported libraries
from utils.environment import Environment
import unittest
import json

# Project modules
from application import app
from utils.userGenerator import UserGenerator
generateUser = UserGenerator().generateUser

BASE_URL = "http://localhost:8080"
SIGNUP_URL = f"{BASE_URL}/home/signup"
LOGIN_URL = f"{BASE_URL}/home/login"
CHAT_URL = f"{BASE_URL}/chat"
FRIENDS_URL = f"{BASE_URL}/dashboard/friends"
PROFILE_URL = f"{BASE_URL}/dashboard"
MONGO = Environment().settingsDB()
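
# A hedged sketch of a signup test built on the constants above; the payload
# comes from generateUser() and the accepted status codes are assumptions.
class SignupTest(unittest.TestCase):
    def setUp(self):
        self.client = app.test_client()

    def test_signup_new_user(self):
        user = generateUser()
        response = self.client.post("/home/signup", json=user)
        self.assertIn(response.status_code, (200, 201))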
Example #20
import sys

import pytest

from utils import L
from utils.environment import Environment
from utils.shell import Shell  # assumed import paths for the helpers used below
"""
run all case:
    python3 run.py

run one module case:
    python3 run.py test/test_home.py

run case with key word:
    python3 run.py -k <keyword>

"""

if __name__ == '__main__':
    env = Environment()
    xml_report_path = env.get_environment_info().xml_report
    html_report_path = env.get_environment_info().html_report
    # Start the tests
    args = [
        '-s', '-q', '--allure_features=My stdio1', '--alluredir',
        xml_report_path
    ]
    # args = ['-s', '-q'],'--allure_features=My stdio'
    self_args = sys.argv[1:]
    pytest.main(args)
    # Generate the HTML test report.
    cmd = 'allure generate %s -o %s' % (xml_report_path, html_report_path)
    try:
        Shell.invoke(cmd)
    except Exception as e:
        L.e('Failed to generate the allure report: %s' % e)
Example #21
import time

from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler

from utils import L
from utils.environment import Environment
from gen.gen_pages import GenPages  # assumed import path for GenPages


def gen_page_py():
    GenPages.gen_page_py()


class WatchHandler(PatternMatchingEventHandler):
    patterns = ["*.yaml"]

    def on_created(self, event):
        L.i('Detected a change in the yaml file')
        try:
            gen_page_py()
        except Exception as e:
            L.e('\n!!!!!!!---pages.yaml---!!!!!!\nError while parsing pages.yaml\n'
                'Please check the file under {}, fix it and save again.'.format(self.watch_path))


if __name__ == "__main__":
    event_handler = WatchHandler()
    full_path = Environment().get_environment_info().pages_yaml
    # watch_path is read by the error handler above, so attach it here.
    event_handler.watch_path = full_path
    print(full_path)
    observer = Observer()
    observer.schedule(event_handler, full_path)
    observer.start()
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
Example #22
    def get_account():

        account = Environment().get_inited_config().account_success
        pwd = Environment().get_inited_config().password_success
        L.d('Account: %s  Password: %s' % (account, pwd))
        return [account, pwd]
Example #23
class MineSweeper():
    BasicAgent = "base_agent"
    CSPAgent = "csp_agent"

    def __init__(self,
                 ground_dimension=None,
                 mine_density=None,
                 agent_name=None,
                 visual=False,
                 end_game_on_mine_hit=True,
                 bonus_uncertain_p=0.0):
        self.ground_dimension = ground_dimension
        self.mine_density = mine_density
        self.agent_name = agent_name
        self.visual = visual
        self.end_game_on_mine_hit = end_game_on_mine_hit
        self.bonus_uncertain_p = bonus_uncertain_p
        self.use_probability_agent = False

        if self.bonus_uncertain_p > 0:
            self.use_probability_agent = True

    def create_environment(self):

        # Create the maze
        self.env = Environment(n=self.ground_dimension,
                               mine_density=self.mine_density,
                               visual=self.visual,
                               end_game_on_mine_hit=self.end_game_on_mine_hit)
        self.env.generate_environment()

    def run(self):

        # Use the agent to find mines in our mine-sweeper environment
        if self.agent_name == self.BasicAgent:
            self.mine_sweeper_agent = BaseAgent(env=self.env)
        elif self.agent_name == self.CSPAgent:
            self.mine_sweeper_agent = CSPAgent(
                env=self.env, end_game_on_mine_hit=self.end_game_on_mine_hit)
        else:
            self.mine_sweeper_agent = ProbCSPAgent(
                env=self.env,
                end_game_on_mine_hit=self.end_game_on_mine_hit,
                use_probability_agent=self.use_probability_agent,
                prob=self.bonus_uncertain_p)

        self.mine_sweeper_agent.play()
        metrics = self.mine_sweeper_agent.get_gameplay_metrics()
        # print("Game won = ", str(metrics["game_won"]))
        print("Number of mines hit = ", str(metrics["number_of_mines_hit"]))
        print("Number of mines flagged correctly = ",
              str(metrics["number_of_mines_flagged_correctly"]))
        print("Number of cells flagged incorrectly = ",
              str(metrics["number_of_cells_flagged_incorrectly"]))

        self.env.render_env(100)

    def get_performance(self):
        performance_dict_1 = dict()
        performance_dict_2 = dict()
        for mine_density in np.arange(0.01, 0.2, 0.01):
            print(mine_density)
            performance_dict_1[mine_density] = dict()
            performance_dict_2[mine_density] = dict()

            final_scores_1 = list()
            mines_hit_1 = list()
            correct_mines_1 = list()
            incorrect_mines_1 = list()

            final_scores_2 = list()
            mines_hit_2 = list()
            correct_mines_2 = list()
            incorrect_mines_2 = list()
            for _ in range(10):
                env = Environment(n=15,
                                  mine_density=mine_density,
                                  end_game_on_mine_hit=False,
                                  visual=False)
                env.generate_environment()

                agent1 = CSPAgent(env=env, end_game_on_mine_hit=False)
                agent1.play()

                agent2 = BonusCSPAgent(env=env, end_game_on_mine_hit=False)
                agent2.play()

                # agent = ProbCSPAgent(env = env,
                #                       end_game_on_mine_hit = False,
                #                       use_probability_agent = self.use_probability_agent,
                #                       prob = 0.3)
                # agent.play()

                metrics_1 = agent1.get_gameplay_metrics()
                final_scores_1.append(metrics_1["final_score"])
                mines_hit_1.append(metrics_1["number_of_mines_hit"])

                metrics_2 = agent2.get_gameplay_metrics()
                final_scores_2.append(metrics_2["final_score"])
                mines_hit_2.append(metrics_2["number_of_mines_hit"])

            final_score_1 = np.mean(final_scores_1)
            num_mines_hit_1 = np.mean(mines_hit_1)

            final_score_2 = np.mean(final_scores_2)
            num_mines_hit_2 = np.mean(mines_hit_2)

            performance_dict_1[mine_density]["final_score"] = final_score_1
            performance_dict_1[mine_density][
                "number_of_mines_hit"] = num_mines_hit_1

            performance_dict_2[mine_density]["final_score"] = final_score_2
            performance_dict_2[mine_density][
                "number_of_mines_hit"] = num_mines_hit_2

        mine_densities_1 = performance_dict_1.keys()
        final_scores_1 = [
            performance_dict_1[density]["final_score"]
            for density in performance_dict_1
        ]
        mines_hit_1 = [
            performance_dict_1[density]["number_of_mines_hit"]
            for density in performance_dict_1
        ]

        mine_densities_2 = performance_dict_2.keys()
        final_scores_2 = [
            performance_dict_2[density]["final_score"]
            for density in performance_dict_2
        ]
        mines_hit_2 = [
            performance_dict_2[density]["number_of_mines_hit"]
            for density in performance_dict_2
        ]

        plt.plot(mine_densities_1,
                 final_scores_1,
                 marker='o',
                 label="Normal CSP Agent")
        plt.plot(mine_densities_2,
                 final_scores_2,
                 marker='o',
                 label="Bonus CSP Agent")
        plt.xlabel("Mine Density")
        plt.ylabel("Average Final Score")
        plt.legend()
        plt.savefig('avg_final_score_bonus.png')
        plt.close()

        plt.plot(mine_densities_1,
                 mines_hit_1,
                 marker='o',
                 label="Normal CSP Agent")
        plt.plot(mine_densities_2,
                 mines_hit_2,
                 marker='o',
                 label="Bonus CSP Agent")
        plt.xlabel("Mine Density")
        plt.ylabel("Average Density of Mines Hit")
        plt.legend()
        plt.savefig('avg_density_of_mines_hit_bonus.png')
        plt.close()

        return performance_dict_1, performance_dict_2
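
# Hypothetical driver for the MineSweeper class above; constructor arguments
# are illustrative, not taken from the original module.
if __name__ == "__main__":
    sweeper = MineSweeper(ground_dimension=15, mine_density=0.1,
                          agent_name=MineSweeper.CSPAgent, visual=False)
    sweeper.create_environment()
    sweeper.run()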
Example #24
import matplotlib.pyplot as plt
from utils.environment import Environment

print("")
print("#==============================#")
print("#  Define Global Variables")
print("#==============================#")
MAP_DIR = "./maps/test/"
MONSTER_DIR = "./monsters/"
WAVE_DIR = "./waves/test/"

print("")
print("#==============================#")
print("#  Define the Environment")
print("#==============================#")
environment = Environment(MAP_DIR, MONSTER_DIR, WAVE_DIR)
plt.pause(0.1)

print("")
print("#==============================#")
print("#  Walk Monster Path")
print("#==============================#")
print("#")
print("#  visualize the walking")
plt.figure(1)
boxProps = dict(boxstyle='round', facecolor='wheat', alpha=0.5)

winText = "You somehow did it ^.^"
looseText = "You lost...as suspected..."

frameRate = 0.01  # [s]
Example #25
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Imported libraries
from flask import Flask
from flask_restx import Api
from flask_bcrypt import Bcrypt
from flask_jwt_extended import JWTManager
from flask_pymongo import PyMongo
from utils.environment import Environment

# Initial Flask configuration
app = Flask(__name__)
env = Environment()

# JWT configuration
settingsJWT = env.settingsJWT()
app.config['JWT_SECRET_KEY'] = settingsJWT['JWT_SECRET_KEY']
app.config['JWT_ALGORITHM'] = settingsJWT['JWT_ALGORITHM']
jwt = JWTManager(app)

# PyMongo configuration
settingsDB = env.settingsDB()
app.config['MONGO_URI'] = settingsDB['MONGO_URI']
mongo = PyMongo(app)

bcrypt = Bcrypt(app)
app.config['PROPAGATE_EXCEPTIONS'] = True

api = Api(app,
          title='Pachaqtec Intranet',
Example #26
class FeatureElicitator():
	"""
	This class represents a node that moves the Jaco with PID control AND supports receiving human corrections online.
	Additionally, it implements a protocol for elicitating novel features from human input.

	Subscribes to:
		/$prefix$/out/joint_angles	- Jaco sensed joint angles
		/$prefix$/out/joint_torques - Jaco sensed joint torques

	Publishes to:
		/$prefix$/in/joint_velocity	- Jaco commanded joint velocities
	"""

	def __init__(self):
		# Create ROS node.
		rospy.init_node("feature_elicitator")

		# Load parameters and set up subscribers/publishers.
		self.load_parameters()
		self.register_callbacks()

		# Start admittance control mode.
		ros_utils.start_admittance_mode(self.prefix)

		# Publish to ROS at 100hz.
		r = rospy.Rate(100)

		print "----------------------------------"
		print "Moving robot, type Q to quit:"

		while not rospy.is_shutdown():

			if sys.stdin in select.select([sys.stdin], [], [], 0)[0]:
				line = raw_input()
				if line == "q" or line == "Q" or line == "quit":
					break

			self.vel_pub.publish(ros_utils.cmd_to_JointVelocityMsg(self.cmd))
			r.sleep()

		print "----------------------------------"
		ros_utils.stop_admittance_mode(self.prefix)

	def load_parameters(self):
		"""
		Loading parameters and setting up variables from the ROS environment.
		"""
		# ----- General Setup ----- #
		self.prefix = rospy.get_param("setup/prefix")
		pick = rospy.get_param("setup/start")
		place = rospy.get_param("setup/goal")
		self.start = np.array(pick)*(math.pi/180.0)
		self.goal = np.array(place)*(math.pi/180.0)
		self.goal_pose = None if rospy.get_param("setup/goal_pose") == "None" else rospy.get_param("setup/goal_pose")
		self.T = rospy.get_param("setup/T")
		self.timestep = rospy.get_param("setup/timestep")
		self.save_dir = rospy.get_param("setup/save_dir")
		self.INTERACTION_TORQUE_THRESHOLD = rospy.get_param("setup/INTERACTION_TORQUE_THRESHOLD")
		self.INTERACTION_TORQUE_EPSILON = rospy.get_param("setup/INTERACTION_TORQUE_EPSILON")
		self.CONFIDENCE_THRESHOLD = rospy.get_param("setup/CONFIDENCE_THRESHOLD")
		self.N_QUERIES = rospy.get_param("setup/N_QUERIES")
		self.nb_layers = rospy.get_param("setup/nb_layers")
		self.nb_units = rospy.get_param("setup/nb_units")

		# Openrave parameters for the environment.
		model_filename = rospy.get_param("setup/model_filename")
		object_centers = rospy.get_param("setup/object_centers")
		feat_list = rospy.get_param("setup/feat_list")
		weights = rospy.get_param("setup/feat_weights")
		FEAT_RANGE = rospy.get_param("setup/FEAT_RANGE")
		feat_range = [FEAT_RANGE[feat_list[feat]] for feat in range(len(feat_list))]
		LF_dict = rospy.get_param("setup/LF_dict")
		self.environment = Environment(model_filename, object_centers, feat_list, feat_range, np.array(weights), LF_dict)

		# ----- Planner Setup ----- #
		# Retrieve the planner specific parameters.
		planner_type = rospy.get_param("planner/type")
		if planner_type == "trajopt":
			max_iter = rospy.get_param("planner/max_iter")
			num_waypts = rospy.get_param("planner/num_waypts")

			# Initialize planner and compute trajectory to track.
			self.planner = TrajoptPlanner(max_iter, num_waypts, self.environment)
		else:
			raise Exception('Planner {} not implemented.'.format(planner_type))

		self.traj = self.planner.replan(self.start, self.goal, self.goal_pose, self.T, self.timestep)
		self.traj_plan = self.traj.downsample(self.planner.num_waypts)

		# Track if you have reached the start/goal of the path.
		self.reached_start = False
		self.reached_goal = False
		self.feature_learning_mode = False
		self.interaction_mode = False

		# Save the intermediate target configuration. 
		self.curr_pos = None

		# Track data and keep stored.
		self.interaction_data = []
		self.interaction_time = []
		self.feature_data = []
		self.track_data = False

		# ----- Controller Setup ----- #
		# Retrieve controller specific parameters.
		controller_type = rospy.get_param("controller/type")
		if controller_type == "pid":
			# P, I, D gains.
			P = rospy.get_param("controller/p_gain") * np.eye(7)
			I = rospy.get_param("controller/i_gain") * np.eye(7)
			D = rospy.get_param("controller/d_gain") * np.eye(7)

			# Stores proximity threshold.
			epsilon = rospy.get_param("controller/epsilon")

			# Stores maximum COMMANDED joint torques.
			MAX_CMD = rospy.get_param("controller/max_cmd") * np.eye(7)

			self.controller = PIDController(P, I, D, epsilon, MAX_CMD)
		else:
			raise Exception('Controller {} not implemented.'.format(controller_type))

		# Planner tells controller what plan to follow.
		self.controller.set_trajectory(self.traj)

		# Stores current COMMANDED joint torques.
		self.cmd = np.eye(7)

		# ----- Learner Setup ----- #
		constants = {}
		constants["step_size"] = rospy.get_param("learner/step_size")
		constants["P_beta"] = rospy.get_param("learner/P_beta")
		constants["alpha"] = rospy.get_param("learner/alpha")
		constants["n"] = rospy.get_param("learner/n")
		self.feat_method = rospy.get_param("learner/type")
		self.learner = PHRILearner(self.feat_method, self.environment, constants)
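
	# A speculative sketch of register_callbacks(), which the constructor calls
	# but which is not shown here: it wires up the topics named in the class
	# docstring. The kinova_msgs message types and callback names are assumptions.
	def register_callbacks(self):
		from kinova_msgs.msg import JointAngles, JointTorque, JointVelocity

		# Jaco sensed joint angles and torques (subscribed).
		rospy.Subscriber('%s/out/joint_angles' % self.prefix,
						 JointAngles, self.joint_angles_callback, queue_size=1)
		rospy.Subscriber('%s/out/joint_torques' % self.prefix,
						 JointTorque, self.joint_torques_callback, queue_size=1)

		# Jaco commanded joint velocities (published, used in the main loop above).
		self.vel_pub = rospy.Publisher('%s/in/joint_velocity' % self.prefix,
									   JointVelocity, queue_size=1)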
Example #27
    def _run_thinning_astar(self, root, dest):

        current_maze_copy = self.environment.maze.copy()
        simpler_maze = self._create_simpler_maze(maze=current_maze_copy)
        simpler_env = Environment(n=self.environment.n, p=self.environment.p)
        simpler_env.generate_maze(new_maze=simpler_maze)

        # Root is at a distance of 0 from itself
        root.distance_from_source = 0

        self.fringe = Q.PriorityQueue()
        self.fringe.put(root)

        self.visited.append(root)
        while self.fringe.queue:

            # Keep track of maximum fringe length
            fringe_length = len(self.fringe.queue)
            if fringe_length >= self.max_fringe_length:
                self.max_fringe_length = fringe_length

            temp_path = []
            node = self.fringe.get()

            if node not in self.visited:
                self.visited.append(node)

            node_children = node.get_children(node=node,
                                              algorithm=self.algorithm)

            for child in node_children:
                if child is None or child in self.visited:
                    continue

                if child not in self.fringe.queue:
                    child.parent = node

                    temp_simpler_maze = simpler_maze.copy()
                    temp_simpler_maze[child.row, child.column] = 4
                    simpler_env.modify_environment(new_maze=temp_simpler_maze)

                    child.distance_from_dest = self._calculate_thinning_heuristic(
                        simpler_maze_env=simpler_env,
                        node_row=child.row,
                        node_column=child.column)
                    child.distance_from_source = node.distance_from_source + 1
                    self.fringe.put(child)
                else:
                    if (child.get_heuristic() >=
                            node.distance_from_source + child.distance_from_dest):
                        child.parent = node
                        child.distance_from_source = node.distance_from_source + 1

            # Get the path through which you reach this node from the root node
            flag = True
            temp_node = node
            while (flag):
                temp_path.append(temp_node)
                temp_node = temp_node.parent
                if temp_node is None:
                    flag = False
            temp_path_copy = temp_path.copy()

            # Update the color of the path which we found above by popping the root first and the subsequent nodes.
            while (len(temp_path) != 0):
                temp_node = temp_path.pop()

                # Visualisation Parameter added
                if self.visual == True:
                    self.environment.update_color_of_cell(
                        temp_node.row, temp_node.column)
                    self.environment.render_maze(title=self.title)

            # if you reach the destination, then break
            if (node == dest):
                break

            # We reset the entire path again to render a new path in the next iteration.
            while (len(temp_path_copy) != 0):
                temp_node = temp_path_copy.pop(0)

                # Visualisation Parameter added
                if self.visual == True:
                    self.environment.reset_color_of_cell(
                        temp_node.row, temp_node.column)
                    self.environment.render_maze(title=self.title)
Example #28
    parser.add_argument("--savefig", action="store_true", help="Save figure")
    parser.add_argument("--figdir", type=str, default='figures', help="Directory to place figures")
    parser.add_argument("-e", "--epsilon", type=float, default=0.25, help="Epsilon value for RDP algorithm")
    parser.add_argument("-n", "--solution", type=int, default=0, help="Number of solution")
    parser.add_argument("--video", action="store_true", help="Epsilon value for RDP algorithm")
    # parser.add_argument("-s", "--solver", type=str, default='impost', choices=('impost', 'astar', 'apf'), help="Solver")
    args = parser.parse_args()
    # print('Read targets data from ', args.targets)
    # with open(args.targets, 'r') as f:
    #     targets_data = load(f)

    print('Read solution data from ', args.result)
    with open(args.result, 'r') as f:
        data = load(f)

    env = Environment()
    for target_path in data[1:]:
        speed = target_path["items"][0]["length"] / target_path["items"][0]["duration"] * 3600
        env.add_ts(targets.LinearTarget((target_path["items"][0]["x"], target_path["items"][0]["y"]),
                                        (
                                            speed * cos(
                                                radians(target_path["items"][0]["begin_angle"])),
                                            speed * sin(
                                                radians(target_path["items"][0]["begin_angle"]))
                                        ),
                                        2))

    path = []
    time = 0
    first = True
    for segment in data[0]["items"]:
Example #29
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Created by zhouyuyan on 2017/5/3 14:14
from time import sleep
from Common.Element import *
from Common.action import ElementActions
from utils.environment import Environment

env = Environment().get_environment_info()


# """"登录页面测试步骤"""
#

def goto_login(self):
    global action
    action = ElementActions(driver=self.driver)
    sleep(4)
    my = get_name(self, "我的")
    check_my = lambda action: action.is_element_displayed(my)
    if check_my:
        my.click()
    sleep(2)
    try:

        lgbs = get_name(self, '登录')  # '登录' is the app's "Log in" button label
        check_lgbs = lambda action: action.is_element_displayed(lgbs)

        if check_lgbs(action):
            lgbs.click()
        sleep(2)
Example #30
    def load_parameters(self):
        """
		Loading parameters and setting up variables from the ROS environment.
		"""

        # ----- General Setup ----- #
        self.prefix = rospy.get_param("setup/prefix")
        pick = rospy.get_param("setup/start")
        place = rospy.get_param("setup/goal")
        self.start = np.array(pick) * (math.pi / 180.0)
        self.goal = np.array(place) * (math.pi / 180.0)
        self.goal_pose = None if rospy.get_param(
            "setup/goal_pose") == "None" else rospy.get_param(
                "setup/goal_pose")
        self.T = rospy.get_param("setup/T")
        self.timestep = rospy.get_param("setup/timestep")
        self.feat_list = rospy.get_param("setup/feat_list")
        self.weights = rospy.get_param("setup/feat_weights")

        # Openrave parameters for the environment.
        model_filename = rospy.get_param("setup/model_filename")
        object_centers = rospy.get_param("setup/object_centers")
        self.environment = Environment(model_filename, object_centers)

        # ----- Planner Setup ----- #
        # Retrieve the planner specific parameters.
        planner_type = rospy.get_param("planner/type")
        if planner_type == "trajopt":
            max_iter = rospy.get_param("planner/max_iter")
            num_waypts = rospy.get_param("planner/num_waypts")

            # Initialize planner and compute trajectory to track.
            self.planner = TrajoptPlanner(self.feat_list, max_iter, num_waypts,
                                          self.environment)
        else:
            raise Exception('Planner {} not implemented.'.format(planner_type))

        self.traj = self.planner.replan(self.start, self.goal, self.goal_pose,
                                        self.weights, self.T, self.timestep)

        # Save the intermediate target configuration.
        self.curr_pos = None

        # ----- Controller Setup ----- #
        # Retrieve controller specific parameters.
        controller_type = rospy.get_param("controller/type")
        if controller_type == "pid":
            # P, I, D gains.
            P = rospy.get_param("controller/p_gain") * np.eye(7)
            I = rospy.get_param("controller/i_gain") * np.eye(7)
            D = rospy.get_param("controller/d_gain") * np.eye(7)

            # Stores proximity threshold.
            epsilon = rospy.get_param("controller/epsilon")

            # Stores maximum COMMANDED joint torques.
            MAX_CMD = rospy.get_param("controller/max_cmd") * np.eye(7)

            self.controller = PIDController(P, I, D, epsilon, MAX_CMD)
        else:
            raise Exception(
                'Controller {} not implemented.'.format(controller_type))

        # Planner tells controller what plan to follow.
        self.controller.set_trajectory(self.traj)

        # Stores current COMMANDED joint torques.
        self.cmd = np.eye(7)