Example #1
 def __init__(self, name, config):
     env.__init__(self, name)
     self.env = gym.make(name)
     self.history_step = config.history_step
     self.ob_dims = config.ob_dims
     self.act_repeat = config.act_repeat
     self.history = []
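
Example #1 only wraps a gym environment and records history_step, ob_dims, act_repeat, and an empty history list. Below is a minimal standalone sketch, under the assumption that act_repeat and history are used in the usual action-repeat / observation-history way; the helper name step_with_repeat is hypothetical and not part of the original repository.

def step_with_repeat(env, action, act_repeat, history, history_step):
    # Hypothetical helper (an assumption, not the repo's code): repeat the same
    # action act_repeat times, accumulate the reward, and keep only the last
    # history_step observations in the history buffer.
    ob, total_reward, done, info = None, 0.0, False, {}
    for _ in range(act_repeat):
        ob, reward, done, info = env.step(action)  # classic 4-tuple gym step API
        total_reward += reward
        history.append(ob)
        if done:
            break
    del history[:-history_step]  # drop everything but the newest history_step entries
    return ob, total_reward, done, info
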
Example #2
 def __init__(self, name, config):
     env.__init__(self, name)
     #self.env = gym.make(name)
     self.inner_step = 0
     self.config = config
     # load data
     self.data_loader = DataLoader(
         self.config.task_data_path,self.config.trajectory_data_path)
     self.data_loader.load_task_static() # load the yellow taxi task data
     self.data_loader.get_trajectories() # load the Uber trajectory data
     #self.data_loader.overall_position_normalization()
     self.data_loader.get_merge_task(self.config.aim_day_num) 
     print("task generation")
     self.task_generator = TaskGenerator()
     self.task_generator.gen_task_list(self.data_loader.zip_data) # sample from the task distribution to build the task list
     self.task_generator.set_poisson_distribution(self.config.poisson_lamda, self.config.poisson_episode_num)
     print("trajectory sampling size: %d" % len(self.data_loader.trajectory_data))
     # new simulator
     self.simulator = StateSimulator()
     # reset
     self.simulator.trajector.init_sampling(
         self.data_loader.trajectory_data, self.config.trajector_sampling_size)  # sample trajectories to build the route data
     self.episode_task_num = self.config.episode_task_num
     self.speed_init()
     #self.task_sampling()
     #self.preprocess()
     # clear memory
     # self.data_loader.reset()
     # clear log
     self.log_file = codecs.open(self.config.log_file_path, "w", "utf8")
     self.log_file.close()
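
set_poisson_distribution(poisson_lamda, poisson_episode_num) indicates that the number of tasks per episode is drawn from a Poisson distribution. A standalone sketch of that sampling step with made-up parameter values (this is not the repo's TaskGenerator):

import numpy as np

poisson_lamda = 4        # hypothetical arrival rate
poisson_episode_num = 5  # hypothetical number of episodes

np.random.seed(0)  # fixed seed so the draw is repeatable
tasks_per_episode = np.random.poisson(poisson_lamda, poisson_episode_num)
print("tasks per episode:", tasks_per_episode)  # one Poisson draw per episode
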
Example #3
    def __init__(self, env_config):
        env.__init__(self, "trajEnv")
        #self.config = config
        self.config = self.load_config(env_config['config_path'])
        #self.env = gym.make(name)
        self.action_space = spaces.MultiDiscrete([self.config['max_par_size']] * self.config['max_task_size'])
        # #print self.action_space
        # par_feature = np.zeros((self.config['max_par_size'], self.config['par_feature_size']))
        # task_feature = np.zeros((self.config['max_task_size'], self.config['task_feature_size']))
        obs_size = self.config['max_par_size'] + self.config['max_task_size']
        feature_size = max(self.config['task_feature_size'], self.config['par_feature_size'])
        #self.observation_space = Box(low=-200, high=200, shape=(obs_size, feature_size), dtype=np.float64)
        self.observation_space = spaces.Tuple([
            # spaces.Box(
            #     low=-2000, high=2000, shape=(self.config['max_par_size'], self.config['par_feature_size']), dtype=np.float64),
            # spaces.Box(
            #     low=-2000, high=2000, shape=(self.config['max_task_size'], self.config['task_feature_size']), dtype=np.float64),
            spaces.Box(
                low=-20, high=20, shape=(self.config['max_task_size'], self.config['max_par_size']), dtype=np.float64),
            spaces.Box(
                low=-20, high=20, shape=(self.config['max_task_size'], self.config['max_par_size']), dtype=np.float64)
        ])
        #print self.observation_space
        self.inner_step = 0

        # load data
        self.data_loader = DataLoader(
            self.config['task_data_path'],self.config['trajectory_data_path'])
        self.data_loader.load_task_static() # load the yellow taxi task data
        self.data_loader.get_trajectories() # load the Uber trajectory data
        self.data_loader.overall_position_normalization()
        self.data_loader.get_merge_task(self.config['aim_day_num']) 
        print("task generation")
        self.task_generator = TaskGenerator()
        self.task_generator.gen_task_list(self.data_loader.zip_data) # sample from the task distribution to build the task list
        self.task_generator.set_poisson_distribution(self.config['poisson_lamda'], self.config['poisson_episode_num'])
        print("trajectory sampling size: %d" % len(self.data_loader.trajectory_data))
        # new simulator
        self.simulator = StateSimulator()
        # reset
        self.simulator.trajector.init_sampling(
            self.data_loader.trajectory_data, self.config['trajector_sampling_size'])  # sample trajectories to build the route data
        self.episode_task_num = self.config['episode_task_num']
        self.speed_init()
        #self.task_sampling()
        #self.preprocess()
        # clear memory
        # self.data_loader.reset()
        # clear log
        self.log_file = codecs.open(self.config['log_file_path'], "w", "utf8")
        self.log_file.close()
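
In this variant the action space assigns one participant index to every task slot, and the observation is a pair of task-by-participant matrices bounded to [-20, 20]. A standalone sketch with assumed sizes (max_task_size=4, max_par_size=6 are illustrative, not from the config) shows the resulting shapes:

import numpy as np
from gym import spaces

max_task_size, max_par_size = 4, 6  # assumed sizes, for illustration only

action_space = spaces.MultiDiscrete([max_par_size] * max_task_size)
observation_space = spaces.Tuple([
    spaces.Box(low=-20, high=20, shape=(max_task_size, max_par_size), dtype=np.float64),
    spaces.Box(low=-20, high=20, shape=(max_task_size, max_par_size), dtype=np.float64),
])

print(action_space.sample())                 # one participant index per task
print(observation_space.sample()[0].shape)   # (4, 6)
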
Example #4
    def __init__(self, name, config):
        env.__init__(self, name)
        #self.env = gym.make(name)
        self.inner_step = 0
        self.config = config
        # load data
        self.data_loader = DataLoader(self.config.task_data_path,
                                      self.config.trajectory_data_path)
        self.data_loader.load_task_static()  # load the yellow taxi task data
        self.data_loader.get_trajectories()  # load the Uber trajectory data
        self.data_loader.overall_position_normalization()
        self.data_loader.get_merge_task(self.config.aim_day_num)
        print("task generation")
        self.task_generator = TaskGenerator()
        self.task_generator.gen_task_list(
            self.data_loader.zip_data)  # sample from the task distribution to build the task list
        self.task_generator.set_poisson_distribution(
            self.config.poisson_lamda, self.config.poisson_episode_num)
        print("trajectory sampling size: %d" % len(
            self.data_loader.trajectory_data))

        # energy
        self.energy = [
            3, 2, 2, 5, 1, 7, 5, 8, 9, 4, 6, 10, 6, 4, 7, 3, 1, 8, 4, 6, 10, 3,
            6, 4, 3, 6, 2, 2, 10, 1, 1, 1, 2, 4, 9, 6, 5, 3, 4, 7, 8, 2, 6, 1,
            8, 6, 2, 9, 10, 9, 5, 2, 4, 1, 9, 1, 1, 3, 1, 4, 4, 8, 9, 9, 5, 8,
            10, 6, 2, 8, 4, 2, 8, 4, 3, 1, 10, 2, 1, 5, 6, 6, 10, 2, 9, 6, 2,
            5, 4, 10, 4, 1, 9, 9, 6, 5, 10, 7, 10, 7
        ]
        # for i in range(self.config.participant_num):
        #     energy_save = random.randint(1, 10)
        #     self.energy.append(energy_save)

        # new simulator
        self.simulator = StateSimulator()
        # reset
        self.simulator.trajector.init_sampling(
            self.data_loader.trajectory_data, self.config.trajector_sampling_size)  # sample trajectories to build the route data
        self.episode_task_num = self.config.episode_task_num
        self.speed_init()
        #self.task_sampling()
        #self.preprocess()
        # clear memory
        # self.data_loader.reset()
        # clear log
        self.log_file = codecs.open(self.config.log_file_path, "w", "utf8")
        self.log_file.close()
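
The hard-coded self.energy list in Example #4 holds 100 values in the range 1..10, which matches the commented-out random.randint(1, 10) generator right below it. A standalone sketch of how such a list could be reproduced (participant_num = 100 is inferred from the list length, not read from the config):

import random

participant_num = 100  # the hard-coded list above has 100 entries
random.seed(0)         # fixed seed so the draw is repeatable
energy = [random.randint(1, 10) for _ in range(participant_num)]
print(len(energy), min(energy), max(energy))  # 100 values, all within 1..10
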
Example #5
    def __init__(self, env_config):
        env.__init__(self, "trajEnv")
        #self.config = config
        self.config = self.load_config(env_config['config_path'])
        self.config["max_step"] = env_config["max_step"]
        self.config["episode_task_num"] = env_config["episode_task_num"]
        self.config["max_task_size"] = env_config["max_task_size"]
        self.config["participant_num"] = env_config["participant_num"]
        self.config["max_par_size"] = env_config["max_par_size"]

        #self.env = gym.make(name)
        self.action_space = spaces.MultiDiscrete([self.config['max_par_size']] * self.config['max_task_size'])
        # #print self.action_space
        # par_feature = np.zeros((self.config['max_par_size'], self.config['par_feature_size']))
        # task_feature = np.zeros((self.config['max_task_size'], self.config['task_feature_size']))
        obs_size = self.config['max_par_size'] + self.config['max_task_size']
        feature_size = max(self.config['task_feature_size'], self.config['par_feature_size'])
        #self.observation_space = Box(low=-200, high=200, shape=(obs_size, feature_size), dtype=np.float64)
        self.observation_space = spaces.Tuple([
            # spaces.Box(
            #     low=-2000, high=2000, shape=(self.config['max_par_size'], self.config['par_feature_size']), dtype=np.float64),
            # spaces.Box(
            #     low=-2000, high=2000, shape=(self.config['max_task_size'], self.config['task_feature_size']), dtype=np.float64),
            spaces.Box(
                low=-20, high=20, shape=(self.config['max_task_size'], self.config['max_par_size']), dtype=np.float64),
            spaces.Box(
                low=-20, high=20, shape=(self.config['max_task_size'], self.config['max_par_size']), dtype=np.float64)
        ])
        #print self.observation_space
        self.inner_step = 0

        # load data
        self.data_loader = DataLoader(
            self.config['task_data_path'],self.config['trajectory_data_path'])
        self.data_loader.load_task_static() # load the yellow taxi task data
        self.data_loader.get_trajectories() # load the Uber trajectory data
        self.data_loader.overall_position_normalization()
        self.data_loader.get_merge_task(self.config['aim_day_num']) 
        print("task generation")
        self.task_generator = TaskGenerator()
        self.task_generator.gen_task_list(self.data_loader.zip_data) # sample from the task distribution to build the task list
        self.task_generator.set_poisson_distribution(self.config['poisson_lamda'], self.config['poisson_episode_num'])
        print("trajectory sampling size: %d" % len(self.data_loader.trajectory_data))

        # energy
        self.energy = [
            3, 2, 2, 5, 1, 7, 5, 8, 9, 4, 6, 10, 6, 4, 7, 3, 1, 8, 4, 6, 10, 3,
            6, 4, 3, 6, 2, 2, 10, 1, 1, 1, 2, 4, 9, 6, 5, 3, 4, 7, 8, 2, 6, 1,
            8, 6, 2, 9, 10, 9, 5, 2, 4, 1, 9, 1, 1, 3, 1, 4, 4, 8, 9, 9, 5, 8,
            10, 6, 2, 8, 4, 2, 8, 4, 3, 1, 10, 2, 1, 5, 6, 6, 10, 2, 9, 6, 2,
            5, 4, 10, 4, 1, 9, 9, 6, 5, 10, 7, 10, 7
        ]
        # for i in range(self.config.participant_num):
        #     energy_save = random.randint(1, 10)
        #     self.energy.append(energy_save)
        # clear memory

        # new simulator
        self.simulator = StateSimulator()
        # reset
        self.simulator.trajector.init_sampling(
            self.data_loader.trajectory_data, self.config['trajector_sampling_size'])  # sample trajectories to build the route data
        self.episode_task_num = self.config['episode_task_num']
        self.speed_init()
        #self.task_sampling()
        #self.preprocess()

        print("parm## %d %d %d %d %d " % (self.config["max_step"],
            self.config["episode_task_num"], self.config["max_task_size"],
            self.config["participant_num"], self.config["max_par_size"]))
        # self.data_loader.reset() 
        # clear log
        self.log_file = codecs.open(self.config['log_file_path'], "w", "utf8")
        self.log_file.close()
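
Example #5 copies five fields from env_config into the loaded config one assignment at a time. The standalone sketch below shows the equivalent update with plain dicts; every literal value and the config_path placeholder are assumptions, not values from the repository.

config = {"max_step": 10, "episode_task_num": 30}  # stand-in for load_config(...)
env_config = {
    "config_path": "env.json",  # placeholder path
    "max_step": 20,
    "episode_task_num": 50,
    "max_task_size": 10,
    "participant_num": 100,
    "max_par_size": 20,
}
overrides = ("max_step", "episode_task_num", "max_task_size",
             "participant_num", "max_par_size")
config.update({key: env_config[key] for key in overrides})
print(config)  # the five overridden fields now carry the env_config values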