Example #1
    def test_no_solver(self):
        generator_params = {
            "dataset_type": "hexagon",
            "n": 10,
            "time_periods": 144,
            "days": 1,
            "orders_density": 0.1,
            "number_of_cars": 2,
            "order_distr": "uniform"
        }

        np.random.seed(777)
        gen = Generator("testOrigSolvers", generator_params)
        graph_info = gen.generate()
        world_graph, idle_driver_locations, real_orders, onoff_driver_locations, random_average, dist = gen.load_complete_set()

        solver_params = {
            "dataset": graph_info,
            "alpha": 0.1,
            "wc": 0,
            "tag": "testOrigSolvers",
            "gamma": 0.9,
            "train_test_split": 0.5
        }

        solver = OrigNoSolver(**solver_params)
        solver.run()
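
The np.random.seed(777) call above pins NumPy's global RNG, which is what makes the synthetic hexagon dataset reproducible across test runs. A minimal sketch of that pattern, independent of the Generator API:

    import numpy as np

    np.random.seed(777)
    first = np.random.rand(3)       # draws after seeding
    np.random.seed(777)
    second = np.random.rand(3)      # reseeding replays the same stream
    assert (first == second).all()  # identical synthetic data on every run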
Example #2
    def load_dataset(self):
        '''
        Load the complete dataset.

        Note that orders from all days are merged into a single day and then sampled from that pool.
        '''
        dataset_params = self.params['dataset']
        gen = Generator(self.params['tag'], dataset_params)
        assert dataset_params['dataset_type'] == 'hexagon', "Only hexagon dataset supported"
        world, idle_driver_locations, real_orders, \
            onoff_driver_locations, random_average, dist = gen.load_complete_set(dataset_id=self.params['dataset']['dataset_id'])

        self.l_max = 9 # default 9: orders of up to 1.5h (9 ten-minute intervals) can be served; should not matter when orders are "real" rather than generated

        self.n_side = 6 # number of neighbors to travel
        self.M = dataset_params['n']
        self.N = dataset_params['n']
        self.mapped_matrix_int = np.reshape(np.arange(0,len(world)), (self.N, self.M)) # should be positive for some reason
        self.order_num_dist = None # used only for synthetic orders; we always generate orders ourselves
        self.order_time_dist = None
        self.order_price_dist = None
        self.idle_driver_dist_time = None # [time, mean, std]; the total number of drivers is loaded from the generator rather than drawn randomly
        self.idle_driver_location_mat = idle_driver_locations
        self.onoff_driver_location_mat = onoff_driver_locations

        # collect all orders in one day and sample them
        self.order_real = np.array(real_orders)
        for i in np.arange(len(self.order_real)):
            self.order_real[i][2] = self.order_real[i][2] % 144 # fold every day onto a single day of 144 intervals

        self.order_sample_p = 1./self.days
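
The merge-and-sample step above can be shown in isolation. A minimal sketch, assuming each order row is (origin, destination, start_interval, price) with 144 ten-minute intervals per day; the order values here are made up:

    import numpy as np

    # three days of hypothetical orders: (origin, destination, start_interval, price)
    orders = np.array([[0., 5.,  10., 1.0],
                       [2., 3., 150., 2.0],    # day 2 -> interval 6 after folding
                       [1., 4., 300., 1.5]])   # day 3 -> interval 12 after folding
    orders[:, 2] = orders[:, 2] % 144          # fold all days onto intervals 0..143
    order_sample_p = 1. / 3                    # as above, with days = 3
    sampled = orders[np.random.rand(len(orders)) < order_sample_p]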
Example #3
    def load_env_params(self):
        '''
        Build environment parameters from the generated dataset.

        Note that orders are merged into a single day and then sampled from that pool.
        '''
        dataset_params = self.params['dataset']
        gen = Generator(self.params['tag'], dataset_params)
        world, idle_driver_locations, real_orders, onoff_driver_locations, random_average, dist = gen.load_complete_set(dataset_id=self.params['dataset']['dataset_id'])
        params = {
            "world": world,
            "orders": real_orders,
            "order_sampling_rate": 1./self.days*self.params['dataset']['order_sampling_multiplier'],
            "drivers_per_node": idle_driver_locations[0,:],
            "n_intervals": self.params['dataset']['time_periods'],
            "wc": self.params['wc'],
            "count_neighbors": self.params['count_neighbors'] == 1,
            "weight_poorest": self.params['weight_poorest'] == 1,
            "normalize_rewards": self.params['normalize_rewards'] == 1,
            "minimum_reward": self.params['minimum_reward'] == 1,
            "include_income_to_observation": self.params['include_income_to_observation'] == 1,
            "poorest_first": self.params.get("poorest_first", 0) == 1
        }

        return params
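
Most of the dict above converts 0/1 integer flags from self.params into booleans via == 1. A tiny sketch of that convention, with a hypothetical flags dict:

    flags = {"count_neighbors": 1, "weight_poorest": 0}   # hypothetical 0/1 flags
    as_bools = {k: v == 1 for k, v in flags.items()}
    # -> {'count_neighbors': True, 'weight_poorest': False}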
Example #4
    def test_chicago(self):
        generator_params = {
            "dataset_type": "chicago",
            "days": 5,
            "number_of_cars": 10,
            'order_sampling_multiplier': 1
        }
        gen = Generator("testOrientedSolver", generator_params)
        graph_info = gen.generate()
        world_graph, idle_driver_locations, real_orders, \
            onoff_driver_locations, random_average, dist = gen.load_complete_set()

        solver_params = {
            "dataset": graph_info,
            "wc": 0.1,
            "tag": "testOrientedSolver",
            "count_neighbors": 1,
            "weight_poorest": 0,
            "normalize_rewards": 1,
            "minimum_reward": 0,
            "include_income_to_observation": 0,
            "training_iterations": 10,
            "testing_epochs": 2
        }

        solver = OrientedSolver(**solver_params)
        solver.run()
Example #5
    def load_dataset(self):
        '''
        Load the complete dataset.

        Note that orders are merged into a single day and then sampled from that pool.
        '''
        dataset_params = self.params['dataset']
        gen = Generator(self.params['tag'], dataset_params)
        self.world, self.idle_driver_locations, self.real_orders, \
            self.onoff_driver_locations, random_average, dist = gen.load_complete_set(dataset_id=self.params['dataset']['dataset_id'])
Example #6
    def test_include_observation(self):
        generator_params = {
            "dataset_type": "hexagon",
            "n": 10,
            "time_periods": 2,
            "days": 2,
            "orders_density": 2,
            "number_of_cars": 200,
            "order_distr": "star",
            "order_sampling_multiplier": 1
        }

        gen = Generator("testTaxiEnvBatch", generator_params)
        graph_info = gen.generate()
        world_graph, idle_driver_locations, real_orders, \
            onoff_driver_locations, random_average, dist = gen.load_complete_set()

        # use OrigSolver as wrapper for params
        orig_solver_params = {
            "dataset": graph_info,
            "alpha": 0.1,
            "wc": 0,
            "iterations": 1, # 1 epoch
            "tag": "testTaxiEnvBatch",
            "gamma": 0.9,
            "order_sampling_multiplier": 1
        }
        ca2c_params = {
            "dataset": graph_info,
            "wc": 0,
            "iterations": 1, # 1 epoch
            "tag": "testTaxiEnvBatch",
            "epsilon": 0.5,
            "gamma": 0.9,
            "learning_rate": 1e-3,
            "count_neighbors": 1,
            "weight_poorest": 0,
            "normalize_rewards": 1,
            "minimum_reward": 0,
            "batch_size": 20,
            "include_income_to_observation": 1,
            "testing_epochs": 2
        }
        solv = cA2CSolver(**ca2c_params)

        # driver+order dist + income + onehot node id + time
        assert solv.env.observation_space_shape == ((2+3)*len(world_graph) + generator_params["time_periods"],)
        observation = solv.env.reset()
        init_info = solv.env.get_reset_info()
        assert observation.shape == solv.env.observation_space_shape
        curr_state, info, income_mat = solv.observation_to_old_fashioned_info(observation, init_info)
        assert (income_mat == np.zeros((len(world_graph),3))).all()

        solv.run()
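
The shape assertion above can be checked by hand: n = 10 gives a 100-node hexagon world, time_periods = 2, and with include_income_to_observation the observation carries five values per node (per the test's comment, driver and order distributions plus income, where income_mat has shape (100, 3)) and a one-hot time encoding:

    n_nodes = 10 * 10                    # len(world_graph) for the n = 10 grid
    time_periods = 2
    obs_len = (2 + 3) * n_nodes + time_periods
    assert obs_len == 502                # == solv.env.observation_space_shape[0]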
Example #7
    def test_predict(self):
        '''
        Test the predict function, which should return a multi-driver action for a
        multi-driver observation, given a single-driver model trained under the
        stable-baselines framework.
        '''

        generator_params = {
            "dataset_type": "grid",
            "n": 3,
            "time_periods": 2,  # should work for any network
            "days": 2,
            "orders_density": 10,
            "number_of_cars": 10,
            "order_distr": "star",
            "order_sampling_multiplier": 1
        }

        gen = Generator("testGymSolver", generator_params)
        graph_info = gen.generate()
        world_graph, idle_driver_locations, real_orders, \
            onoff_driver_locations, random_average, dist = gen.load_complete_set()

        # use OrigSolver as wrapper for params
        solver_params = {
            "dataset": graph_info,
            "wc": 0.1,
            "tag": "testGymSolver",
            "count_neighbors": 1,
            "weight_poorest": 0,
            "normalize_rewards": 1,
            "minimum_reward": 0,
            "include_income_to_observation": 0,
            "num_cpu": 4,
            "training_iterations": 1000,  # check testing while training
            "testing_epochs": 2
        }

        solver = GymSolver(**solver_params)
        solver.train()
        solver.save()

        solver = GymSolver(**solver_params)
        solver.load()
        solver.test()

        solver_params["include_income_to_observation"] = 1
        solver_params["continuous_observation"] = 1
        solver = GymSolver(**solver_params)
        solver.run()
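
Note that the second GymSolver is built from scratch and restored from disk, so the test exercises persistence rather than leftover in-memory state. The same smoke-test pattern, sketched with a hypothetical solver factory and path argument:

    def save_load_roundtrip(make_solver, path):
        solver = make_solver()
        solver.train()
        solver.save(path)

        restored = make_solver()   # fresh instance: no trained weights in memory
        restored.load(path)
        restored.test()            # must pass using only what was persisted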
Example #8
    def generate_datasets(self, force=False):
        if force:
            self.db.dataset.delete_many({'tag': self.tag})
        if self.db.dataset.find_one({'tag': self.tag}) is not None:
            logging.info("Dataset for {} has been found".format(self.tag))
            return 0
        total_datasets = list(self.pm.get_data_param_sets())
        if len(total_datasets) == 0:
            raise Exception("No datasets generated, bad parameter values")
        generated = 0
        for p in total_datasets:
            gen = Generator(self.tag, p)
            dataset_info = gen.generate()
            self.db.dataset.insert_one(dataset_info)
            generated += 1
        return generated
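
generate_datasets is idempotent per tag: a second call finds the existing documents and returns 0, unless force=True wipes them first. The same contract, sketched against an in-memory dict instead of the Mongo collection (all names here hypothetical):

    _cache = {}                                # tag -> list of dataset-info dicts

    def generate_datasets(tag, param_sets, force=False):
        if force:
            _cache.pop(tag, None)              # discard previously generated datasets
        if tag in _cache:
            return 0                           # already generated; nothing inserted
        if not param_sets:
            raise Exception("No datasets generated, bad parameter values")
        _cache[tag] = [{"tag": tag, "params": p} for p in param_sets]
        return len(_cache[tag])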
Example #9
    if args.model_type != "a":
        print("ok")
        net = DeBlurSingleNet(input_shape=(args.size[0], args.size[1], 3),
                              model_type=args.model_type)

        os.makedirs(save_folder, exist_ok=True)

        net.train_model(
            epochs=args.epochs,
            folder_weights_save=save_folder,
            path_weights_load=args.load_path,
            generator=Generator(
                folder_sharp_images=datasets_folder + "Training/" + "Sharp/",
                folder_blurred_images=datasets_folder + "Training/" +
                "Blurred/",
                batch_size=args.batch_size,
                image_exts=(".jpg", ".png", ".jpeg"),
                shuffle=True),
            generator_validation=Generator(
                folder_sharp_images=datasets_folder + "Validation/" + "Sharp/",
                folder_blurred_images=datasets_folder + "Validation/" +
                "Blurred/",
                batch_size=args.batch_size,
                image_exts=(".jpg", ".png", ".jpeg"),
                shuffle=True))
    else:
        net = DeBlurAdversarialNet(input_shape=(args.size[0], args.size[1], 3))

        os.makedirs(save_folder, exist_ok=True)
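
Generator in this example is an image-pair batch loader over sharp/blurred folders, not the taxi-dataset Generator of the earlier examples. Its dataset paths are built by string concatenation; an equivalent with os.path.join, reusing the folder names from the example (the value of datasets_folder is assumed):

    import os

    datasets_folder = "datasets"    # assumed root directory
    train_sharp = os.path.join(datasets_folder, "Training", "Sharp")
    train_blurred = os.path.join(datasets_folder, "Training", "Blurred")
    val_sharp = os.path.join(datasets_folder, "Validation", "Sharp")
    val_blurred = os.path.join(datasets_folder, "Validation", "Blurred")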
Example #10
    def __init__(self):
        self.generator = Generator()
        self.tot_lists = 0
        self.start_date = None
        self.reverse = False
        self.list_name = "List"
Example #11
    def test_init(self):
        generator_params = {
            "dataset_type": "hexagon",
            "n": 10,
            "time_periods": 144,
            "days": 2,
            "orders_density": 100,
            "number_of_cars": 110,
            "order_distr": "star",
            "order_sampling_multiplier": 1
        }

        gen = Generator("testTaxiEnvBatch", generator_params)
        graph_info = gen.generate()
        world_graph, idle_driver_locations, real_orders, \
            onoff_driver_locations, random_average, dist = gen.load_complete_set()

        # use OrigSolver as wrapper for params
        orig_solver_params = {
            "dataset": graph_info,
            "alpha": 0.1,
            "wc": 0,
            "iterations": 1, # 1 epoch
            "tag": "testTaxiEnvBatch",
            "gamma": 0.9
        }
        ca2c_params = {
            "dataset": graph_info,
            "wc": 0,
            "iterations": 1, # 1 epoch
            "tag": "testTaxiEnvBatch",
            "epsilon": 0.5,
            "gamma": 0.9,
            "learning_rate": 1e-3,
            "count_neighbors": 1,
            "weight_poorest": 0,
            "normalize_rewards": 1,
            "minimum_reward": 0,
            "batch_size": 2000,
            "include_income_to_observation": 0,
            "testing_epochs": 2
        }
        origSolv = OrigA2CSolver(**orig_solver_params)
        solv = cA2CSolver(**ca2c_params)

        init_observation = origSolv.env.reset_clean() # observation here is just driver and customer distributions
        temp = np.array(origSolv.env.target_grids) + origSolv.env.M * origSolv.env.N
        target_id_states = origSolv.env.target_grids + temp.tolist()
        stateprocessor = stateProcessor(target_id_states, origSolv.env.target_grids, origSolv.env.n_valid_grids)
        curr_s = stateprocessor.utility_conver_states(init_observation)
        normalized_init_observation = stateprocessor.utility_normalize_states(curr_s)
        init_observation2 = solv.env.reset() # observation here is a full set of drivers, customers, and context

        assert (normalized_init_observation.flatten()[:len(world_graph)] == init_observation2[:len(world_graph)]).all()
        mask = np.ones(len(world_graph))
        mask[0] = 0   # zero out the four corners of the 10x10 grid, where
        mask[9] = 0   # order sampling makes the two observations most
        mask[99] = 0  # likely to diverge
        mask[90] = 0
        A = normalized_init_observation.flatten()[len(world_graph):2*len(world_graph)] * mask
        B = init_observation2[len(world_graph):2*len(world_graph)] * mask
        assert (A == B).all() # might be a bit different on the corners as there is order sampling involved

        solv.train()
        origSolv.train()
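
The four masked indices are the corners of the 10x10 row-major grid (see mapped_matrix_int in Example #2). A sketch computing them for any n under that layout:

    import numpy as np

    n = 10
    ids = np.arange(n * n).reshape(n, n)                        # row-major node ids
    corners = [ids[0, 0], ids[0, -1], ids[-1, 0], ids[-1, -1]]  # [0, 9, 90, 99]
    mask = np.ones(n * n)
    mask[corners] = 0                                           # exclude corner nodes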