    def __init__(self, config):
        """
        Constructor

        :param config: configuration dictionary
        """
        self.config = config
        self.logger = logging.getLogger("so_logger")
        self.data_provider = DataProvider(self.config)
        self.data_exporter = DataExporter(self.config)

    def run(self):
        """
        Execute the job
        :param:
        :return:
        """
        self.logger.info("Starting job: GuruDataProcessor\n")
        data_provider = DataProvider(self.config)
        data_exporter = DataExporter(self.config)

        # Read guru data
        df = data_provider.read_guru_user_data()
        df = df[[3, 4]]  # Salary and skills columns
        df.columns = ['cost', 'skills']
        df = df[(df.cost != "$0") & (df.skills != "UNKNOWN")]
        df = df.reset_index(drop=True)
        df = df.assign(user_id=df.index.values)
        # Drop the trailing delimiter character before splitting into a skill list
        df = df.assign(skills=df.apply(lambda x: x['skills'][:-1].split(','), axis=1))

        # Strip the leading '$' and convert cost to an integer
        user_df = df.assign(cost=df.apply(lambda x: int(x['cost'][1:]), axis=1))

        # Read skills data
        df = data_provider.read_guru_skill_data()
        df = df[[1]]
        df.columns = ['skill']
        skill_df = df.assign(skill_id=df.index.values)

        # Create multilabel binarizer
        mlb = MultiLabelBinarizer(classes=skill_df.skill.values)

        # One hot encoding of user skills
        skills = mlb.fit_transform(user_df['skills'])

        # Create dataset
        users = user_df.to_dict('records')
        for user, skill_vector in zip(users, skills):
            user['skills_array'] = skill_vector

        # Export csv files
        data_exporter.export_csv_file(user_df, "guru/guru_user_df.csv")
        data_exporter.export_csv_file(skill_df, "guru/guru_skill_df.csv")

        # Scaling factor for submodular function
        scaling_factor = 1

        # Create and export data object to be used in experiments
        # containing all methods related to guru data
        guru = GuruData(self.config, user_df, skill_df, users, scaling_factor)
        data_exporter.export_dill_file(guru, "guru/guru_data.dill")

        self.logger.info("Finished job: GuruDataProcessor")
Example #3
    def __init__(self, config_):
        """
        Constructor

        :param config_: configuration dictionary
        """
        self.config = config_
        self.logger = logging.getLogger("cuda_logger")
        data_provider = DataProvider(self.config)
        filename = self.config['city_state_creator'].get(
            'filename', 'city_states.dill')
        self.city_states = data_provider.read_city_states(filename)
        self.reg_models = data_provider.read_regression_models()

    def __init__(self, config_, year, month, weekday):
        """
        Constructor

        :param config_: configuration dictionary
        :param year:
        :param month:
        :param weekday:
        """
        self.config = config_
        self.year = year
        self.month = month
        self.weekday = weekday
        self.logger = logging.getLogger("cuda_logger")
        data_provider = DataProvider(self.config)
        hex_attr_df = data_provider.read_hex_bin_attributes()
        hex_attr_df['center'] = hex_attr_df.apply(self.calculate_bin_center,
                                                  axis=1)
        self.hex_attr_df = hex_attr_df
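
# Aside: calculate_bin_center is referenced above but not shown. A plausible
# sketch only -- it assumes a hypothetical 'vertices' column holding the six
# (lat, lon) corner tuples of each hex bin; the real schema may differ.
import numpy as np

def calculate_bin_center(row):
    # Centroid as the mean of the vertex coordinates
    vertices = np.array(row['vertices'])
    return tuple(vertices.mean(axis=0))
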
Example #5
    def __init__(self, config_):
        """
        Constructor

        :param config_: configuration dictionary
        """
        self.config = config_
        self.logger = logging.getLogger("cuda_logger")
        params = self.config["city_state_creator"]
        self.start_time = params["start_time"]
        self.end_time = params["end_time"]
        self.time_slice_duration = params["time_slice_duration"]
        self.time_unit_duration = params["time_unit_duration"]
        data_provider = DataProvider(self.config)
        hex_attr_df = data_provider.read_hex_bin_attributes()
        hex_dist_df = data_provider.read_hex_bin_distances()
        self.hex_bins = hex_attr_df['hex_id'].values
        self.hex_dist = hex_dist_df[[
            'pickup_bin', 'dropoff_bin', 'straight_line_distance'
        ]]
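
# Aside: the constructors above read everything from a nested config dict.
# An illustrative 'city_state_creator' block -- the key names come from the
# code, the values (and their units) are assumptions:
config = {
    'city_state_creator': {
        'filename': 'city_states.dill',
        'start_time': '2015-01-01 00:00:00',
        'end_time': '2015-01-31 23:59:59',
        'time_slice_duration': 60,   # assumed to be minutes
        'time_unit_duration': 10,    # assumed to be minutes
    },
}
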
Example #6
    def __init__(self, config_):
        """
        Constructor
        :param config_:
        :return:
        """
        self.config = config_
        self.logger = logging.getLogger("gym_logger")
        data_provider = DataProvider(self.config)

        # City state parameters
        self.city_states = data_provider.read_city_states()
        self.hex_attr_df = data_provider.read_hex_bin_attributes()
        self.hex_bins = self.hex_attr_df['hex_id']

        self.T = len(self.city_states)  # Number of time steps
        self.S = len(self.hex_bins)  # Number of hex bins

        # Environment parameters
        self.num_drivers = self.config['env_parameters']['num_drivers']
        self.distribution = self.config['env_parameters'][
            'driver_distribution']
        # Next free timestep for each driver
        self.next_free_timestep = np.zeros(self.num_drivers)
        # Total earnings for each driver
        self.total_driver_earnings = np.zeros(self.num_drivers)

        # Environment action and observation space:
        # 7 possible actions per hex bin; the observation is the number of
        # drivers in each hex bin
        actions = [7] * self.S
        drivers = [self.num_drivers] * self.S
        self.action_space = spaces.MultiDiscrete(actions)
        # A Tuple space also including the time step was considered but is
        # left disabled:
        # spaces.Tuple((spaces.Discrete(self.T), spaces.MultiDiscrete(drivers)))
        self.observation_space = spaces.MultiDiscrete(drivers)

        self.reset()
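
# Aside: a standalone illustration of the gym spaces used above.
# MultiDiscrete([7] * S) means each of the S hex bins independently takes one
# of 7 discrete actions; toy sizes below, not the environment's real values.
from gym import spaces

S, num_drivers = 5, 10
action_space = spaces.MultiDiscrete([7] * S)
observation_space = spaces.MultiDiscrete([num_drivers] * S)

sample = action_space.sample()      # e.g. array([3, 0, 6, 2, 5])
assert action_space.contains(sample)
print(sample.shape)                 # (5,) -- one action per hex bin
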
    def run(self):
        """
        This method executes the job
        :param:
        :return:
        """
        self.logger.info("Starting job: NeighborhoodDataExportJob\n")
        data_provider = DataProvider(self.config)
        data_exporter = DataExporter(self.config)
        hex_attr_df = data_provider.read_hex_bin_attributes()
        hex_bins = hex_attr_df['hex_id'].values

        data = {}
        for r in range(self.radius + 1):
            data[r] = {}
            for hex_bin in hex_bins:
                neighbors = hex_neighborhood(hex_bin, hex_attr_df, r)
                # One-hot vector marking every bin within radius r
                one_hot_encoding_vector = np.zeros(len(hex_bins))
                np.put(one_hot_encoding_vector, neighbors, 1)
                data[r][hex_bin] = one_hot_encoding_vector

        data_exporter.export_neighborhood_data(data)
        self.logger.info("Finished job: NeighborhoodDataExportJob")
    def run(self):
        """
        Creates and runs training episode
        :param:
        :return:
        """
        data_provider = DataProvider(self.config)
        hex_attr_df = data_provider.read_hex_bin_attributes()
        hex_distance_df = data_provider.read_hex_bin_distances()
        city_states = data_provider.read_city_states(self.city_states_filename)
        neighborhood = data_provider.read_neighborhood_data()
        popular_bins = data_provider.read_popular_hex_bins()
        num_episodes = self.config['RL_parameters']['num_episodes']
        ind_episodes = self.config['RL_parameters']['ind_episodes']
        exp_decay_multiplier = self.config['RL_parameters']['exp_decay_multiplier']

        q_ind = None
        r_table = None
        xi_matrix = None

        best_episode = None
        best_model = {}
        # Initialize the running best outside the loop; resetting it every
        # episode would make the best-episode comparison below always succeed
        current_best = float("-inf")

        progress_bar = tqdm(range(num_episodes))
        for episode_id in progress_bar:
            progress_bar.set_description("Episode: {}".format(episode_id))

            # Create episode; individual exploration decays exponentially
            # with the episode number
            ind_exploration_factor = np.e ** (-1 * episode_id * exp_decay_multiplier / ind_episodes)

            episode = Episode(self.config,
                              episode_id,
                              ind_exploration_factor,
                              hex_attr_df,
                              hex_distance_df,
                              city_states,
                              neighborhood,
                              popular_bins,
                              q_ind,
                              r_table,
                              xi_matrix)

            # Run episode
            tables = episode.run()
            q_ind = tables['q_ind']
            r_table = tables['r_table']
            xi_matrix = tables['xi_matrix']
            episode_tracker = tables['episode_tracker']

            # Uncomment when running a single job; keep commented during
            # experiments, where per-episode logging produces unmanageably
            # large output

            # self.logger.info("""
            #                  Expt: {} Episode: {} Earnings: {}
            #                  Pax rides: {} Relocation rides: {} Unmet demand: {}
            #                  """.format(self.expt_name, episode_id,
            #                             episode_tracker.gross_earnings,
            #                             episode_tracker.successful_waits,
            #                             episode_tracker.relocation_rides,
            #                             episode_tracker.unmet_demand))
            # self.logger.info("----------------------------------")

            self.training_tracker.update_RL_tracker(
                episode_id, episode_tracker.gross_earnings,
                episode_tracker.successful_waits, episode_tracker.unsuccessful_waits,
                episode_tracker.unmet_demand, episode_tracker.relocation_rides,
                episode_tracker.DET, episode_tracker.DPRT, episode_tracker.DWT,
                episode_tracker.DRT, episode_tracker.DCT)

            # Keep track of the best episode
            if self.objective == 'revenue':
                if episode_tracker.gross_earnings >= current_best:
                    best_episode = episode_tracker
                    current_best = best_episode.gross_earnings
            else:  # self.objective == 'pickups':
                if episode_tracker.successful_waits >= current_best:
                    best_episode = episode_tracker
                    current_best = episode_tracker.successful_waits

            # Save the latest tables; q_ind, r_table and xi_matrix accumulate
            # across episodes, so the exported model reflects all training
            best_model['ind_exploration_factor'] = ind_exploration_factor
            best_model['config'] = self.config
            best_model['q_ind'] = q_ind
            best_model['r_table'] = r_table
            best_model['xi_matrix'] = xi_matrix
            best_model['training_tracker'] = self.training_tracker

        # After finishing training
        self.logger.info(
            "Expt: {} Earnings: {} Met Demand: {} Unmet Demand: {}".format(
                self.expt_name, best_episode.gross_earnings,
                best_episode.successful_waits, best_episode.unmet_demand))
        return best_episode, best_model, self.training_tracker
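
# Aside: the exploration schedule above is a plain exponential decay,
# e ** (-episode_id * exp_decay_multiplier / ind_episodes). Illustrative
# parameter values only:
import numpy as np

ind_episodes, exp_decay_multiplier = 100, 5
for episode_id in (0, 25, 50, 100):
    factor = np.e ** (-episode_id * exp_decay_multiplier / ind_episodes)
    print(episode_id, round(factor, 4))
# 0 1.0       -> fully exploratory
# 25 0.2865
# 50 0.0821
# 100 0.0067  -> almost fully greedy
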
    def run(self):
        """
        Execute the job
        :param:
        :return:
        """
        self.logger.info("Starting job: FreelancerDataProcessor\n")
        data_provider = DataProvider(self.config)
        data_exporter = DataExporter(self.config)

        # Read freelancer data
        df = data_provider.read_freelancer_user_data()
        df_cost = df[[1]]  # Salary/Hour
        df_skills = df[df.columns[4::2]]
        # Reassign instead of using inplace=True on a slice, which triggers
        # pandas chained-assignment warnings
        df_skills = df_skills.replace(to_replace=["Other Skills"], value="")
        # Join the six skill columns into one comma-separated string
        df_skills = df_skills.iloc[:, :6].astype(str).agg(','.join, axis=1)  # Skills

        user_df = pd.DataFrame()
        user_df['cost'] = df_cost.iloc[:, 0].tolist()
        # Converting all strings to lower case
        user_df['skills'] = df_skills.str.lower().tolist()

        user_df = user_df.reset_index(drop=True)
        user_df = user_df.assign(user_id=user_df.index.values)
        # Drop the trailing delimiter character before splitting into a skill list
        user_df = user_df.assign(skills=user_df.apply(
            lambda x: x['skills'][:-1].split(','), axis=1))

        # Convert cost to integers
        user_df.cost = user_df.cost.astype(int)

        # Read skills data
        df = data_provider.read_freelancer_skill_data()
        df = df[[1]]
        df.columns = ['skill']
        skill_df = df.assign(skill_id=df.index.values)

        # Create multilabel binarizer
        mlb = MultiLabelBinarizer(classes=skill_df.skill.values)

        # One hot encoding of user skills
        skills = mlb.fit_transform(user_df['skills'])

        # Create dataset
        users = user_df.to_dict('records')
        for user, skill_vector in zip(users, skills):
            user['skills_array'] = skill_vector

        # Export csv files
        data_exporter.export_csv_file(user_df,
                                      "freelancer/freelancer_user_df.csv")
        data_exporter.export_csv_file(skill_df,
                                      "freelancer/freelancer_skill_df.csv")

        # Scaling factor for submodular function
        scaling_factor = 1

        # Create and export data object to be used in experiments
        # containing all methods related to freelancer data
        freelancer = FreelancerData(self.config, user_df, skill_df, users,
                                    scaling_factor)
        data_exporter.export_dill_file(freelancer,
                                       "freelancer/freelancer_data.dill")

        self.logger.info("Finished job: FreelancerDataProcessor")
Example #10
    def run(self):
        """
        Creates and runs training episode
        :param:
        :return:
        """
        data_provider = DataProvider(self.config)
        hex_attr_df = data_provider.read_hex_bin_attributes()
        hex_distance_df = data_provider.read_hex_bin_distances()
        city_states = data_provider.read_city_states(
            self.test_parameters['city_states_filename'])
        model = data_provider.read_model(
            self.test_parameters['model_filename'])
        neighborhood = data_provider.read_neighborhood_data()
        popular_bins = data_provider.read_popular_hex_bins()

        q_ind = model['q_ind']
        r_table = model['r_table']
        xi_matrix = model['xi_matrix']

        episode_id = 0

        # Create episode; test episodes run greedily, with no exploration
        ind_exploration_factor = 0.0

        episode = Episode(self.config, episode_id, ind_exploration_factor,
                          hex_attr_df, hex_distance_df, city_states,
                          neighborhood, popular_bins, q_ind, r_table,
                          xi_matrix, True)

        # Run episode
        tables = episode.run()
        q_ind = tables['q_ind']
        r_table = tables['r_table']
        xi_matrix = tables['xi_matrix']
        episode_tracker = tables['episode_tracker']

        self.testing_tracker.update_RL_tracker(
            0, episode_tracker.gross_earnings,
            episode_tracker.successful_waits,
            episode_tracker.unsuccessful_waits, episode_tracker.unmet_demand,
            episode_tracker.relocation_rides, episode_tracker.DET,
            episode_tracker.DPRT, episode_tracker.DWT, episode_tracker.DRT,
            episode_tracker.DCT)

        self.logger.info("""
                         Expt: {} Earnings: {}
                         Model: {}
                         Test day: {}
                         Num drivers: {}
                         Pax rides: {} Relocation rides: {} Unmet demand: {}
                         """.format(
            self.expt_name, episode_tracker.gross_earnings,
            self.test_parameters['model_filename'],
            self.test_parameters['city_states_filename'],
            self.config['RL_parameters']['num_drivers'],
            episode_tracker.successful_waits, episode_tracker.relocation_rides,
            episode_tracker.unmet_demand))
        self.logger.info("----------------------------------")

        return self.testing_tracker
    def __init__(self, config):
        """
        Constructor

        :param config: configuration dictionary
        """
        self.config = config
        self.logger = logging.getLogger("baseline_logger")
        self.data_provider = DataProvider(self.config)
        self.data_exporter = DataExporter(self.config)
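
# Aside: DataExporter.export_dill_file is used throughout but not shown. A
# minimal stand-in built on the dill library (behavior assumed, path handling
# omitted):
import dill

def export_dill_file(obj, path):
    with open(path, 'wb') as f:
        dill.dump(obj, f)

def read_dill_file(path):
    with open(path, 'rb') as f:
        return dill.load(f)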