def test_obj_fun(inData, params, obj='u_pax', _plot=False):
    # computes ILP with a given objective function and returns KPIs of the solution
    from ExMAS.main import matching

    params.matching_obj = obj
    if "prob" in obj:
        params.minmax = 'max'
    else:
        params.minmax = 'min'
    if obj == 'worst':
        params.minmax = 'max'
        params.matching_obj = 'u_pax'
    inData = matching(inData, params, plot=False)
    if _plot:
        import matplotlib.pyplot as plt
        m_solution = inData.sblts.m.copy()
        fig, ax = plt.subplots()
        for col in m_solution.columns:
            if inData.sblts.rides.loc[col].selected == 0:
                m_solution[col] = inData.sblts.m[col]
            else:
                m_solution[col] = inData.sblts.m[col] * 5
        ax.imshow(m_solution, cmap='Greys', interpolation='Nearest')
        ax.set_ylabel('rides')
        _ = ax.set_xlabel('trips')
        print('grey - feasible, black - selected')
    inData = calc_solution_PoA(inData)
    return KPIs(inData)
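
# A minimal usage sketch for test_obj_fun above (illustration only): it assumes `inData`
# and `params` were already prepared upstream (network, requests and the shareability
# graph from ExMAS.main), and that KPIs(...) returns a dict-like of scalars; the
# objective names below are examples and may differ per configuration.
import pandas as pd

def compare_objectives(inData, params, objectives=('u_pax', 'u_veh', 'worst')):
    kpis = {}
    for obj in objectives:
        kpis[obj] = test_obj_fun(inData, params, obj=obj)  # one ILP per objective
    return pd.DataFrame(kpis)  # KPI table: one column per objective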
def level_3(inData, params):
    # second level of ExMAS
    inData, params = prepare_level3(inData, params)  # parameterize for the second ExMAS
    inData.requests = inData.transitize.requests2  # set the new requests for ExMAS
    params.nP = inData.requests.shape[0]
    inData = list_unmergables(inData)  # list 's2s' rides that cannot be matched (they share the same traveller)
    # ExMAS 2
    inData_copy = ExMAS.main(inData, params, plot=False)  # pooling of 2nd-level rides (s2s)
    inData = process_level3(inData, inData_copy)
    inData = matching(inData, params)  # final solution
    inData.transitize.rides['solution_3'] = inData.sblts.rides.selected.values
    return inData, params
def level_3(inData, params):
    # set parameters for the second level of ExMAS
    inData, params = prepare_level3(inData, params)
    inData = list_unmergables(inData)
    inData_copy = ExMAS.main(inData, params, plot=False)  # pooling of 2nd-level rides (s2s)
    inData.transitize.rides3 = inData_copy.sblts.rides.copy()
    inData_copy.sblts.rides.to_csv('{}_ridesExMAS2.csv'.format(EXPERIMENT_NAME))
    inData = process_level3(inData)
    inData = matching(inData, params)  # final solution
    # inData.sblts.requests.to_csv('{}_requestsExMAS2.csv'.format(EXPERIMENT_NAME))
    inData.transitize.rides['solution_3'] = inData.sblts.rides.selected.values
    inData.transitize.rides.to_csv('{}_rides.csv'.format(EXPERIMENT_NAME))
    return inData, params
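
# A self-contained sketch of the `list_unmergables` idea used above (illustration only,
# not the library routine): two second-level rides cannot be merged when their traveller
# sets intersect, which a pairwise set intersection detects.
import itertools

def find_unmergables(traveller_sets):
    # traveller_sets: dict {ride_id: set of traveller ids}
    unmergables = []
    for (i, s_i), (j, s_j) in itertools.combinations(traveller_sets.items(), 2):
        if s_i & s_j:  # a shared traveller -> mutually exclusive in the matching
            unmergables.append((i, j))
    return unmergables

# e.g. find_unmergables({0: {1, 2}, 1: {2, 3}, 2: {4}}) -> [(0, 1)]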
def level_2(inData, params):
    if params.get('parallel', False):
        import dask.dataframe as dd
        # single_applied = to_apply[to_apply.degree==1].apply(lambda x: transitize(inData, x), axis=1)
        ddf = dd.from_pandas(inData.sblts.rides, npartitions=params.parallel)
        applied = ddf.map_partitions(
            lambda dframe: dframe.apply(lambda x: transitize(inData, x), axis=1)).compute(
            scheduler='processes')
        # applied = pd.concat([single_applied, applied.compute()])
    else:
        to_apply = inData.sblts.rides[inData.sblts.rides.degree != 1]
        applied = to_apply.apply(lambda x: transitize(inData, x), axis=1)  # main
    inData.transitize.requests2 = applied  # 's2s' rides at the second level

    # store results for the rm matrix of 's2s' rides
    if inData.transitize.requests2[inData.transitize.requests2.transitizable].shape[0] > 0:
        inData.transitize.rm2 = pd.concat(
            inData.transitize.requests2[inData.transitize.requests2.efficient].df.values)
        inData.transitize.rm2['pax_id'] = inData.transitize.rm2.index.copy()
        inData.transitize.rm2['traveller'] = inData.transitize.rm2.index.copy()

    inData.transitize.requests2 = inData.transitize.requests2[
        inData.transitize.requests2['efficient']]  # store only the efficient ones
    inData.transitize.requests2 = inData.transitize.requests2.apply(
        pd.to_numeric, errors='ignore')  # needed after creating a df from dicts

    if inData.transitize.requests2.shape[0] == 0:  # early exit
        inData.logger.warn('No transitizable rides, early exit')
        inData.transitize.rides.to_csv('rides.csv')
    else:
        inData.logger.warn('Transitizing: \t{} rides '
                           '\t{} transitizable '
                           '\t{} efficient'.format(
            inData.transitize.rides1.shape[0],
            inData.transitize.requests2[inData.transitize.requests2.transitizable].shape[0],
            inData.transitize.requests2.shape[0]))
        inData.transitize.requests2['indexes_set'] = inData.transitize.requests2.apply(
            lambda x: set(x.indexes), axis=1)
        # set the indexes of first-level rides in the second-level rides
        inData.transitize.requests2['low_level_indexes'] = inData.transitize.requests2.apply(
            lambda x: inData.transitize.rm1[inData.transitize.rm1.ride == x.name].traveller.to_list(), axis=1)
        inData.transitize.requests2['low_level_indexes_set'] = \
            inData.transitize.requests2.low_level_indexes.apply(set)
        inData.transitize.requests2['low_level_indexes'] = \
            inData.transitize.requests2.low_level_indexes.apply(list)
        inData.transitize.requests2['pax_id'] = inData.transitize.requests2.index.copy()

        # store efficient 's2s' rides and concat them to the ExMAS rides
        to_concat = inData.transitize.requests2
        to_concat['solution_0'] = 0  # they are not part of any previous solution
        to_concat['solution_1'] = 0
        to_concat['d2d_reference'] = to_concat.pax_id  # store a reference to the previous ride
        to_concat['kind'] = 's2s'
        to_concat = to_concat[
            ['indexes', 'u_pax', 'u_veh', 'kind', 'ttrav', 'orig_walk_time', 'dest_walk_time',
             'times', 'u_paxes', 'solution_0', 'solution_1', 'low_level_indexes_set',
             'origin', 'destination', 'd2d_reference']]
        inData.transitize.rides = pd.concat([inData.transitize.rides, to_concat]).reset_index()

        # store rm
        to_concat = inData.transitize.rm2
        to_concat['ttrav'] = to_concat['s2s_ttrav']
        to_concat['u'] = to_concat['u_sh']
        to_concat = to_concat[['ride', 'traveller', 'dist', 'ttrav', 'delay', 'u',
                               'orig_walk_time', 'dest_walk_time']]  # output for rm2

        def get_ride_index(row):
            # get the proper id of a ride (in the concatenated df of .rides)
            ride = inData.transitize.rides[inData.transitize.rides.d2d_reference == row.ride]
            if ride.shape[0] == 0:
                print(row, ride)
                raise AttributeError
            return ride.squeeze().name

        to_concat['ride'] = to_concat.apply(lambda row: get_ride_index(row), axis=1)
        inData.transitize.rm = pd.concat([inData.transitize.rm, to_concat])
        inData.transitize.requests2.index = \
            inData.transitize.rides[inData.transitize.rides.kind == 's2s'].index.values

        inData.sblts.rides = inData.transitize.rides  # store rides for ExMAS
        params.process_matching = False
        inData = matching(inData, params)  # find a new solution with s2s
        inData.transitize.rides['solution_2'] = inData.sblts.rides.selected.values  # new solution at level 2
    return inData, params
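
# A self-contained sketch of the dask pattern used in level_2 above (illustration only):
# map_partitions runs a plain pandas .apply inside each partition, so a row-wise function
# parallelises across processes without changes. The toy frame and row_cost below are
# hypothetical stand-ins for inData.sblts.rides and the transitize(...) call.
import pandas as pd
import dask.dataframe as dd

def row_cost(row):
    return row.a * 2 + row.b  # stand-in for the per-ride computation

if __name__ == '__main__':  # required by the 'processes' scheduler on spawn-based platforms
    df = pd.DataFrame({'a': range(8), 'b': range(8)})
    ddf = dd.from_pandas(df, npartitions=4)
    costs = ddf.map_partitions(lambda part: part.apply(row_cost, axis=1),
                               meta=('cost', 'int64')).compute(scheduler='processes')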
def prep_shared_rides(_inData, sp, _print=False):
    """
    Determines which requests are shareable,
    computes the matching with ExMAS,
    generates schedules of shared (or non-shared) rides
    :param _inData:
    :param sp:
    :param _print:
    :return:
    """
    sp.share = sp.get('share', 0)  # share of shareable rides
    requests = _inData.requests
    if sp.share == 0:
        requests.shareable = False  # no requests are shareable
    elif sp.share == 1:
        requests.shareable = True  # all requests are shareable
    else:  # mixed - not fully tested, can be unstable
        requests.shareable = requests.apply(
            lambda x: False if random.random() >= sp.share else True, axis=1)

    if requests[requests.shareable].shape[0] == 0:  # no sharing
        _inData.requests['ride_id'] = _inData.requests.index.copy()  # rides are schedules, we do not share
        _inData.requests['position'] = 0  # everyone has the first position
        # _inData.requests['sim_schedule'] = _inData.requests.apply(lambda x: , axis=1)
    else:  # sharing
        if sp.without_matching:  # was the shareability graph computed before?
            _inData = matching(_inData, sp, plot=False)  # if so, do only the matching
        else:
            _inData = ExMAS.main(_inData, sp, plot=False)  # compute the graph and do the matching
        # prepare schedules
        schedule = _inData.sblts.schedule
        r = _inData.sblts.requests
        schedule['nodes'] = schedule.apply(
            lambda s: [None] + list(r.loc[s.indexes_orig].origin.values) +
                      list(r.loc[s.indexes_dest].destination.values), axis=1)
        schedule['req_id'] = schedule.apply(
            lambda s: [None] + s.indexes_orig + s.indexes_dest, axis=1)
        _inData.requests['ride_id'] = r.ride_id  # store the ride index in requests for simulation
        _inData.requests['position'] = r.position  # store the position within the ride for simulation
        _inData.sblts.schedule['sim_schedule'] = _inData.sblts.schedule.apply(
            lambda x: make_schedule_shared(x), axis=1)

        def set_sim_schedule(x):
            if not x.shareable:
                return make_schedule_nonshared([x])
            elif 'platform' in x and x.platform == -1:
                return make_schedule_nonshared([x])
            else:
                return _inData.sblts.schedule.loc[x.ride_id].sim_schedule

        _inData.requests['sim_schedule'] = _inData.requests.apply(
            lambda x: set_sim_schedule(x), axis=1)

    _inData.schedules_queue = pd.DataFrame(
        [[i, _inData.schedules[i].node[1]] for i in _inData.schedules.keys()],
        columns=[0, 'origin']).set_index(0)
    return _inData
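
# A vectorised sketch of the mixed-share draw above (illustration only, assuming numpy):
# equivalent to the row-wise lambda with random.random(), i.e. one Bernoulli(share) draw
# per request.
import numpy as np
import pandas as pd

requests = pd.DataFrame(index=range(10))  # hypothetical stand-in for _inData.requests
share = 0.3  # sp.share: probability that a request is shareable
requests['shareable'] = np.random.random(len(requests)) < share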
def single_eval(inData, params, EXPERIMENT_NAME, MATCHING_OBJS, PRUNINGS, PRICING,
                minmax=('min', 'max'), store_res=True):
    inData = prunings.determine_prunings(inData, PRUNINGS)  # set the 'pruned' boolean flag for the matching
    inData.results.rides['pruned_Pricing-{}_Pruning-{}'.format(PRICING, PRUNINGS)] = inData.sblts.rides['pruned']
    inData.sblts.rides['platform'] = inData.sblts.rides.pruned.apply(
        lambda x: 1 if x else -1)  # use only pruned rides in the matching
    inData.sblts.requests['platform'] = 1
    inData.requests['platform'] = inData.requests.apply(lambda x: [1], axis=1)

    for params.matching_obj in MATCHING_OBJS:  # for each objective function
        for params.minmax in minmax:  # best and worst prices of anarchy
            res_name = 'Experiment-{}_Pricing-{}_Objective-{}_Pruning-{}_minmax-{}'.format(
                EXPERIMENT_NAME, PRICING, MATCHING_OBJS, PRUNINGS, params.minmax)  # name of the experiment
            inData.logger.warning(res_name)
            if 'TSE' not in PRUNINGS:
                inData = matching(inData, params, make_assertion=False)  # <- main matching
            else:
                inData = prunings.algo_TSE(inData, params.matching_obj)  # heuristic algorithm instead of the ILP
            inData = evaluate_shareability(inData, params)
            if store_res:
                inData.results.rides[res_name] = inData.sblts.rides.selected.values  # store results (selected rides)
                inData.sblts.rides.selected.name = res_name
                inData.results.rm = inData.results.rm.join(
                    inData.sblts.rides.selected, on='ride')  # store selected rides in the multi-index table
                rm = inData.results.rm
                pruneds = inData.sblts.rides[inData.sblts.rides.pruned == True].index
                rm['bestpossible_{}'.format(PRICING)] = rm.apply(
                    lambda r: rm.loc[pruneds, :][(rm.traveller == r.traveller)][PRICING].min(), axis=1)
                selecteds = rm.loc[inData.sblts.rides[inData.sblts.rides.selected == True].index]
                inData.sblts.res['eq13'] = selecteds[
                    selecteds['bestpossible_{}'.format(PRICING)] ==
                    selecteds[PRICING]].shape[0] / selecteds.shape[0]
            inData.sblts.res['pricing'] = MATCHING_OBJS[0]  # columns for the KPIs table
            inData.sblts.res['algo'] = PRUNINGS[0]
            inData.sblts.res['experiment'] = EXPERIMENT_NAME
            inData.sblts.res['minmax'] = params.minmax
            inData.sblts.res['obj'] = params.matching_obj
            inData.results.KPIs[res_name] = inData.sblts.res  # stack columns with results
    return inData
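
# A self-contained sketch of the eq13 statistic computed above (illustration only): for
# each traveller, the best possible price is the minimum over all non-pruned rides they
# appear in; eq13 is the share of selected (ride, traveller) rows attaining that minimum.
# Toy data below, with 'price' as a stand-in for the PRICING column; the groupby/map is
# a vectorised equivalent of the row-wise apply used above.
import pandas as pd

rm = pd.DataFrame({'ride': [0, 0, 1, 2], 'traveller': [1, 2, 1, 2],
                   'price': [3.0, 4.0, 2.5, 4.0],
                   'pruned': [True, True, True, False],
                   'selected': [True, True, False, False]})
best = rm[rm.pruned].groupby('traveller')['price'].min()  # per-traveller optimum
rm['bestpossible'] = rm.traveller.map(best)
sel = rm[rm.selected]
eq13 = (sel.bestpossible == sel.price).mean()  # share of travellers getting their best price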
def single_eval_windows(inData, params, EXPERIMENT_NAME, MATCHING_OBJS, PRUNINGS, PRICING,
                        minmax=('min', 'max'), store_res=True):
    # evaluate the windows-based approach
    # this has to be called last, since it overwrites inData.sblts.rides
    params.multi_platform_matching = False
    params.assign_ride_platforms = True
    # clear
    inData.sblts.rides['pruned'] = True
    inData.sblts.mutually_exclusives = []
    params.matching_obj = 'u_veh'
    windows = timewindow_benchmark.ExMAS_windows(inData, params)
    # compute degrees
    inData.sblts.rides['degree'] = inData.sblts.rides.apply(lambda x: len(x.indexes), axis=1)
    # delays
    inData.sblts.rides['treqs'] = inData.sblts.rides.apply(
        lambda x: inData.sblts.requests.loc[x.indexes].treq.values, axis=1)

    def calc_deps(r):
        # departure times
        deps = [r.times[0]]
        for d in r.times[1:r.degree]:
            deps.append(deps[-1] + d)
        return deps

    windows.sblts.rides['deps'] = windows.sblts.rides.apply(calc_deps, axis=1)
    windows.sblts.rides['delays'] = windows.sblts.rides['deps'] - windows.sblts.rides['treqs']
    windows.sblts.rides['ttravs'] = windows.sblts.rides.apply(
        lambda r: [sum(r.times[i + 1:r.indexes_orig.index(r.indexes[i]) + r.degree + 1 +
                               r.indexes_dest.index(r.indexes[i])])
                   for i in range(r.degree)], axis=1)

    multis = list()
    for i, ride in windows.sblts.rides.iterrows():
        for t in ride.indexes:
            multis.append([ride.name, t])
    multis = pd.DataFrame(index=pd.MultiIndex.from_tuples(multis))
    multis['ride'] = multis.index.get_level_values(0)
    multis['traveller'] = multis.index.get_level_values(1)
    multis = multis.join(windows.sblts.requests[['treq', 'dist', 'ttrav']], on='traveller')
    multis = multis.join(windows.sblts.rides[['u_veh', 'u_paxes', 'degree', 'indexes', 'ttravs', 'delays']],
                         on='ride')
    multis['order'] = multis.apply(lambda r: r.indexes.index(r.traveller), axis=1)
    multis['ttrav_sh'] = multis.apply(lambda r: r.ttravs[r.order], axis=1)
    multis['delay'] = multis.apply(lambda r: r.delays[r.order], axis=1)
    # multis['u'] = multis.apply(lambda r: r.u_paxes[r.order], axis=1)
    multis['shared'] = multis.degree > 1
    multis['ride_time'] = multis.u_veh
    multis = multis[['ride', 'traveller', 'shared', 'degree', 'treq',
                     'ride_time', 'dist', 'ttrav', 'ttrav_sh', 'delay']]
    windows.sblts.rides_multi_index = multis
    windows = pricings.update_costs(windows, params)

    for params.matching_obj in ['u_veh']:  # single objective function here
        for params.minmax in ['min', 'max']:  # best and worst prices of anarchy
            res_name = 'Experiment-{}_Pricing-{}_Objective-{}_Pruning-{}_minmax-{}'.format(
                EXPERIMENT_NAME, PRICING, MATCHING_OBJS, PRUNINGS, params.minmax)  # name of the experiment
            inData.logger.warning(res_name)
            windows = matching(windows, params, make_assertion=False)  # <- main matching
            windows = evaluate_shareability(windows, params)
            windows.sblts.res['pricing'] = MATCHING_OBJS[0]
            windows.sblts.res['algo'] = PRUNINGS[0]
            windows.sblts.res['minmax'] = params.minmax
            windows.sblts.res['obj'] = params.matching_obj
            inData.results.KPIs[res_name] = windows.sblts.res
    return inData
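
# A sketch of the departure-time reconstruction in calc_deps above (illustration only):
# r.times holds the first pickup time followed by leg durations, so the departures of
# the first `degree` stops are simply a cumulative sum.
from itertools import accumulate

def calc_deps_toy(times, degree):
    return list(accumulate(times[:degree]))

# e.g. calc_deps_toy([10, 5, 7], degree=3) -> [10, 15, 22]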
def evolve(inData, params, _print=False, _plot=False):
    """
    Day-to-day evolution of virus spreading within the initial population.
    Starts with an initial share of infected travellers and gradually infects co-riders.
    :param inData:
    :param params:
    :param _print:
    :param _plot:
    :return:
    """

    def recovery(x):
        # did I recover today (after quarantine)?
        if x.quarantine_day is None:
            return x.state
        else:
            if x.quarantine_day + params.corona.recovery == day:
                return 'R'  # back
            else:
                return x.state

    # initialise
    day = 0
    ret = dict()  # report - trace the number of travellers in each state
    inData = make_population(inData, params)
    # determine who is active (the pool to draw from every day), who is active today
    # and who is initially infected
    ret[day] = inData.passengers.groupby('state').size()

    while "I" in ret[day].index:  # main D2D loop, while anyone is still infected
        # (we do not care about the quarantined)
        day += 1  # increment
        inData.logger.info('day {}'.format(day))

        # quarantines
        inData.passengers['newly_quarantined'] = inData.passengers.apply(
            lambda r: False if r.infection_day is None
            else day - r.infection_day == params.corona.time_to_quarantine,
            axis=1)  # are there newly quarantined travellers?
        inData.passengers.quarantine_day = inData.passengers.apply(
            lambda x: day if x.newly_quarantined else x.quarantine_day, axis=1)
        inData.passengers.state = inData.passengers.apply(
            lambda r: 'Q' if r.newly_quarantined else r.state, axis=1)

        # recoveries
        inData.passengers.state = inData.passengers.apply(lambda x: recovery(x), axis=1)

        # active today
        active_ones = inData.passengers[(inData.passengers.active == True)]
        active_ones = active_ones.sample(int(active_ones.shape[0] * params.corona.p))  # those are active today
        active_ones = active_ones[active_ones.state != 'Q']  # except those quarantined
        inData.passengers['active_today'] = False
        inData.passengers.loc[active_ones.index, 'active_today'] = True  # those will be matched
        # and then may be infected

        # if platform is [-1] the passenger is not matched
        inData.passengers['platforms'] = inData.passengers.apply(
            lambda x: [0] if x.active_today else [-1], axis=1)
        inData.requests['platform'] = inData.requests.apply(
            lambda row: inData.passengers.loc[row.name].platforms[0], axis=1)
        inData.sblts.requests['platform'] = inData.requests['platform']

        # redo the matching
        inData = matching(inData, params, _print)

        # output KPIs
        retout = inData.passengers.groupby('state').size()
        r = inData.sblts.requests
        schedule = inData.sblts.schedule
        schedule['ttrav'] = schedule.apply(lambda x: sum(x.times[1:]), axis=1)
        retout['VehHourTrav'] = schedule.ttrav.sum()
        retout['VehHourTrav_ns'] = r.ttrav.sum()
        retout['PassHourTrav'] = r.ttrav_sh.sum()
        retout['PassHourTrav_ns'] = r.ttrav.sum()
        retout['PassUtility'] = r.u_sh.sum()
        retout['PassUtility_ns'] = r.u.sum()
        retout['nRides'] = schedule.shape[0]

        # and infect
        inData = infect(inData, day, params)
        ret[day] = retout
        inData.logger.info(ret[day])
        inData.logger.info('Day: {}\t infected: {}\t quarantined: '
                           '{}\t recovered: {} \t susceptible: {}, active today: {}.'.format(
            day,
            inData.passengers[inData.passengers.state == "I"].shape[0],
            inData.passengers[inData.passengers.state == "Q"].shape[0],
            inData.passengers[inData.passengers.state == "R"].shape[0],
            inData.passengers[inData.passengers.state == "S"].shape[0],
            inData.passengers[inData.passengers.active_today == True].shape[0]))
        # go to the next day (if anyone is still infected)
        if day > params.corona.get('max_days', 99999):  # early exit
            break
    # end of the loop

    inData.report = pd.DataFrame(ret)
    if _plot:
        plot_spread(inData)

    if params.get('report', False):  # store results to csvs
        replication_id = random.randint(0, 100000)
        ret = inData.report.T.fillna(0)
        filename = "experiment-{}_nP-{}_init-{}_p-{}_quarantine-{}_recovery-{}_repl-{}.csv".format(
            params.corona.EXPERIMENT_NAME,
            params.nP * params.corona.participation,
            params.corona.initial_share,
            params.corona.p,
            params.corona.time_to_quarantine,
            params.corona.recovery,
            replication_id)
        ret.to_csv("ExMAS/data/corona/corona_" + filename)  # day-to-day evolution of travellers in each state
        inData.passengers.to_csv("ExMAS/data/corona/population_" + filename)  # pax info (when infected and by whom)
    return inData
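
# A toy sketch of the day-to-day state machine in evolve() above (illustration only, with
# made-up parameters): travellers move S -> I via infect(), I -> Q after
# time_to_quarantine days, and Q -> R after `recovery` days; the loop stops once no one
# is infected or quarantined.
import numpy as np
import pandas as pd

pop = pd.DataFrame({'state': ['I', 'S', 'S'],
                    'infection_day': [0, np.nan, np.nan],
                    'quarantine_day': [np.nan, np.nan, np.nan]})
time_to_quarantine, recovery = 2, 3
for day in range(1, 10):
    newly_q = day - pop.infection_day == time_to_quarantine
    pop.loc[newly_q, 'state'] = 'Q'
    pop.loc[newly_q, 'quarantine_day'] = day
    pop.loc[pop.quarantine_day + recovery == day, 'state'] = 'R'
    if not pop.state.isin(['I', 'Q']).any():
        break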
def level_2(inData, params):
    to_swifter = inData.sblts.rides[inData.sblts.rides.degree != 1]
    if params.get('swifter', False):
        from_swifter = to_swifter.swifter.apply(lambda x: transitize(inData, x), axis=1)
    else:
        from_swifter = to_swifter.apply(lambda x: transitize(inData, x), axis=1)  # main
    inData.transitize.requests2 = from_swifter  # 's2s' rides at the second level

    if inData.transitize.requests2[inData.transitize.requests2.transitizable].shape[0] > 0:
        inData.transitize.rm2 = pd.concat(
            inData.transitize.requests2[inData.transitize.requests2.transitizable].df.values)
        inData.transitize.rm2['pax_id'] = inData.transitize.rm2.index.copy()
        inData.transitize.rm2['traveller'] = inData.transitize.rm2.index.copy()

    inData.transitize.requests2 = inData.transitize.requests2[
        inData.transitize.requests2['efficient']]  # store only the efficient ones
    inData.transitize.requests2 = inData.transitize.requests2.apply(
        pd.to_numeric, errors='ignore')  # needed after creating a df from dicts

    if inData.transitize.requests2.shape[0] == 0:  # early exit
        inData.logger.warn('No transitizable rides')
        inData.transitize.rides.to_csv('rides.csv')
    else:
        inData.logger.warn('Transitizing: \t{} rides '
                           '\t{} transitizable '
                           '\t{} efficient'.format(
            inData.transitize.rides1.shape[0],
            inData.transitize.requests2[inData.transitize.requests2.transitizable].shape[0],
            inData.transitize.requests2.shape[0]))
        inData.transitize.rm2.to_csv('{}_rm2.csv'.format(EXPERIMENT_NAME))
        inData.transitize.requests2['indexes_set'] = inData.transitize.requests2.apply(
            lambda x: set(x.indexes), axis=1)
        # set the indexes of first-level rides in the second-level rides
        inData.transitize.requests2['low_level_indexes'] = inData.transitize.requests2.apply(
            lambda x: inData.transitize.rm1[inData.transitize.rm1.ride == x.name].traveller.to_list(), axis=1)
        inData.transitize.requests2['low_level_indexes_set'] = \
            inData.transitize.requests2.low_level_indexes.apply(set)
        inData.transitize.requests2['low_level_indexes'] = \
            inData.transitize.requests2.low_level_indexes.apply(list)
        inData.transitize.requests2['pax_id'] = inData.transitize.requests2.index.copy()

        to_concat = inData.transitize.requests2
        to_concat['solution_0'] = 0
        to_concat['solution_1'] = 0
        to_concat['d2d_reference'] = to_concat.pax_id
        to_concat['kind'] = 's2s'
        to_concat = to_concat[[
            'indexes', 'u_pax', 'u_veh', 'kind', 'ttrav', 'orig_walk_time', 'dest_walk_time',
            'times', 'u_paxes', 'solution_0', 'solution_1', 'low_level_indexes_set',
            'origin', 'destination', 'd2d_reference']]
        inData.transitize.rides = pd.concat([inData.transitize.rides, to_concat]).reset_index()

        to_concat = inData.transitize.rm2
        to_concat['ttrav'] = to_concat['s2s_ttrav']
        inData.transitize.rm = pd.concat([
            inData.transitize.rm,
            to_concat[['ride', 'traveller', 'dist', 'ttrav', 'delay', 'u_sh',
                       'orig_walk_time', 'dest_walk_time']]])
        inData.transitize.requests2.index = \
            inData.transitize.rides[inData.transitize.rides.kind == 's2s'].index.values
        # inData.transitize.requests2.to_csv('{}_requests2.csv'.format(EXPERIMENT_NAME))

        inData.sblts.rides = inData.transitize.rides
        params.process_matching = False
        inData = matching(inData, params)  # solution with s2s
        inData.transitize.rides['solution_2'] = inData.sblts.rides.selected.values
        # inData.transitize.rides.to_csv('{}_rides2.csv'.format(EXPERIMENT_NAME))
    return inData, params
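
# A sketch of the swifter fallback pattern above (illustration only): swifter keeps the
# pandas .apply signature and decides internally whether to parallelise, so the row
# function needs no changes; when swifter is unavailable, this falls back to a plain
# .apply, mirroring the params.get('swifter', False) switch used above.
import pandas as pd

def apply_maybe_swifter(df, func):
    try:
        import swifter  # noqa: F401 -- importing registers the .swifter accessor
        return df.swifter.apply(func, axis=1)
    except ImportError:
        return df.apply(func, axis=1)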