def take_trip(self, trip, vehicle):
    """Serve one assigned trip with the given vehicle.

    Races the pick-up sub-process against the trip's cancellation event.
    If cancellation wins, the pick-up is interrupted; otherwise the ride is
    simulated for its full duration and the fare is credited to the vehicle.
    """
    pickup_proc = self.env.process(self.pick_trip(trip, vehicle))
    yield pickup_proc | trip.cancellation
    if not pickup_proc.triggered:
        # cancellation fired first: abort the pick-up and stop here
        pickup_proc.interrupt()
        lg.info(f'Trip {trip.id} cancelled at {self.env.now}')
        return
    # ride to the destination
    yield self.env.timeout(trip.duration)
    vehicle.drop_off(trip)
    vehicle.trip_end.succeed()
    vehicle.trip_end = self.env.event()
    self.vehicle_id = vehicle.id
    trip.mode = 'finished'
    self.trip_list.append(trip)
    trip.info['mode'] = 'finished'
    # base fare with a 5-unit minimum, minus a waiting-time rebate
    fare = max(trip.distance * 1.11 + trip.duration * 0.31 + 2, 5) \
        - float(trip.info['waiting_time']) * 0.10
    vehicle.reward['revenue'] += fare
    vehicle.profit += fare
    # discount the reward back to the decision epoch (15-minute periods)
    k = ceil((self.env.now - vehicle.decision_time) / 15)
    vehicle.reward['revenue'] = vehicle.reward['revenue'] * self.learner.Gamma ** k
    if isinstance(vehicle.reward['revenue'], np.ndarray):
        vehicle.reward['revenue'] = vehicle.reward['revenue'][0]
def finish_discharge(self, charging_station, vehicle):
    """Let the vehicle finish its V2G discharge, or settle up on interrupt.

    Normal path: wait out the planned discharge duration and book the result.
    Interrupt path (vehicle matched to a trip mid-discharge): recompute the
    actual SOC delivered so far, credit the partial discharging reward
    (discounted back to the decision epoch), and record the demand entry.
    """
    # the vehicle either finishes the charging process or interrupts it
    try:
        yield self.env.timeout(vehicle.discharge_duration)
        vehicle.finish_discharging(charging_station)
        vehicle.discharging_demand['SOC_end'] = vehicle.charge_state
        vehicle.discharging_end.succeed()
        vehicle.discharging_end = self.env.event()
    except simpy.Interrupt:
        old_SOC = vehicle.charge_state
        # SOC actually delivered up to the interrupt time
        vehicle.charge_state -= float(
            (charging_station.power *
             (float(self.env.now) - vehicle.t_start_discharging)) /
            (vehicle.battery_capacity / 100))
        vehicle.discharging_demand['SOC_end'] = vehicle.charge_state
        # hour-of-day index into the electricity price table
        for j in range(0, 24):
            if j * 60 <= self.env.now % 1440 <= (j + 1) * 60:
                h = j
        vehicle.reward['discharging'] += (
            vehicle.charge_state - old_SOC) / 100 * 50 * charging_cost[h] / 100
        vehicle.profit += (vehicle.charge_state -
                           old_SOC) / 100 * 50 * charging_cost[h] / 100
        k = ceil((self.env.now - vehicle.decision_time) / 15)
        vehicle.reward['discharging'] = vehicle.reward[
            'discharging'] * self.learner.Gamma**k
        # FIX: the unwrap below previously read reward['charging'][0], clobbering
        # the discharging reward with the (scalar element of the) charging reward.
        if isinstance(vehicle.reward['discharging'], np.ndarray):
            vehicle.reward['discharging'] = vehicle.reward['discharging'][0]
        lg.info(
            f'Warning!!!Charging state of vehicle {vehicle.id} is {vehicle.charge_state} at {self.env.now} '
        )
        # NOTE(review): demand records are appended only on the interrupt path
        # here — confirm the completed-discharge path is recorded elsewhere.
        self.discharging_demand_generated.append(vehicle.discharging_demand)
def run_vehicle(self, vehicle):
    """Main dispatch loop for one vehicle.

    Waits on the vehicle's lifecycle events (trip end, charging end,
    relocating end, discharging end, trip cancellation) and, when one
    fires, starts the next task: charge, discharge, relocate, or park.
    The tiny 0.001 timeouts give the freshly spawned process a chance
    to run before this loop re-waits on the (now replaced) events.
    """
    while True:
        # every vehicle starts the simulation in a parking task
        if self.env.now == 0:
            self.env.process(self.parking_task(vehicle))
        # snapshot the current event objects: the task processes replace
        # them with fresh events after succeeding, so the yield below must
        # use the instances that existed when we started waiting
        event_trip_end = vehicle.trip_end
        event_charging_end = vehicle.charging_end
        event_trip_cancellation = vehicle.trip_cancellation
        event_relocating_end = vehicle.relocating_end
        event_discharging_end = vehicle.discharging_end
        events = yield event_trip_end | event_charging_end \
            | event_relocating_end | event_discharging_end | event_trip_cancellation
        # note: plain ifs (not elif) — several events may fire at the same instant
        if event_trip_end in events:
            lg.info(f'A vehicle gets idle at {self.env.now}')
            # charge_check returns the agent's decision: 0..15 pick a charging
            # station (see charge_task), 16 means discharge; anything else
            # falls through to relocate/park — TODO confirm full action map
            action = self.charge_check(vehicle)
            if action in np.arange(16):
                self.env.process(self.charge_task(vehicle, action))
                yield self.env.timeout(0.001)
            elif action == 16:
                self.env.process(self.discharge_task(vehicle))
                yield self.env.timeout(0.001)
            else:
                if self.relocate_check(vehicle):
                    self.relocate_task(vehicle)
                    yield self.env.timeout(0.001)
                else:
                    self.env.process(self.parking_task(vehicle))
                    yield self.env.timeout(0.001)
        if event_charging_end in events:
            lg.info(f'A vehicle gets charged at {self.env.now}')
            # after charging: relocate if worthwhile, otherwise park
            if self.relocate_check(vehicle):
                self.relocate_task(vehicle)
                yield self.env.timeout(0.001)
            else:
                self.env.process(self.parking_task(vehicle))
                yield self.env.timeout(0.001)
        if event_discharging_end in events:
            lg.info(f'A vehicle gets discharged at {self.env.now}')
            # same relocate-or-park policy after a discharge session
            if self.relocate_check(vehicle):
                self.relocate_task(vehicle)
                yield self.env.timeout(0.001)
            else:
                self.env.process(self.parking_task(vehicle))
                yield self.env.timeout(0.001)
        if event_trip_cancellation in events:
            lg.info(f'Trip gets canceled at {self.env.now}')
            self.env.process(self.parking_task(vehicle))
            yield self.env.timeout(0.001)
        if event_relocating_end in events:
            lg.info(
                f'vehicle {vehicle.id} finishes relocating at {self.env.now}'
            )
            self.env.process(self.parking_task(vehicle))
            yield self.env.timeout(0.001)
def drop_off(self, trip):
    """Finish the trip: move the vehicle to the destination and idle it."""
    self.mode = 'idle'
    self.location = trip.destination
    self.position = find_zone(self.location, zones)
    # SOC spent on the pickup-to-destination leg (precomputed in send())
    self.charge_state -= self.charge_consumption_dropoff
    lg.info(
        f'Vehicle {self.id} drops off the user {trip.id} at {self.env.now}')
def pick_up(self, trip):
    """Pick the user up at the trip origin and record waiting-time stats."""
    now = self.env.now
    self.mode = 'active'
    lg.info(f'Vehicle {self.id} picks up the user {trip.id} at {now}')
    # SOC spent driving to the pickup point (precomputed in send())
    self.charge_state -= self.charge_consumption_pickup
    trip.info['pickup_time'] = now
    trip.info['waiting_time'] = now - trip.info['arrival_time']
    self.location = trip.origin
def relocate(self, target_zone):
    """Move the vehicle to the centre of target_zone and mark it relocating."""
    # distance() returns a (distance, duration) pair
    dd = self.location.distance(target_zone.centre)
    self.location = target_zone.centre
    self.time_to_relocate = dd[1]
    self.charge_consumption_relocate = self.SOC_consumption(dd[0])
    lg.info(f'Vehicle {self.id} is relocated to the zone {target_zone.id}')
    self.mode = 'relocating'
def take_action(self, vehicle, charging_stations, vehicles, waiting_list):
    """Epsilon-greedy action selection from the tabular Q-values.

    The feasible action set depends on the vehicle's SOC and on state[5]
    (free-charging-station availability): action 3 needs SOC > 70, actions
    1 and 3 need state[5] >= 1. The chosen action id, plus the fresh state
    and zeroed reward accumulators, are stashed on the vehicle for the next
    update step. Returns the action id.
    """
    epsilon = epsilon_decay(self.episode)
    state = self.get_state(vehicle, charging_stations, vehicles, waiting_list)
    vehicle.old_location = vehicle.location
    # feasible Q-table columns for the current situation
    if vehicle.charge_state > 70:
        cols = ['0', '1', '2', '3', '4'] if state[5] >= 1 else ['0', '2', '4']
    else:
        cols = ['0', '1', '2', '4'] if state[5] >= 1 else ['0', '2', '4']
    if np.random.random() > epsilon:
        # exploit: positional argmax over the feasible columns, mapped back
        # to the real action id (same mapping the old if/elif chain applied)
        action = int(cols[np.argmax(self.q_table.loc[state, cols])])
    else:
        # explore uniformly over the same feasible set
        action = np.random.choice([int(c) for c in cols])
    vehicle.old_state = state
    vehicle.old_action = action
    vehicle.old_time = self.env.now
    # reset the per-decision reward accumulators
    for key in ('revenue', 'distance', 'charging', 'queue', 'parking',
                'missed', 'discharging'):
        vehicle.reward[key] = 0
    lg.info(
        f'new_action={action}, new_state={state}, {vehicle.charging_count}'
    )
    return action
def discharging(self, charging_station):
    """Start discharging at the station, down to a 50% SOC floor."""
    self.mode = 'discharging'
    # SOC spent driving to the station
    self.charge_state -= self.SOC_consumption(self.distance_to_CS)
    self.discharging_threshold = 50
    # energy (kWh) available above the floor, released at the station's power
    energy_to_release = (self.charge_state -
                         self.discharging_threshold) * self.battery_capacity / 100
    self.discharge_duration = energy_to_release / charging_station.power
    self.location = charging_station.location
    self.position = find_zone(self.location, zones)
    lg.info(f'Vehicle {self.id} enters the station at {self.env.now}')
def send_parking(self, parking):
    """Dispatch the vehicle to a parking lot ('ertp' = en route to parking)."""
    self.mode = 'ertp'
    # suppress the log burst of the initial placement at t == 0
    if self.env.now != 0:
        lg.info(
            f'Vehicle {self.id} is sent to the parking {parking.id} at {self.env.now}'
        )
    # distance() returns a (distance, duration) pair
    dd = self.location.distance(parking.location)
    self.distance_to_parking = dd[0]
    self.time_to_parking = dd[1]
    self.charge_state -= self.SOC_consumption(self.distance_to_parking)
def send(self, trip):
    """Lock the vehicle onto a trip and precompute the SOC cost of both legs."""
    self.mode = 'locked'
    # distance() returns a (distance, duration) pair for the pickup leg
    dd = self.location.distance(trip.origin)
    self.time_to_pickup = dd[1]
    self.charge_consumption_pickup = self.SOC_consumption(dd[0])
    self.charge_consumption_dropoff = self.SOC_consumption(trip.distance)
    trip.info['assigned_time'] = self.env.now
    self.rental_time = trip.duration
    lg.info(f'Vehicle {self.id} is sent to the request {trip.id}')
def trip_generation(self, zone):
    """Generate an endless stream of trip requests for one zone.

    Each trip draws its own interarrival time; after that delay elapses the
    trip is announced via the trip_start event and queued in waiting_list.
    """
    serial = 0
    while True:
        serial += 1
        # trip ids are (sequence-number, zone-id) pairs
        trip = Trip(self.env, (serial, zone.id), zone)
        yield self.env.timeout(trip.interarrival)
        # announce the arrival, then re-arm the event for the next trip
        self.trip_start.succeed()
        self.trip_start = self.env.event()
        self.trip = trip
        trip.info['arrival_time'] = self.env.now
        self.waiting_list.append(trip)
        lg.info(f'Trip {trip.id} is received at {self.env.now}')
        trip.start_time = self.env.now
def send_charge(self, charging_station):
    """Dispatch the vehicle to a charging station ('ertc' = en route to charge).

    Also charges the travel distance against the 'distance' reward, discounted
    back to the decision epoch in 15-minute periods.
    """
    self.mode = 'ertc'
    lg.info(f'Charging state of vehicle {self.id} is {self.charge_state}')
    # action 0/1/2 pick the closest / closest free / closest fast station
    station_kind = {0: '', 1: 'free ', 2: 'fast '}
    if self.action in station_kind:
        lg.info(
            f'Vehicle {self.id} is sent to the closest {station_kind[self.action]}charging station '
            f'{charging_station.id} at {self.env.now}')
    # distance() returns a (distance, duration) pair
    dd = self.location.distance(charging_station.location)
    self.distance_to_CS = dd[0]
    self.reward['distance'] += dd[0]
    k = ceil((self.env.now - self.decision_time) / 15)
    self.reward['distance'] = self.reward['distance'] * 0.9**k
    if isinstance(self.reward['distance'], np.ndarray):
        self.reward['distance'] = self.reward['distance'][0]
    self.time_to_CS = dd[1]
    self.position = find_zone(self.location, zones)
def finish_discharging(self, charging_station):
    """Book a completed V2G discharge session and idle the vehicle.

    Credits the discharging reward/profit using the hour-of-day electricity
    price, discounts the reward back to the decision epoch, and deducts the
    discharged energy from the SOC.
    """
    self.mode = 'idle'
    # hour-of-day index into the electricity price table
    for j in range(0, 24):
        if j * 60 <= self.env.now % 1440 <= (j + 1) * 60:
            h = j
    earning = (self.charge_state -
               self.discharging_threshold) / 100 * 50 * charging_cost[h] / 100
    self.reward['discharging'] += earning
    self.profit += earning
    k = ceil((self.env.now - self.decision_time) / 15)
    self.reward['discharging'] = self.reward['discharging'] * 0.9**k
    # FIX: the guard previously tested reward['charging'] while unwrapping
    # reward['discharging'] — both must refer to the same key.
    if isinstance(self.reward['discharging'], np.ndarray):
        self.reward['discharging'] = self.reward['discharging'][0]
    self.charge_state -= (charging_station.power * self.discharge_duration
                          ) / (self.battery_capacity / 100)
    lg.info(
        f'Finished discharging, Charging state of vehicle {self.id} is {self.charge_state} at {self.env.now}'
    )
def update_value(self, vehicle, charging_stations, vehicles, waiting_list):
    """One tabular Q-learning backup for the vehicle's previous (state, action).

    Uses a visit-count learning rate (alpha = 1/n), the max Q over the
    actions feasible in the new state, and a Gamma**k discount where k is
    the number of elapsed 15-minute periods since the decision.
    """
    counter_col = f'counter_{vehicle.old_action}'
    action_col = f'{vehicle.old_action}'
    self.q_table.loc[vehicle.old_state, counter_col] += 1
    # visit-count based learning rate
    alpha = 1 / self.q_table.loc[vehicle.old_state, counter_col]
    state = self.get_state(vehicle, charging_stations, vehicles, waiting_list)
    # feasible action columns in the new state (same sets as action selection)
    if vehicle.charge_state > 70:
        cols = ['0', '1', '2', '3', '4'] if state[5] >= 1 else ['0', '2', '4']
    else:
        cols = ['0', '1', '2', '4'] if state[5] >= 1 else ['0', '2', '4']
    q = float(max(self.q_table.loc[state, cols]))
    # scalar reward: revenue and discharging earn, everything else costs
    vehicle.r = float(-(vehicle.reward['charging']
                        + vehicle.reward['distance'] * 0.80
                        - vehicle.reward['revenue']
                        - vehicle.reward['discharging'] * 0.3
                        + vehicle.reward['queue'] / 30
                        + vehicle.reward['parking'] / 120
                        + vehicle.reward['missed']))
    vehicle.total_rewards['state'].append(vehicle.old_state)
    vehicle.total_rewards['action'].append(vehicle.old_action)
    vehicle.total_rewards['reward'].append(vehicle.r)
    vehicle.final_reward += vehicle.r
    # what if it changed meanwhile?
    vehicle.old_q = self.q_table.loc[vehicle.old_state, action_col]
    k = ceil((self.env.now - vehicle.decision_time) / 15)
    self.q_table.loc[vehicle.old_state, action_col] = (
        vehicle.old_q + alpha * (vehicle.r + self.Gamma ** k * q - vehicle.old_q))
    lg.info(
        f'old_action={vehicle.old_action}, old_state={vehicle.old_state}, new_state={state}, {vehicle.r}'
    )
def charging(self, charging_station):
    """Start charging at the station, up to the charging threshold.

    Computes the charge duration from the SOC gap, the battery capacity
    and the station's power, and moves the vehicle to the station.
    """
    self.mode = 'charging'
    # SOC spent driving to the station
    self.charge_state -= self.SOC_consumption(self.distance_to_CS)
    # NOTE: the original branched on time of day here, but every branch set
    # the same value — collapsed to one assignment (behavior unchanged).
    self.charging_threshold = 100
    self.charge_duration = (
        ((self.charging_threshold - self.charge_state) *
         self.battery_capacity / 100) / charging_station.power)
    self.location = charging_station.location
    self.position = find_zone(self.location, zones)
    lg.info(f'Vehicle {self.id} enters the station at {self.env.now}')
def charge_task(self, vehicle, action):
    """Drive the vehicle to the chosen charging station and manage the session.

    The vehicle travels to the station, queues for a plug with a priority
    derived from its SOC (lower SOC => lower prio value => served earlier),
    and then charges. Both the queue wait and the charging itself race
    against interruption events that fire when the vehicle is matched to a
    trip; queue time is charged to the 'queue' reward, discounted back to
    the decision epoch in 15-minute periods.
    """
    # finding the charging station based on the charging decision
    # action 0 ==> closest CS
    # action 1 ==> closest free CS
    # action 2 ==> closest fast CS
    # NOTE(review): if action >= len(charging_stations) this loop leaves
    # charging_station unbound — presumably callers only pass valid indices.
    for i in range(len(self.charging_stations)):
        if action == i:
            charging_station = self.charging_stations[i]
    # vehicle sends to the CS and enters the queue using a priority coming from its SOC
    yield self.env.process(self.start_charge(charging_station, vehicle))
    # SOC rounded down to its tens digit, e.g. 47% -> priority 4
    prio = int((vehicle.charge_state - vehicle.charge_state % 10) / 10)
    if isinstance(prio, np.ndarray):
        prio = prio[0]
    req = charging_station.plugs.request(priority=prio)
    vehicle.mode = 'queue'
    # the vehicle either starts charging after the queue or interrupt the queue if it matches with a request
    events = yield req | vehicle.queue_interruption
    # start charging if it does not assigns to a request
    if req in events:
        vehicle.charging_demand['time_start'] = self.env.now
        lg.info(f'Vehicle {vehicle.id} starts charging at {self.env.now}')
        vehicle.t_start_charging = self.env.now
        # time spent waiting in the queue, discounted to the decision epoch
        vehicle.reward['queue'] += (vehicle.t_start_charging -
                                    vehicle.t_arriving_CS)
        k = ceil((self.env.now - vehicle.decision_time) / 15)
        vehicle.reward[
            'queue'] = vehicle.reward['queue'] * self.learner.Gamma**k
        if isinstance(vehicle.reward['queue'], np.ndarray):
            vehicle.reward['queue'] = vehicle.reward['queue'][0]
        vehicle.mode = 'charging'
        charging = self.env.process(
            self.finish_charge(charging_station, vehicle))
        # even while charging it can matches with a request (but with more costs)
        yield charging | vehicle.charging_interruption
        charging_station.plugs.release(req)
        req.cancel()
        # if it interrupts before finishing the charging event, we need to update everything
        if not charging.triggered:
            charging.interrupt()
            lg.info(
                f'Vehicle {vehicle.id} stops charging at {self.env.now}')
        return
    # if it interrupts the queue before the charging is being started, we need to update everything
    else:
        # the whole wait counted as queue time; same discounting as above
        vehicle.reward['queue'] += (self.env.now - vehicle.t_arriving_CS)
        k = ceil((self.env.now - vehicle.decision_time) / 15)
        vehicle.reward[
            'queue'] = vehicle.reward['queue'] * self.learner.Gamma**k
        if isinstance(vehicle.reward['queue'], np.ndarray):
            vehicle.reward['queue'] = vehicle.reward['queue'][0]
        vehicle.charging_demand['SOC_end'] = vehicle.charge_state
        lg.info(f'vehicle {vehicle.id} interrupts the queue')
        req.cancel()
        charging_station.plugs.release(req)
        return
def take_action(self, vehicle, charging_stations, vehicles, waiting_list,
                env, episode):
    """DQN action selection plus experience-replay bookkeeping.

    Builds the new state, queries the network for an action, stores the
    previous transition (with its elapsed period) in the replay buffer,
    periodically retrains / syncs the target network, and resets the
    vehicle's per-decision reward accumulators. Returns the action.
    """
    state = self.get_state(vehicle, charging_stations, vehicles,
                           waiting_list, env)
    state = state.reshape((1, len(state)))
    lg.info(f'new_state={state}, {vehicle.charging_count}')
    action = self.act(state, episode)
    vehicle.old_location = vehicle.location
    lg.info(
        f'new_action={action}, new_state={state}, {vehicle.charging_count}'
    )
    # scalar reward: revenue and discharging earn, everything else costs
    vehicle.r = float(-(vehicle.reward['charging']
                        + vehicle.reward['distance'] * 0.80
                        - vehicle.reward['revenue']
                        - vehicle.reward['discharging']
                        + vehicle.reward['queue'] / 30
                        + vehicle.reward['parking'] / 120
                        + vehicle.reward['missed']))
    vehicle.final_reward += vehicle.r
    # first call has no previous transition to store
    if vehicle.old_state is not None:
        period = env.now - vehicle.old_time
        self.store(vehicle.old_state, vehicle.old_action, vehicle.r, state,
                   period)
        buffered = len(self.expirience_replay)
        if buffered > self.batch_size and buffered % 10 == 1:
            self.retrain(self.batch_size)
        if buffered > self.batch_size and buffered % 50 == 1:
            self.alighn_target_model()
    vehicle.old_time = env.now
    vehicle.old_state = state
    vehicle.old_action = action
    # reset the per-decision reward accumulators
    for key in ('revenue', 'distance', 'charging', 'queue', 'parking',
                'missed', 'discharging'):
        vehicle.reward[key] = 0
    return action
def start_discharge(self, charging_station, vehicle):
    """Send the vehicle to a station for V2G discharging and record the demand.

    Reuses send_charge() for the routing, snapshots a demand record for the
    charging-station planning problem, then simulates the travel time and
    starts the discharge on arrival.
    """
    vehicle.send_charge(charging_station)
    lg.info(f'vehicle {vehicle.id} is sent for discharging')
    # we need this information for CS planning problem
    vehicle.discharging_demand = {
        'vehicle_id': vehicle.id,
        'time_send': self.env.now,
        'time_enter': self.env.now + vehicle.time_to_CS,
        'time_start': None,
        'SOC_end': None,
        'SOC_send': vehicle.charge_state,
        'lat': vehicle.location.lat,
        'long': vehicle.location.long,
        'v_hex': vehicle.position.hexagon,
        'CS_location': [charging_station.location.lat,
                        charging_station.location.long],
        'v_position': vehicle.position.id,
        'CS_position': charging_station.id,
        'distance': vehicle.location.distance(charging_station.location)[0],
    }
    # travel to the station, then begin discharging
    yield self.env.timeout(vehicle.time_to_CS)
    vehicle.discharging(charging_station)
    vehicle.t_arriving_CS = self.env.now
def discharge_task(self, vehicle):
    """Run one V2G discharging session for the vehicle, if its SOC allows.

    Prefers a station with a free plug (else the closest overall), queues
    with a fixed low priority, and races both the queue wait and the
    discharge itself against interruption events that fire when the vehicle
    is matched to a trip.
    """
    # vehicle sends for discharging only if it has more than 70% charge and there is free CS
    if vehicle.charge_state > 70:
        free_CS = [
            x for x in self.charging_stations if x.plugs.count < x.capacity
        ]
        if len(free_CS) >= 1:
            charging_station = closest_facility(free_CS, vehicle)
        else:
            charging_station = closest_facility(self.charging_stations,
                                                vehicle)
        yield self.env.process(
            self.start_discharge(charging_station, vehicle))
        # it enters the station with a priority
        # (fixed 5, i.e. behind any charging vehicle with SOC < 50 — see
        # charge_task's SOC-derived priorities)
        req = charging_station.plugs.request(priority=5)
        vehicle.mode = 'queue'
        # the vehicle either starts discharging after the queue or interrupt the queue if it matches with a request
        events = yield req | vehicle.queue_interruption
        if req in events:
            vehicle.discharging_demand['time_start'] = self.env.now
            lg.info(
                f'Vehicle {vehicle.id} starts discharging at {self.env.now}'
            )
            vehicle.t_start_discharging = self.env.now
            vehicle.mode = 'discharging'
            discharging = self.env.process(
                self.finish_discharge(vehicle=vehicle,
                                      charging_station=charging_station))
            # even while charging it can matches with a request (but with more costs)
            yield discharging | vehicle.discharging_interruption
            charging_station.plugs.release(req)
            req.cancel()
            vehicle.discharging_demand['SOC_end'] = vehicle.charge_state
            # if it interrupts before finishing the charging event, we need to update everything
            if not discharging.triggered:
                discharging.interrupt()
                lg.info(
                    f'Vehicle {vehicle.id} stops discharging at {self.env.now}'
                )
            return
        else:
            # matched with a trip while still queueing: abandon the plug request
            lg.info(f'vehicle {vehicle.id} interrupts the queue')
            req.cancel()
            charging_station.plugs.release(req)
            return
    else:
        return
def park(self, vehicle, parking):
    """Cruise briefly, then park the vehicle until its next assignment.

    At simulation start (now <= 5) the vehicle parks immediately. Otherwise
    it cruises for up to 10.5 minutes; if matched to a trip meanwhile
    (circling_stop fires) the cruise is cut short, else the vehicle drives
    to the parking lot and waits there, accumulating 'parking' reward time
    discounted back to the decision epoch in 15-minute periods.
    """
    # all vehicles must start from parking
    if self.env.now <= 5:
        vehicle.parking(parking)
        yield vehicle.parking_stop
    else:
        # each vehicle cruise 10 mins before sending to parking
        if vehicle.mode == 'idle':
            vehicle.mode = 'circling'
            lg.info(
                f'vehicle {vehicle.id} starts cruising at {self.env.now}')
        circling_interruption = vehicle.circling_stop
        vehicle.t_start_circling = self.env.now
        circling_finish = self.env.timeout(10.5)
        parking_events = yield circling_interruption | circling_finish
        # vehicle sends to the request if there is any
        if circling_interruption in parking_events:
            # NOTE(review): an interrupted cruise is charged SOC for the full
            # 10 minutes (10 * 15/60) regardless of actual elapsed time,
            # unlike the finish branch below — confirm intended
            vehicle.charge_state -= vehicle.SOC_consumption(10 * (15 / 60))
            lg.info(
                f'vehicle {vehicle.id} interrupts cruising at {self.env.now}'
            )
        # vehicle sends to the parking if there is no request
        elif circling_finish in parking_events:
            if vehicle.mode == 'circling':
                lg.info(
                    f'vehicle {vehicle.id} stops cruising at {self.env.now}'
                )
                circling_time = max(
                    float(self.env.now - vehicle.t_start_circling), 0)
                vehicle.charge_state -= vehicle.SOC_consumption(
                    circling_time * (15 / 60))
            vehicle.send_parking(parking)
            yield self.env.timeout(vehicle.time_to_parking)
            t_start_parking = self.env.now
            vehicle.parking(parking)
            # vehicle stays in parking until it assigns to a request or send for charging/relocating
            yield vehicle.parking_stop
            vehicle.reward['parking'] += (self.env.now - t_start_parking)
            k = ceil((self.env.now - vehicle.decision_time) / 15)
            vehicle.reward['parking'] = vehicle.reward[
                'parking'] * self.learner.Gamma**k
def parking(self, parking):
    """Place the vehicle at the parking lot and mark it parked."""
    self.mode = 'parking'
    self.location = parking.location
    self.position = find_zone(self.location, zones)
    # skip logging the initial placement burst (t < 5)
    if self.env.now >= 5:
        lg.info(f'Vehicle {self.id} starts parking at {self.env.now}')
def missed_trip(self):
    """Background process: expire waiting trips and penalize nearby vehicles.

    Every simulated minute, each unassigned trip may become 'missed':
    always after 10 minutes of waiting, with probability 0.5 after 5,
    with probability 0.1 after 3. Vehicles near a missed trip that were
    busy (dis)charging, or idle/parked with a low SOC despite having
    charged before, get a discounted 'missed' penalty.

    Fixes vs. the original:
      * deadlines are checked longest-first — the old `> start+3` branch
        shadowed the +5 and +10 branches, making them unreachable (trips
        were never force-expired);
      * iterate over a snapshot of waiting_list — the old code removed
        items from the list while iterating it, skipping elements.
    """
    while True:
        for trip in list(self.waiting_list):
            missed = False
            if trip.mode == 'unassigned':
                if self.env.now > (trip.start_time + 10):
                    # waited more than 10 minutes: always lost
                    missed = True
                elif self.env.now > (trip.start_time + 5):
                    missed = random.uniform(0, 1) < 0.5
                elif self.env.now > (trip.start_time + 3):
                    missed = random.uniform(0, 1) < 0.1
            if missed:
                trip.mode = 'missed'
                trip.info['mode'] = 'missed'
                self.trip_list.append(trip)
                self.waiting_list.remove(trip)
                lg.info(f'trip {trip.id} is missed at {self.env.now}')
            if trip.mode == 'missed':
                # vehicles busy with charging duties that decided near the trip
                vehicle_responsible = [
                    x for x in self.vehicles
                    if x.mode in ['charging', 'discharging', 'queue', 'ertc']
                    and x.old_location.distance_1(trip.origin) <= 10
                    and x.charge_state > 30
                ]
                # same duty modes, but judged by current location
                vehicle_responsible_0 = [
                    x for x in self.vehicles
                    if x.location.distance_1(trip.origin) <= 10
                    and x.mode in ['charging', 'discharging', 'queue', 'ertc']
                    and x.charge_state > 30
                ]
                # idle/parked nearby with too little charge to serve the trip
                vehicle_responsible_1 = [
                    x for x in self.vehicles
                    if x.location.distance_1(trip.origin) <= 10
                    and x.mode in ['idle', 'parking']
                    and x.charge_state <= 25 and x.charging_count > 0
                ]
                for vehicle in (vehicle_responsible + vehicle_responsible_0 +
                                vehicle_responsible_1):
                    vehicle.reward['missed'] += 50
                    k = ceil((self.env.now - vehicle.decision_time) / 15)
                    vehicle.reward['missed'] = vehicle.reward[
                        'missed'] * self.learner.Gamma**k
        yield self.env.timeout(1)
def matching(vehicles, trips):
    """Assign vehicles to trip requests; return [{'vehicle': v, 'trip': t}, ...].

    Primary path: a docplex assignment model maximizing total revenue minus
    a small distance cost, with at most one trip per vehicle, one vehicle
    per trip, and an SOC feasibility constraint. On any failure it falls
    back to greedy nearest-available-vehicle matching (which also removes
    matched vehicles from the input list).
    """
    try:
        mdl = Model("CS_development")
        vehicle_range = [i.id for i in vehicles]
        trip_range = [j.id for j in trips]
        # pickup distance; vehicles currently (dis)charging pay a 2-unit penalty
        d = {}
        for i in vehicles:
            for j in trips:
                d[i.id, j.id] = i.location.distance_1(j.origin)
                if i.mode in ['charging', 'discharging']:
                    d[i.id, j.id] += 2
                if isinstance(d[i.id, j.id], list):
                    # FIX: original unwrapped `[i.id, j.id][0]` — a fresh list
                    # of the ids — instead of the stored distance value.
                    d[i.id, j.id] = float(d[i.id, j.id][0])
        # trip length
        l = {}
        for j in trips:
            l[j.id] = j.distance
            if isinstance(l[j.id], list):
                l[j.id] = float(l[j.id][0])
        # trip revenue
        p = {}
        for j in trips:
            p[j.id] = j.revenue
            if isinstance(p[j.id], list):
                p[j.id] = float(p[j.id][0])
        # vehicle state of charge
        SOC = {}
        for i in vehicles:
            SOC[i.id] = i.charge_state
            if isinstance(SOC[i.id], list):
                # FIX: original unwrapped `[i.id][0]` — the vehicle id itself —
                # instead of the stored SOC value.
                SOC[i.id] = float(SOC[i.id][0])
        x = mdl.binary_var_matrix(vehicle_range, trip_range, name='x')
        # each vehicle serves at most one trip
        for i in vehicles:
            mdl.add_constraint(
                mdl.sum(x[i.id, j.id] for j in trips) <= 1, 'C1')
        # each trip is served by at most one vehicle
        for j in trips:
            mdl.add_constraint(
                mdl.sum(x[i.id, j.id] for i in vehicles) <= 1, 'C2')
        # SOC feasibility: enough charge for pickup + trip with a 20% floor
        for j in trips:
            for i in vehicles:
                mdl.add_constraint(
                    x[i.id, j.id] * (d[i.id, j.id] + l[j.id]) * 0.4
                    <= SOC[i.id] - 20, 'C2')
        mdl.maximize(
            mdl.sum(x[i.id, j.id] * (p[j.id] - d[i.id, j.id] * 0.01)
                    for i in vehicles for j in trips))
        mdl.solve()
        # mdl.report()
        pairs = [
            dict(vehicle=i, trip=j) for i in vehicles for j in trips
            if x[i.id, j.id].solution_value != 0
        ]
        lg.info(
            f'NoT = {len(trips)}, NoV = {len(vehicles)}, NoM = {len(pairs)}')
    except Exception:
        # FIX: was a bare `except:` — keep the greedy fallback, but stop
        # swallowing SystemExit/KeyboardInterrupt as well.
        lg.info('Solving the model fails')
        pairs = []
        for trip in trips:
            available_vehicles = available_vehicle(vehicles, trip)
            distances = [
                vehicle.location.distance_1(trip.origin)
                for vehicle in available_vehicles
            ]
            # If there is no available vehicle, add the trip to the waiting list
            if len(available_vehicles) >= 1:
                vehicle = [
                    x for x in available_vehicles
                    if x.location.distance_1(trip.origin) == min(distances)
                ][0]
                vehicles.remove(vehicle)
                pairs.append(dict(vehicle=vehicle, trip=trip))
        lg.info(
            f'NoT = {len(trips)}, NoV = {len(vehicles)}, NoM = {len(pairs)}')
    return pairs