Example #1
 def setUp(self) -> None:
     self.world = classes.World(
         40,
         policy=decision.SwapAllPolicy(),
         initial_state=clustering.scripts.get_initial_state(100, 20),
         visualize=False,
     )
Example #2
 def setUp(self) -> None:
     self.world = classes.World(
         shift_duration=BATTERY_INVENTORY * SWAP_TIME_PER_BATTERY + 1,
         sample_size=100,
         number_of_clusters=10,
         initial_state=get_initial_state(500),
     )
     self.world.state.current_location = self.world.state.depots[0]
Example #3
 def setUp(self) -> None:
     self.world = classes.World(
         80,
         None,
         clustering.scripts.get_initial_state(100, 10),
         visualize=False,
         verbose=False,
         NUMBER_OF_NEIGHBOURS=5,
         TRAINING_SHIFTS_BEFORE_SAVE=1,
         MODELS_TO_BE_SAVED=2,
     )
Example #4
 def setUp(self) -> None:
     self.world = classes.World(
         shift_duration=VAN_BATTERY_INVENTORY * SWAP_TIME_PER_BATTERY + 1,
         policy=decision.SwapAllPolicy(),
         initial_state=clustering.scripts.get_initial_state(
             sample_size=100,
             number_of_clusters=10,
             initial_location_depot=True),
         verbose=False,
         visualize=False,
     )
     self.vehicle = self.world.state.vehicles[0]
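Here shift_duration is presumably chosen so the vehicle can swap its full battery inventory exactly once before the shift ends; e.g. with a hypothetical inventory of 17 batteries at 2 minutes per swap, shift_duration = 17 * 2 + 1 = 35.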
Example #5
def run_analysis(
    shift_duration=100,
    sample_size=100,
    number_of_clusters=10,
    policies=None,
    visualize_world=False,
    smooth_curve=True,
    verbose=False,
):
    """
    Method to run different policies and analysis their performance
    :param shift_duration: total shift to be analysed
    :param sample_size: size of instances
    :param number_of_clusters: number of clusters in the world
    :param policies: different policies to be analysed
    :param visualize_world: boolean - if the running of the world should be visualized
    :param smooth_curve: boolean - if the analysed metrics is to be smoothed out in the analysis plot
    :return: matplotlib figure - figure containing plot of the analysis
    """
    instances = []
    # loop over all policies to be analysed - default RandomRolloutPolicy if no policy is given
    for policy in policies if policies else ["RandomRolloutPolicy"]:
        print(f"\n---------- {policy} ----------")
        # create the world object with given input parameters
        world = classes.World(
            shift_duration,
            sample_size=sample_size,
            number_of_clusters=number_of_clusters,
            policy=policy,
            verbose=verbose,
        )
        # pumping up the trip intensity
        for cluster in world.state.clusters:
            cluster.trip_intensity_per_iteration = round(cluster.ideal_state * 0.1)
        # add scooter trip generation event and a vehicle arrival event
        world.add_event(classes.GenerateScooterTrips(0))
        world.add_event(
            classes.VehicleArrival(0, world.state.current_location.id, visualize_world)
        )
        # run the world and add the world object to a list containing all world instances
        world.run()
        instances.append(world)

    # visualize the world instances that have been run
    figure = visualize_analysis(instances, policies, smooth_curve)

    return figure
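For reference, a minimal way to drive run_analysis (hypothetical, not from the source; "RandomRolloutPolicy" is the default policy string used above, and the output file name is made up):

figure = run_analysis(
    shift_duration=120,
    policies=["RandomRolloutPolicy"],
)
figure.savefig("analysis.png")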
Example #6
def training(input_arguments, suffix):
    SAMPLE_SIZE = 2500
    action_interval, number_of_neighbours = input_arguments
    world_to_analyse = classes.World(
        960,
        None,
        clustering.scripts.get_initial_state(
            SAMPLE_SIZE,
            50,
            number_of_vans=2,
            number_of_bikes=0,
        ),
        verbose=False,
        visualize=False,
        MODELS_TO_BE_SAVED=1,
        TRAINING_SHIFTS_BEFORE_SAVE=50,
        ANN_LEARNING_RATE=0.0001,
        ANN_NETWORK_STRUCTURE=[1000, 2000, 100],
        REPLAY_BUFFER_SIZE=100,
        NUMBER_OF_NEIGHBOURS=number_of_neighbours,
        DIVIDE_GET_POSSIBLE_ACTIONS=action_interval,
    )
    world_to_analyse.policy = world_to_analyse.set_policy(
        policy_class=decision.EpsilonGreedyValueFunctionPolicy,
        value_function_class=decision.value_functions.ANNValueFunction,
    )
    for cluster in world_to_analyse.state.clusters:
        cluster.scooters = cluster.scooters[: round(len(cluster.scooters) * 0.6)]
        cluster.ideal_state = round(cluster.ideal_state * 0.6)
    decision_times = [train_value_function(world_to_analyse, save_suffix=f"{suffix}")]

    df = pd.DataFrame(
        decision_times,
        columns=["Avg. time per shift"],
    )

    if not os.path.exists("computational_study"):
        os.makedirs("computational_study")

    df.to_excel(
        f"computational_study/training_time_ai{action_interval}_nn{number_of_neighbours}.xlsx"
    )
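A hypothetical driver for training(), not part of the source, sweeping the two values it unpacks from input_arguments (the parameter grids here are illustrative):

if __name__ == "__main__":
    for action_interval in (1, 2, 4):
        for number_of_neighbours in (3, 5):
            training(
                (action_interval, number_of_neighbours),
                suffix=f"ai{action_interval}_nn{number_of_neighbours}",
            )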
Example #7
def estimate_reward(
    state, remaining_shift_duration: int, number_of_simulations=NUMBER_OF_ROLLOUTS,
):
    """
    Does n times scenario simulations and returns the highest conducted reward from simulation
    :param state: State - state to de the simulations from
    :param remaining_shift_duration: int - time left on shift = length of simulation
    :param number_of_simulations: int - number of simulations to be performed (default = 10)
    :return: int - maximum reward from simulations
    """

    all_rewards = []

    # Do n scenario simulations
    for i in range(number_of_simulations):
        simulation_counter = 1
        world = classes.World(
            remaining_shift_duration, initial_state=copy.deepcopy(state)
        )
        next_is_vehicle_action = True
        # Simulate until shift ends
        while world.time < remaining_shift_duration:
            if next_is_vehicle_action:
                action = policies.RandomActionPolicy.get_best_action(world)
                # TODO action time doesn't take into account the time of battery change in depot
                world.time = world.time + action.get_action_time(
                    world.state.get_distance_id(
                        world.state.current_location.id, action.next_location
                    )
                )
                world.add_reward(world.state.do_action(action), discount=True)
            else:
                _, _, lost_demand = world.state.system_simulate()
                world.add_reward(lost_demand * LOST_TRIP_REWARD, discount=True)
                simulation_counter += 1
            next_is_vehicle_action = (
                world.time < simulation_counter * ITERATION_LENGTH_MINUTES
            )

        all_rewards.append(world.get_total_reward())

    return max(all_rewards)
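A hypothetical call site for estimate_reward, assuming a world object constructed as in the earlier examples (names and values are illustrative):

best_reward = estimate_reward(
    world.state, remaining_shift_duration=60, number_of_simulations=10
)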
Example #8
def heatmap():
    import folium
    import classes
    import clustering.scripts
    from folium.plugins import HeatMap

    map_hooray = folium.Map(location=[59.925586, 10.730721], zoom_start=13)

    world_to_analyse = classes.World(
        960,
        None,
        clustering.scripts.get_initial_state(
            2500,
            50,
            number_of_vans=2,
            number_of_bikes=0,
        ),
        verbose=False,
        visualize=False,
        MODELS_TO_BE_SAVED=5,
        TRAINING_SHIFTS_BEFORE_SAVE=50,
        ANN_LEARNING_RATE=0.0001,
        ANN_NETWORK_STRUCTURE=[1000, 2000, 1000, 200],
        REPLAY_BUFFER_SIZE=100,
        test_parameter_name="dr_ltr",
    )
    percentage = 1500 / 2500
    all_coordinates = []
    for cluster in world_to_analyse.state.clusters:
        cluster.scooters = cluster.scooters[: round(len(cluster.scooters) * percentage)]
        cluster.ideal_state = round(cluster.ideal_state * percentage)
        for scooter in cluster.scooters:
            all_coordinates.append(scooter.get_location())

    HeatMap(all_coordinates).add_to(map_hooray)

    # Display the map
    map_hooray.save("heatmap.HTML")
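For context, the folium part in isolation: a minimal, self-contained sketch with made-up Oslo coordinates (HeatMap expects a list of [lat, lng] pairs):

import folium
from folium.plugins import HeatMap

m = folium.Map(location=[59.925586, 10.730721], zoom_start=13)
HeatMap([[59.92, 10.75], [59.93, 10.72], [59.91, 10.74]]).add_to(m)
m.save("minimal_heatmap.html")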
Example #9
#!/usr/bin/env python3

import classes

t = classes.World()
t.set('hello world')
print(t.greet())

t.many(['Hello', 'World', 'And', 'Another', 'String', 'Inside', 'A List'])
print(t.greet())
Example #10
def init_world():
    """ Initializes World object """
    world = classes.World({})
    return world
Example #11
import os

SAMPLE_SIZE = 2500
NUMBER_OF_CLUSTERS = [10, 20, 30, 50, 75, 100, 200, 300]
standard_parameters = globals.HyperParameters()
decision_times = []
for num_clusters in NUMBER_OF_CLUSTERS:
    world_to_analyse = classes.World(
        960,
        None,
        clustering.scripts.get_initial_state(
            SAMPLE_SIZE,
            num_clusters,
            number_of_vans=2,
            number_of_bikes=0,
        ),
        verbose=False,
        visualize=False,
        MODELS_TO_BE_SAVED=1,
        TRAINING_SHIFTS_BEFORE_SAVE=10,
        ANN_LEARNING_RATE=0.0001,
        ANN_NETWORK_STRUCTURE=[1000, 2000, 1000, 200],
        REPLAY_BUFFER_SIZE=64,
    )
    world_to_analyse.policy = world_to_analyse.set_policy(
        policy_class=decision.EpsilonGreedyValueFunctionPolicy,
        value_function_class=decision.value_functions.ANNValueFunction,
    )

    decision_times.append(train_value_function(world_to_analyse))
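Presumably decision_times is then summarised much like in the training() example above; a sketch assuming pandas is imported as pd (the file name is made up):

pd.DataFrame(
    decision_times,
    index=NUMBER_OF_CLUSTERS,
    columns=["Avg. time per shift"],
).to_excel("computational_study/training_time_by_clusters.xlsx")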
Example #12
 def setUp(self) -> None:
     self.world = classes.World(1, None, get_initial_state(2500, 50))
Example #13
        next_is_vehicle_action = (
            world.time < simulation_counter * ITERATION_LENGTH_MINUTES)

    return world


if __name__ == "__main__":

    import classes
    import decision.value_functions
    import clustering.scripts

    world_to_analyse = classes.World(
        960,
        None,
        clustering.scripts.get_initial_state(
            2500,
            50,
            number_of_vans=3,
            number_of_bikes=0,
        ),
        verbose=False,
        visualize=False,
    )
    world_to_analyse.policy = world_to_analyse.set_policy(
        policy_class=decision.EpsilonGreedyValueFunctionPolicy,
        value_function_class=decision.value_functions.ANNValueFunction,
    )

    training_simulation(world_to_analyse)
Example #14
# All fun and games. Nothing serious happening here; just messing around to see if the various functions work

import classes
import simulation_functions
import create

people = create.create_people("people.csv")
events = create.create_events("events.csv")
new_world = classes.World(events[0:3], people)
round_turn = 0


def go(steps, round_turn=round_turn):
    # Note: the default binds round_turn's value (0) at definition time,
    # so the counter restarts from 0 on every call to go().
    for i in range(0, steps):
        round_turn += 1
        print("Round", round_turn)
        print()
        simulation_functions.sim_world(new_world)
        print()
        print()
Example #15
File: bot.py Project: DaRealNim/MCBot
def main():
	global PROT_VERSION, HOST, PORT, NAME, SELF, WORLD, SENDPOSTHREAD, CURRENTORDER, ACTIONLIST
	s = socket.socket()
	s.connect((HOST, PORT))
	p = proto.craft_packet(0x00, proto.pack_varint(PROT_VERSION) + proto.pack_string(HOST) + pack(">H",PORT) + b"\x02")
	s.send(p)
	
	p = proto.craft_packet(0x00, proto.pack_string(NAME))
	s.send(p)
	
	Thread(target=executeActions, args=(s,)).start()
	
	while True:
		plength = proto.unpack_varint(s) - 1  # packet length minus the (single-byte) packet id
		pid = proto.unpack_varint(s)
		if pid == 0x21:
			# print("Responding to Keep-Alive...")
			data = s.recv(plength)
			p = proto.craft_packet(0x0F, data)
			s.send(p)
		elif pid == 0x26:  # Join Game: player entity and world metadata
			eid = unpack(">i",s.recv(4))
			gamemode = unpack(">B",s.recv(1))
			dimension = unpack(">i",s.recv(4))
			hseed = unpack(">q",s.recv(8))
			mplayer = unpack(">B",s.recv(1))
			lvltype = proto.recv_string(s)
			viewdist = proto.unpack_varint(s)
			rdi = unpack(">?",s.recv(1))
			ers = unpack(">?",s.recv(1))
			SELF = classes.Player(eid, gamemode, dimension)
			WORLD = classes.World(hseed, lvltype, viewdist, ers)
			print(SELF.eid, SELF.gamemode, SELF.dimension)
			print(WORLD.leveltype, WORLD.viewdistance, WORLD.enablerespawnscreen)
		elif pid == 0x0F:  # Chat Message
			json_data = proto.recv_string(s)
			pos = unpack(">B", s.recv(1))
			j = json.loads(json_data)
			
			sender = j["with"][0]["text"]
			content = j["with"][1]
			print("From %s: %s"%(sender, content))
			
			try:
				if content.startswith(NAME):
					msgl = content.split()
					if msgl[1] == "introduce":
						p = proto.craft_packet(0x03, proto.pack_string("Hi, my name is %s."%NAME))
						s.send(p)
					elif msgl[1] == "move":
						print("MOVING")
						CURRENTORDER = "move"
						ACTIONLIST.append(["MOVE", (float(msgl[2]), float(msgl[3]))])
					elif msgl[1] == "stop":
						print("STOPPING")
						CURRENTORDER = "stop"
					elif msgl[1] == "sitrep":
						p = proto.craft_packet(0x03, proto.pack_string( "(%.2f, %.2f, %.2f)"%(SELF.x, SELF.y, SELF.z) ))
						s.send(p)
					elif msgl[1] == "strafe":
						SELF.x += 0.3
			except AttributeError:
				pass
					
		elif pid == 0x36:  # Player Position And Look
			x = unpack(">d", s.recv(8))[0]
			y = unpack(">d", s.recv(8))[0]
			z = unpack(">d", s.recv(8))[0]
			yaw = unpack(">f", s.recv(4))[0]
			pitch = unpack(">f", s.recv(4))[0]
			flags = unpack(">B", s.recv(1))[0]
			teleport_id = proto.unpack_varint(s)
			p = proto.craft_packet(0x00, proto.pack_varint(teleport_id))
			s.send(p)
			print("GOT YOUR POSITION (%.2f, %.2f, %.2f). If you see this message more than once, something is wrong."%(x, y, z))
			SELF.x = x
			SELF.y = y
			SELF.z = z
			SELF.yaw = yaw
			SELF.pitch = pitch
			# print("Position updated: (%.2f, %.2f, %.2f)"%(x,y,z))
			# print("yaw/pitch : (%.2f/%.2f)"%(yaw, pitch))
			if SENDPOSTHREAD is None:
				SENDPOSTHREAD = Thread(target=sendPositionUpdate, args=(s,))
				SENDPOSTHREAD.start()
		
		elif pid == 0x22:  # Chunk Data
			# UNFINISHED
			# NOTHING WORKS AND MY LIFE IS A PAIN
			data = s.recv(plength)
			
			# # print("Receiving chunk data")
			# chunkx = unpack(">i",s.recv(4))[0]
			# chunkz = unpack(">i",s.recv(4))[0]
			
			# fullchunk = unpack(">?",s.recv(1))[0]
			# if fullchunk:
				# chunk = classes.Chunk(chunkx, chunkz)
			# else:
				# chunk = WORLD.getChunk(chunkx, chunkz)
				
			# readbytes = 9
			
			# primarybitmask = proto.unpack_varint(s)
			# readbytes += len(proto.pack_varint(primarybitmask))
			# heightmap = nbtparser.NBTStruct()
			# heightmap.parseFromSocket(s)
			# heightmap.printPretty()
			
			# readbytes += 2*36*8 + 13 + 15 + 4 + (2+1+1+4)*2
			
			# if fullchunk:
				# biomes = s.recv(1024*4)
				# readbytes+=(1024*4)
			# else:
				# biomes = None
				
			# # print(biomes)
			
			# datasize = proto.unpack_varint(s)
			# readbytes += len(proto.pack_varint(datasize))
			# count = ("{0:b}".format(primarybitmask)).count("1")
			
			# ####
			# data = s.recv(datasize)
			# readbytes += datasize
			# ####
			
			
			# # for i in range(count):
				# # chunksection = classes.ChunkSection()
				# # bloccount = unpack(">H", s.recv(2))[0]
				# # bitsperblock = unpack(">B", s.recv(1))[0]
				
				# # palettetype = "indirect"
				# # if bitsperblock < 4:
					# # bitsperblock = 4
				# # if bitsperblock > 8:
					# # bitsperblock = 14
					# # palettetype = "direct"
				
				# # if palettetype == "indirect":
					# # palette_len = proto.unpack_varint(s)
					# # palette = None
					# # if palette_len:
						# # palette = []
						# # for j in range(palette_len):
							# # palette.append(proto.unpack_varint(s))
				# # datalongs = proto.unpack_varint(s)
				# # print(datalongs)
				# # datablocks = []
				# # for j in range(datalongs):
					# # datablocks.append(unpack(">Q", s.recv(8))[0])

			# nobe = proto.unpack_varint(s)
			# readbytes += len(proto.pack_varint(nobe))

			# print(chunkx, "/", chunkz)
			# print("Nb of active sections: ",count)
			# blockentities = s.recv(plength-readbytes)

			
			
		else:
			# print("Received unimplemented %s (%d data bytes):"%(hex(pid),plength))
			data = s.recv(plength)
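For context on the proto.unpack_varint calls above: Minecraft frames packets with LEB128-style varints. A standalone decoder sketch, illustrating the wire format rather than the project's actual proto module:

def read_varint(sock):
    # 7 data bits per byte, least-significant group first; the high bit
    # flags a continuation byte (at most 5 bytes for a 32-bit value).
    result = 0
    for shift in range(0, 35, 7):
        byte = sock.recv(1)[0]
        result |= (byte & 0x7F) << shift
        if not byte & 0x80:
            break
    if result >= 1 << 31:  # reinterpret as signed 32-bit
        result -= 1 << 32
    return result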