Example No. 1
 def setUp(self):
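     # 11x11 field with three predators in the corners and the prey in the centre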
     self.field = Field(11, 11)
     predators = [Predator((0, 0)), Predator((0, 10)), Predator((10, 0))]
     prey = Prey((5, 5))
     for pred in predators:
         self.field.add_player(pred)
     self.field.add_player(prey)
Example No. 2
    def __init__(self, master, *args, **kwargs):
        super().__init__(master=master, *args, **kwargs)
        self.is_paused = True
        self.root = master
        self.width = 700
        self.height = 500

        self.root.geometry("{}x{}".format(self.width, self.height))
        self.root.resizable(False, False)

        self.left_bar = tk.Frame(master=self.root)
        self.left_bar.pack(side=tk.LEFT)

        self.automate_seed_entry_label = tk.Label(self.left_bar,
                                                  text=SEED_INPUT_LABEL)
        self.automate_seed_entry_label.grid()

        self.automate_seed_entry = tk.Entry(self.left_bar)
        self.automate_seed_entry.bind('<Key-Return>', self.set_drawer)
        self.automate_seed_entry.grid()

        self.next_step_button = tk.Button(master=self.left_bar,
                                          text=NEXT_STEP,
                                          command=self.next_step)
        self.next_step_button.grid()

        self.pause_button_text = tk.StringVar(value=START)
        self.pause_button = tk.Button(master=self.left_bar,
                                      textvariable=self.pause_button_text,
                                      command=self.pause)
        self.pause_button.grid()

        self.screenshot_button_text = tk.StringVar(value=SCREENSHOT)
        self.screenshot_button = tk.Button(
            master=self.left_bar,
            textvariable=self.screenshot_button_text,
            command=self.screenshot)
        self.screenshot_button.grid()

        self.canvas_width = 500
        self.canvas_height = 500

        self.canvas = tk.Canvas(master=self.root,
                                cnf={
                                    "width": self.canvas_width,
                                    "height": self.canvas_height
                                })
        self.canvas.pack(
            side=tk.RIGHT)  # place(x=self.width - self.canvas_width)

        field_width = 100
        field_height = 100
        self.field = Field(field_width, field_height, 5)
        self.field.set_cell(field_width // 2, field_height // 2, 1)
        self.cells = self.get_rectangles()

        self.drawer = self.get_drawer(501)
        self.redraw_field()
Example No. 3
 def test_transition(self, n_runs=1000, rel_delta=0.05):
     """
     test if the transitions are computed correctly based on tripping probabilities.
     :param n_runs: number of runs to test, should not be too low because there is randomness involved
     :param rel_delta: relative plus/minus delta to be accepted as success
     """
     p = Player(location=(0, 0), tripping_prob=0.5)
     f = Field(11, 11)
     f.add_player(p)
     new_states = [f.transition(p, action=(-1, 0)) for _ in range(n_runs)]
     c = Counter(new_states)
     print(c)
     self.assertAlmostEqual(c[list(c.keys())[0]], n_runs * p.tripping_prob, delta=rel_delta * n_runs)
Example No. 4
    def parse_fields(self, first_line):
        # Bit-counter for fields smaller than 8b, will get collapsed into offset
        curr_bits = 0

        # Current offset, starting out with the parent region's initial offset
        curr_offset = self.initial_offset

        # Loop till EOF
        for i in range(first_line, len(self.lines)):
            curr = self.lines[i].strip()

            # Stop at closing bracket of block
            if curr == '}': break

            # Apply offset instruction and reset bit counter
            if 'Offset (' in curr:
                curr_offset = int(curr[curr.index('(') + 1:curr.index(')')],
                                  16)
                curr_bits = 0
                continue

            # Create the field; only keep it if it has a name and is at least one byte wide
            f = Field(curr_offset, i, self.lines)
            if f.name != '' and f.size >= 8:
                self.fields.append(f)

            # Add past field's size to offset, keep track of remainder
            curr_offset = curr_offset + math.floor(f.size / 8)
            curr_bits = curr_bits + f.size % 8

            # Collapse bit-counter if applicable
            if curr_bits / 8 >= 1:
                off_bytes = math.floor(curr_bits / 8)
                curr_bits = curr_bits - off_bytes * 8
                curr_offset = curr_offset + off_bytes
Example No. 5
 def test_growth(self):
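     # A new crop starts at height 0.0; one grow() step lifts it to 0.002 and draws water from the field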
     field_1 = Field()
     crop_1 = Crop(Seed, field_1)
     self.assertEqual(crop_1.height, 0.0)
     crop_1.grow(Weather())
     self.assertEqual(crop_1.height, 0.002)
     self.assertNotEqual(field_1.water_density, 1.0)
Example No. 6
    def patch_field(self, field, dest, names):
        byte_size = math.floor(field.size / 8)

        # Only patch fields that are exactly 2 or 4 bytes wide;
        # larger sizes are handled elsewhere
        if byte_size != 2 and byte_size != 4:
            dest.fields.append(field)
            return

        # A field name is four characters long; try substituting every position except the first
        subfields = []
        for i in range(1, 4):

            # Try to create all sub-fields without name collisions
            for j in range(0, byte_size):
                # Create new name by changing character at i to j
                curr_name = field.name[:i] + str(j) + field.name[i + 1:]

                # Collision, stop the loop
                if curr_name in names:
                    subfields = []
                    break

                # Create subfield
                subfield = Field(field.offset + j, -1, [])
                subfield.name = curr_name
                subfield.size = 8
                subfield.former_name = field.name

                # Append subfield
                names.append(subfield.name)
                subfields.append(subfield)

            # Process was successful, no further trials required
            if len(subfields) == byte_size:
                break

        # Not successful, even after all four tries, exit the program
        if len(subfields) != byte_size:
            print(
                f'Could not create subfields for field {field.name} without name-collisions!'
            )
            sys.exit()

        dest.fields = dest.fields + subfields
        return
Example No. 7
 def get_parcel(self, world, id):
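     # Fetch the parcel's vertex indices in order and build a Field from the world's vertices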
     current = Field()
     res = self.query("Select vertex from parcel_vertices where parcel='" +
                      id + "' ORDER BY idx;")
     if len(res) == 0:
         raise NotImplementedError("Parcel not found in db")
     for v in res:
         current._vertices.append(world.vertices[v[0]])
     return current
Example No. 8
	def get(self):
		template_params = {}
		mail = self.request.get('mail')

		user = User.query(User.mail == mail).get()
		if not user:
			self.error(403)
			self.response.write('user error')
			return

		fields = Field.getAllFieldsPerUser(user.mail)
		template_params['status'] = "OK"
		template_params['fields'] = fields
		self.response.write(json.dumps(template_params))
Example No. 9
 def test_predators_have_collided(self):
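     # Two predators share cell (1, 1) while the prey sits elsewhere, so only the collision check should hold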
     f = Field(11, 11)
     predator1 = Predator(id="Plato", location=(1, 1))
     predator2 = Predator(id="Pythagoras", location=(1, 1))
     chip = Prey(id="Kant", location=(5, 5))
     f.add_player(predator1)
     f.add_player(predator2)
     f.add_player(chip)
     s = State.state_from_field(f)
     self.assertTrue(s.predators_have_collided())
     self.assertFalse(s.prey_is_caught())
Example No. 10
def index():
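    # Simulate a full year of growth and pass the daily crop and weather series to the template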
    field_1 = Field()
    crop_1 = Crop(Seed, field_1)
    weather = Weather()
    data = {}
    crop_data = []
    weather_data = []
    for i in range(365):
        weather_data.append({"temperature": weather.get_daily(i)})
        crop_1.grow(weather_data[-1])
        crop_data.append({
            "height": crop_1.height,
            "diameter": crop_1.diameter
        })
    data["crop"] = crop_data
    data["weather"] = weather_data
    return render_template("test.jinja", dataset=data)
Example No. 11
class TestState(TestCase):
    def setUp(self):
        self.field = Field(11, 11)
        predators = [Predator((0, 0)), Predator((0, 10)), Predator((10, 0))]
        prey = Prey((5, 5))
        for pred in predators:
            self.field.add_player(pred)
        self.field.add_player(prey)

    def test_state_from_field(self):
        state = State.state_from_field(self.field)
        self.assertEqual(state.relative_distances, [(5, 5), (5, -5), (-5, 5)])

    def test_terminal_functions(self):
        state = State.state_from_field(self.field)
        self.assertFalse(state.is_terminal())
        self.assertFalse(state.predators_have_collided())
        self.assertFalse(state.prey_is_caught())

        # Move prey to location of first predator
        self.field.get_prey().location = (0, 0)
        state = State.state_from_field(self.field)
        self.assertTrue(state.is_terminal())
        self.assertFalse(state.predators_have_collided())
        self.assertTrue(state.prey_is_caught())

        # Move predator 1 to location of last predator
        self.field.get_predators()[0].location = (10, 0)
        state = State.state_from_field(self.field)
        self.assertTrue(state.is_terminal())
        self.assertTrue(state.predators_have_collided())
        self.assertFalse(state.prey_is_caught())

    def test_all_states(self):
        all_states = State.all_states(self.field)
        self.assertEqual(len(all_states), 1771561)

    def test_all_states_without_terminal(self):
        states = State.all_states_without_terminal(self.field)
        self.assertEqual(len(states), 1685040)
Example No. 12
 #     experiment_name = ["3 Predator Random policy"],
 #     prey_plearner = [ProbabilisticPlearner],
 #     prey_plearner_params = [dict()],
 #     pred_plearners = [[ProbabilisticPlearner,ProbabilisticPlearner,ProbabilisticPlearner]],
 #     pred_plearner_params = [[dict(),dict(),dict()]],
 #     n_episodes = [10000],
 #     field = [Field(11, 11)],
 # ),
 dict(
     experiment_name = ["1 vs 1 with minimax-q for predator 5by5"],
     prey_plearner = [MiniMaxQPlearner],
     prey_plearner_params = [dict(end_alpha=0.5, num_episodes=500, epsilon=0.1, gamma=0.7)],
     pred_plearners = [[MiniMaxQPlearner]],
     pred_plearner_params = [[dict(end_alpha=0.5, num_episodes=500, epsilon=0.1, gamma=0.7)]],
     n_episodes = [1000],
     field = [Field(5, 5)]
 ),
 # dict(
 #     experiment_name = ["1 vs 2 Independent Q-learning Greedy"],
 #     prey_plearner = [QPlearner],
 #     prey_plearner_params = [dict(policy=GreedyPolicy(value_init=15, epsilon=0.1,
 #                                      gamma=0.9, q_value_select=True),
 #                  learning_rate=0.1, discount_factor=0.9)],
 #     pred_plearners = [[QPlearner,QPlearner]],
 #     pred_plearner_params = [[dict(policy=GreedyPolicy(value_init=15, epsilon=0.1,
 #                                      gamma=0.9, q_value_select=True),learning_rate=0.1, discount_factor=0.9),
 #                              dict(policy=GreedyPolicy(value_init=15, epsilon=0.1,
 #                                      gamma=0.9, q_value_select=True),learning_rate=0.1, discount_factor=0.9)]],
 #     n_episodes = [10000],
 #     field = [Field(11, 11)],
 # ),
Example No. 13
def run_minimax(n_episodes=1000):
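    """Train a minimax-Q predator against a minimax-Q prey on a 5x5 field and plot how their action probabilities evolve."""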
    # initialize the environment
    field = Field(5, 5)
    """
    initial state:
    | | | |
    |X|O| |
    | | | |
    """
    pred1loc = (0, 0)
    preyloc = (2, 2)

    predator1 = Predator(id="Plato", location=pred1loc)

    # minimax-Q
    predator1.plearner = MiniMaxQPlearner(field=field,
                                          agent=predator1,
                                          end_alpha=0.1,
                                          num_episodes=n_episodes,
                                          epsilon=0.1)
    field.add_player(predator1)

    chip = Prey(id="Kant", location=preyloc, tripping_prob=0.2)
    chip.plearner = MiniMaxQPlearner(field=field,
                                     agent=chip,
                                     end_alpha=0.1,
                                     num_episodes=n_episodes,
                                     epsilon=0.1)
    field.add_player(chip)
    field.init_players()

    plot_state = State([(1, 0)])

    num_steps = []
    pred_win = []
    value_of_pred1 = []
    value_of_prey = []

    for i in range(0, n_episodes):
        predator1.location = pred1loc
        chip.location = preyloc
        field.update_state()
        field.steps = 0
        # run the simulation
        while not field.is_ended():
            field.run_step()
            # print field.state

        num_steps.append(field.steps)
        pred_win.append(field.state.prey_is_caught())
        value_of_pred1.append(
            predator1.plearner.policy.get_probability_mapping(plot_state))
        # print predator1.plearner.policy.get_probability_mapping(plot_state)
        value_of_prey.append(
            chip.plearner.policy.get_probability_mapping(plot_state))

        # print progress every 10%
        if n_episodes >= 10 and i % (n_episodes // 10) == 0:
            print(int(100.0 * i / n_episodes), "%:", field.steps, "steps")

    # some list wrangling to get a list of 5 action lists with values for each predator
    vp1 = [[val[0] for val in sublist] for sublist in zip(*value_of_pred1)]
    vpc = [[val[0] for val in sublist] for sublist in zip(*value_of_prey)]

    # create plots
    colors = ["r", "b", "g", "k", "m"]
    actions = {
        (0, 0): "stay",
        (-1, 0): "left",
        (1, 0): "right",
        (0, -1): "up",
        (0, 1): "down"
    }
    plt.figure(figsize=(15, 15))

    s = plt.subplot(2, 1, 1)
    # s.set_yscale("log")
    plt.ylim([-0.1, 1.1])
    for index, action in enumerate(predator1.actions):
        plt.plot(vp1[index], c=colors[index], label=actions[action])
    plt.title("action probabilities for predator 1")
    plt.legend(loc="upper right")

    s = plt.subplot(2, 1, 2)
    #s.set_yscale("log")
    plt.ylim([-0.1, 1.1])
    for index, action in enumerate(chip.actions):
        plt.plot(vpc[index], c=colors[index], label=actions[action])
    plt.title("action probabilities for prey")

    plt.suptitle(str(n_episodes) + " episodes")
    plt.savefig(get_output_path() + "policychange-minimax-" + str(n_episodes) +
                ".pdf")
Example No. 14
def run(n_episodes=1000, gui=False):
    """
    runs a simulation with two predators and one prey; the active plearners are WoLF-PHC (the alternatives are left commented out)
    :return:
    """

    #initialize the environment
    field = Field(11, 11)
    num_episodes = n_episodes

    pred1loc = (0, 0)
    pred2loc = (10, 10)
    pred3loc = (0, 10)
    preyloc = (5, 5)

    #initialize the predators
    predator1 = Predator(id="Plato", location=pred1loc)
    predator2 = Predator(id="Pythagoras", location=pred2loc)
    # predator3 = Predator(pred3loc)

    #probabilistic
    # predator1.plearner = ProbabilisticPlearner(field=field, agent=predator1)
    # predator2.plearner = ProbabilisticPlearner(field=field, agent=predator2)
    # predator3.plearner = ProbabilisticPlearner(field=field, agent=predator3)

    #greedy Q
    #predator1.plearner = SarsaPlearner.create_greedy_plearner(field=field, agent=predator1, value_init=0,epsilon=0.01)
    # predator2.plearner = SarsaPlearner.create_greedy_plearner(field=field, agent=predator2, value_init=0,epsilon=0.01)
    # predator1.plearner = QPlearner.create_greedy_plearner(field=field, agent=predator1, value_init=0)
    # predator2.plearner = QPlearner.create_greedy_plearner(field=field, agent=predator2, value_init=0)
    # predator3.plearner = QPlearner.create_greedy_plearner(field=field, agent=predator3)

    # wolf
    predator1.plearner = Wolf_phc.create_greedy_plearner(field=field,
                                                         agent=predator1)
    predator2.plearner = Wolf_phc.create_greedy_plearner(field=field,
                                                         agent=predator2)
    # predator3.plearner = Wolf_phc.create_greedy_plearner(field=field, agent=predator3)

    #softmax Q
    #predator1.plearner = QPlearner.create_softmax_plearner(field=field, agent=predator1)
    #predator2.plearner = QPlearner.create_softmax_plearner(field=field, agent=predator2)
    # predator3.plearner = QPlearner.create_softmax_plearner(field=field, agent=predator3)

    #minimax q
    # predator1.plearner = MiniMaxQPlearner(field=field,agent=predator1,end_alpha=0.01,num_episodes=num_episodes)

    field.add_player(predator1)
    field.add_player(predator2)
    # field.add_player(predator3)
    #initialize the prey
    chip = Prey(id="Kant", location=preyloc)

    # chip.plearner = ProbabilisticPlearner(field=field, agent=chip)
    chip.plearner = Wolf_phc.create_greedy_plearner(field=field,
                                                    agent=chip,
                                                    epsilon=0.01)
    #chip.plearner = QPlearner.create_softmax_plearner(field=field, agent=chip)
    # chip.plearner = MiniMaxQPlearner(field=field,agent=chip,end_alpha=0.01,num_episodes=num_episodes)

    field.add_player(chip)

    field.init_players()

    # set GUI
    if gui:
        GUI = GameFrame(field=field)

    num_steps = []
    pred_win = []

    for i in range(0, n_episodes):
        predator1.location = pred1loc
        predator2.location = pred2loc
        #predator3.location = pred3loc
        chip.location = preyloc
        field.update_state()
        field.steps = 0
        #run the simulation
        while not field.is_ended():
            field.run_step()
            if gui and i == n_episodes - 1:
                GUI.update()
                time.sleep(0.2)

        num_steps.append(field.steps)
        pred_win.append(field.state.prey_is_caught())
        # breakpoint
        #if i > 900:
        #    pass

        # print State.state_from_field(field)
        if i % 100 == 0:
            print(i)
        # print State.state_from_field(field), field.steps, field.state.prey_is_caught()
        # print State.state_from_field(field), field.steps, field.state.prey_is_caught()
        # print [str(state) + ": " + str([predator1.plearner.policy.value[State([state]),action] for action in predator1.get_actions() ])for state in itertools.product([-1,0,1],repeat=2)]
        # for action in chip.get_actions():
        #     print '1', action, predator1.plearner.policy.get_value(State([(0,-1),(0,1)]),action)
        #     print '2', action, predator2.plearner.policy.get_value(State([(0,-1),(0,1)]),action)

    step = 50
    plot_steps(num_steps,
               pred_win,
               window_size=step,
               title="moving average over " + str(step) + " episodes")
Example No. 15
def run_wolf(n_episodes=1000):
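    """Train two WoLF-PHC predators and a WoLF-PHC prey on a 3x3 field and plot how their action probabilities evolve."""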
    # initialize the environment
    field = Field(3, 3)
    """
    initial state:
    | | | |
    |X|O|X|
    | | | |
    """
    pred1loc = (0, 1)
    pred2loc = (2, 1)
    preyloc = (1, 1)

    predator1 = Predator(id="Plato", location=pred1loc)
    predator2 = Predator(id="Pythagoras", location=pred2loc)

    # WoLF
    predator1.plearner = Wolf_phc.create_greedy_plearner(field=field,
                                                         agent=predator1)
    predator2.plearner = Wolf_phc.create_greedy_plearner(field=field,
                                                         agent=predator2)
    field.add_player(predator1)
    field.add_player(predator2)

    chip = Prey(id="Kant", location=preyloc)
    chip.plearner = Wolf_phc.create_greedy_plearner(field=field,
                                                    agent=chip,
                                                    epsilon=0.01)
    field.add_player(chip)
    field.init_players()

    plot_state = State.state_from_field(field)

    num_steps = []
    pred_win = []
    value_of_pred1 = []
    value_of_pred2 = []
    value_of_prey = []

    for i in range(0, n_episodes):
        predator1.location = pred1loc
        predator2.location = pred2loc
        chip.location = preyloc
        field.update_state()
        field.steps = 0
        # run the simulation
        while not field.is_ended():
            field.run_step()

        num_steps.append(field.steps)
        pred_win.append(field.state.prey_is_caught())
        value_of_pred1.append(
            predator1.plearner.policy.get_probability_mapping(plot_state))
        value_of_pred2.append(
            predator2.plearner.policy.get_probability_mapping(plot_state))
        value_of_prey.append(
            chip.plearner.policy.get_probability_mapping(plot_state))

        # print progress every 10%
        if n_episodes > 10 and i % (n_episodes // 10) == 0:
            print(int(100.0 * i / n_episodes), "%")

    # some list wrangling to get a list of 5 action lists with values for each predator
    vp1 = [[val[0] for val in sublist] for sublist in zip(*value_of_pred1)]
    vp2 = [[val[0] for val in sublist] for sublist in zip(*value_of_pred2)]
    vpc = [[val[0] for val in sublist] for sublist in zip(*value_of_prey)]

    # create plots
    colors = ["r", "b", "g", "k", "m"]
    actions = {
        (0, 0): "stay",
        (-1, 0): "left",
        (1, 0): "right",
        (0, -1): "up",
        (0, 1): "down"
    }
    plt.figure(figsize=(15, 15))

    s = plt.subplot(3, 1, 1)
    s.set_yscale("log")
    for index, action in enumerate(predator1.actions):
        plt.plot(vp1[index], c=colors[index], label=actions[action])
    plt.title("action probabilities for predator 1")
    plt.legend(loc="upper right")

    s = plt.subplot(3, 1, 2)
    s.set_yscale("log")
    for index, action in enumerate(predator2.actions):
        plt.plot(vp2[index], c=colors[index], label=actions[action])
    plt.title("action probabilities for predator 2")
    # plt.legend(loc="upper left")

    s = plt.subplot(3, 1, 3)
    s.set_yscale("log")
    for index, action in enumerate(chip.actions):
        plt.plot(vpc[index], c=colors[index], label=actions[action])
    plt.title("action probabilities for prey")

    plt.suptitle(str(n_episodes) + " episodes")
    plt.savefig(get_output_path() + "policychange-wolf-" + str(n_episodes) +
                ".pdf")
Example No. 16
        return None

    def get_field_center(self, col, row):
        """
        calculates the x and y pixel coordinates of the field at given column and row in the grid.
        :param col:
        :param row:
        :return: dictionary with elements x and y
        """
        x = self.xoffset + col * self.cellwidth + 0.5 * self.cellwidth
        y = self.yoffset + row * self.cellheight + 0.5 * self.cellheight
        return {"x": x, "y": y}


if __name__ == "__main__":
    environment = Field(11, 11)
    # fatcat = Predator((0, 0))
    # fatcat.policy = RandomPredatorPolicy(fatcat, environment)
    # chip = Prey((5, 5))
    # chip.policy = RandomPreyPolicy(chip, environment)
    # environment.add_player(fatcat)
    # environment.add_player(chip)
    # gui = GameFrame(field=environment)
    # gui.draw_state(environment.get_current_state_complete())
    # i = 0
    # while not environment.is_ended():
    #     fatcat.act()
    #     chip.act()
    #     # print environment
    #     gui.update()
    #     i += 1
Example No. 17
    def __init__(self, table_name: str, table_name_plural: str,
                 field_list: "list[dict[str, str]]") -> None:
        self.generated_serial = NumberUtils.random(18)
        self.setTableName(table_name)
        self.setTableNamePlural(table_name_plural)

        # Wrap each {'name': ..., 'data_type': ...} mapping in a Field
        self.field_list = [Field(field['name'], field['data_type'])
                           for field in field_list]
Example No. 18
class App(tk.Frame):
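    """Tkinter front end: a control bar on the left (seed entry, step, start/pause, screenshot) and a canvas on the right that renders the Field."""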
    def __init__(self, master, *args, **kwargs):
        super().__init__(master=master, *args, **kwargs)
        self.is_paused = True
        self.root = master
        self.width = 700
        self.height = 500

        self.root.geometry("{}x{}".format(self.width, self.height))
        self.root.resizable(False, False)

        self.left_bar = tk.Frame(master=self.root)
        self.left_bar.pack(side=tk.LEFT)

        self.automate_seed_entry_label = tk.Label(self.left_bar,
                                                  text=SEED_INPUT_LABEL)
        self.automate_seed_entry_label.grid()

        self.automate_seed_entry = tk.Entry(self.left_bar)
        self.automate_seed_entry.bind('<Key-Return>', self.set_drawer)
        self.automate_seed_entry.grid()

        self.next_step_button = tk.Button(master=self.left_bar,
                                          text=NEXT_STEP,
                                          command=self.next_step)
        self.next_step_button.grid()

        self.pause_button_text = tk.StringVar(value=START)
        self.pause_button = tk.Button(master=self.left_bar,
                                      textvariable=self.pause_button_text,
                                      command=self.pause)
        self.pause_button.grid()

        self.screenshot_button_text = tk.StringVar(value=SCREENSHOT)
        self.screenshot_button = tk.Button(
            master=self.left_bar,
            textvariable=self.screenshot_button_text,
            command=self.screenshot)
        self.screenshot_button.grid()

        self.canvas_width = 500
        self.canvas_height = 500

        self.canvas = tk.Canvas(master=self.root,
                                cnf={
                                    "width": self.canvas_width,
                                    "height": self.canvas_height
                                })
        self.canvas.pack(
            side=tk.RIGHT)  # place(x=self.width - self.canvas_width)

        field_width = 100
        field_height = 100
        self.field = Field(field_width, field_height, 5)
        self.field.set_cell(field_width // 2, field_height // 2, 1)
        self.cells = self.get_rectangles()

        self.drawer = self.get_drawer(501)
        self.redraw_field()

    def set_drawer(self, _):
        seed = self.automate_seed_entry.get()
        self.drawer = self.get_drawer(int(seed))
        self.field.refresh()
        self.drawer.set_initial(self.field)
        self.next_step()

    def update(self):
        if not self.is_paused:
            self.next_step()
            self.redraw_field()
            self.canvas.after(1, self.update)

    def redraw_field(self):
        width = self.field.get_width()
        height = self.field.get_height()
        w_range = range(width)
        h_range = range(height)
        for y in h_range:
            for x in w_range:
                color = colors.get(self.field.get_cell(y, x))
                rect = self.cells[y][x]
                self.canvas.itemconfig(rect, fill=color)

    def next_step(self):
        next_state = self.drawer.next_step(self.field)
        self.field.apply_state(next_state)
        self.redraw_field()

    @staticmethod
    def get_drawer(seed):
        # TODO: Add dynamic Drawer loading via dropdown menu
        return Crystal(seed)

    def get_rectangles(self):
        width = self.field.get_width()
        height = self.field.get_height()
        width_range = range(width)
        height_range = range(height)
        rectangles = [[None for _ in width_range] for _ in height_range]
        cell_size = self.field.get_cell_size()
        for y in range(height):
            for x in range(width):
                color = colors.get(self.field.get_cell(y, x))
                start_x = x * cell_size
                start_y = y * cell_size
                rectangle = self.canvas.create_rectangle(start_x,
                                                         start_y,
                                                         start_x + cell_size,
                                                         start_y + cell_size,
                                                         fill=color)
                rectangles[y][x] = rectangle
        return rectangles

    def pause(self):
        self.is_paused = not self.is_paused
        self.pause_button_text.set(START if self.is_paused else PAUSE)

        self.update()

    def screenshot(self):
        initial_filename = self.get_screenshot_filename()
        initial_path = os.path.join(os.getcwd(), initial_filename)
        path = filedialog.asksaveasfilename(initialdir=os.getcwd(),
                                            initialfile=initial_filename,
                                            defaultextension=".png",
                                            title="Save Screenshot",
                                            filetypes=(("png files",
                                                        "*.png"), ))
        if path:
            self.save_screenshot(path)
        else:
            # Fall back to the working directory if the dialog is cancelled
            self.save_screenshot(initial_path)

    def save_screenshot(self, path):
        x = self.root.winfo_rootx() + self.canvas.winfo_x()
        y = self.root.winfo_rooty() + self.canvas.winfo_y()
        x1 = x + self.canvas.winfo_width()
        y1 = y + self.canvas.winfo_height()
        ImageGrab.grab().crop((x, y, x1, y1)).save(path)

    def get_screenshot_filename(self):
        return '{drawer}-{date}.png'.format(
            drawer=self.drawer.__class__.__name__,
            date=datetime.today().strftime('%m-%d-%Y'))
Example No. 19
# hack to make the parent directory importable as a package
import os, inspect, sys
current_dir = os.path.dirname(
    os.path.abspath(inspect.getfile(inspect.currentframe())))
parent_dir = os.path.dirname(current_dir)
sys.path.insert(0, parent_dir)

import scrape_field as scraper
import sheets.update_field as db
from models.field import Field

fields = [
    Field('Coronet Peak',
          'https://www.snow-forecast.com/resorts/Coronet-Peak/6day/mid'),
    Field('Cardrona',
          'https://www.snow-forecast.com/resorts/Cardrona/6day/mid'),
    Field('Treble Cone',
          'https://www.snow-forecast.com/resorts/Treble-Cone/6day/mid'),
    Field('Remarkables',
          'https://www.snow-forecast.com/resorts/Remarkables/6day/mid'),
    Field('Round Hill',
          'https://www.snow-forecast.com/resorts/Round-Hill/6day/mid'),
    Field('Mount Hutt',
          'https://www.snow-forecast.com/resorts/Mount-Hutt/6day/mid'),
    # Field('Whara Kea Chalet', 'https://www.snow-forecast.com/resorts/Whara-Kea-Chalet/6day/mid'),
]


def main():
    # scrape the forecast data for every field
    for field in fields:
        scraper.do_scrape(field)