Example #1
    def _quit(self):
        """ 
        Gracefully quit the simulator.
        """

        quitEvent = events.QuitEvent()
        AppState.get_state().get_event_manager().post_event(quitEvent)
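
All of the examples in this listing reach shared state through AppState.get_state() and its getters and setters, but the AppState class itself never appears. Purely for orientation, here is a minimal sketch of the kind of singleton these calls imply; the accessor names are copied from the usage above and below, everything else is an assumption and not the project's actual implementation.

# Hypothetical sketch of the AppState singleton implied by the calls in these
# examples; the real class is not shown in this listing and may differ.
class AppState(object):
    state = None  # some examples also read AppState.state directly

    @classmethod
    def get_state(cls):
        # Return the shared application state, creating it on first use.
        if cls.state is None:
            cls.state = cls()
        return cls.state

    def __init__(self):
        self.event_manager = None
        self.world = None
        self.clock = None

    def set_event_manager(self, event_manager):
        self.event_manager = event_manager

    def get_event_manager(self):
        return self.event_manager

    def set_world(self, world):
        self.world = world

    def get_world(self):
        return self.world

    def set_clock(self, clock):
        self.clock = clock

    def get_clock(self):
        return self.clock

    # The remaining accessors seen in the examples (get_logger, get_t,
    # increment_t, toggle_pause, is_running, ...) would follow the same
    # trivial getter/setter pattern.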
Example #2
def main():
    """
    Main function of the application.
    """
    # Initialize the event manager.
    event_manager = events.EventManager()
    AppState.get_state().set_event_manager(event_manager)

    # Initialize and register the application heartbeat.
    heart_beat = HeartBeat()
    event_manager.register_listener(heart_beat)

    # Initialize and register the world.
    basic_experiment = experiment.basic.BasicExperiment()
    world = basic_experiment.get_world()
    event_manager.register_listener(world)
    AppState.get_state().set_world(world)

    # Initialize pygame.
    surface = init()

    # Initialize and register the view.
    main_view = view.View(surface)
    event_manager.register_listener(main_view)

    # Initialize and register the controller.
    main_controller = controller.Controller()
    event_manager.register_listener(main_controller)

    # Start the heartbeat.
    heart_beat.run()
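
main() registers every component (heartbeat, world, view, controller) as a listener on a single EventManager, and the rest of the examples communicate only by posting events to it. The events module is not part of this listing; the following is a minimal observer-style sketch consistent with the register_listener() and post_event() calls, where the notify() method on listeners is an assumption.

# Hypothetical sketch of the publish/subscribe mechanism implied by
# register_listener()/post_event(); the project's events.EventManager and its
# listener interface (assumed here to be a notify() method) may differ.
class EventManager(object):
    def __init__(self):
        self.listeners = []

    def register_listener(self, listener):
        # Every registered listener receives every posted event.
        if listener not in self.listeners:
            self.listeners.append(listener)

    def post_event(self, event):
        # Broadcast the event (TickEvent, DrawEvent, QuitEvent, ...) to all
        # listeners; each listener decides which event types it handles.
        for listener in self.listeners:
            listener.notify(event)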
Example #3
    def consider_alternative_interactions(self):
        """
        Add-on to the sequential system, between steps 2 and 3.

        Proposed post-interactions are scrutinized by looking at alternative
        interactions that have occurred when trying to activate that interaction.
        If the alternative interaction itself is also proposed, the context
        indicates the alternative is likely to occur. Thus, the agent
        anticipates that the alternative might happen instead of the intended
        interaction. The intended interaction's proclivity is temporarily
        adjusted to reflect this.
        """
        proposed = self.propose_interactions()

        proposed = map(lambda (proposed_interaction, weight):
            (
                proposed_interaction,
                weight * self.interaction_memory.get_valence(proposed_interaction, process_boredom = True)
            ), proposed)

        n = 0

        proposed_interactions = map(lambda (proposed_interaction, proclivity): proposed_interaction, proposed)

        for (proposed_interaction, proclivity) in proposed:
            for alternative in self.interaction_memory.get_alternative_interactions(proposed_interaction):
                if alternative in proposed_interactions:
                    AppState.get_state().get_logger().info("%s - Anticipating alternative %s for %s" % (self.name, alternative, proposed_interaction))
                    proclivity += self.interaction_memory.get_proclivity(alternative)
            proposed[n] = (proposed_interaction, proclivity)
            n += 1

        return proposed
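
As a worked illustration with made-up numbers: if interaction A is proposed with proclivity 5, and interaction B, recorded in memory as an alternative of A, is also proposed with get_proclivity(B) returning -3, then A's proclivity for this decision cycle is temporarily adjusted to 5 + (-3) = 2, reflecting the anticipated risk that B will be enacted instead of A.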
Example #4
    def quit(self):
        """ 
        Gracefully quit the simulator.
        """

        quitEvent = events.QuitEvent()
        AppState.get_state().get_event_manager().post_event(quitEvent)
Example #5
    def enacted_interaction(self, interaction, data):
        # Post enacted interaction event
        AppState.state.get_event_manager().post_event(
            events.AgentEnactionEvent(self, interaction, -1))
        self.interaction_memory.add_interaction_to_history(interaction)

        AppState.get_state().get_logger().info("%s - Enacted: %s" %
                                               (self.name, interaction))
Example #6
    def enacted_interaction(self, interaction, data):
        # Post enacted interaction event
        AppState.state.get_event_manager().post_event(events.AgentEnactionEvent(
            self, 
            interaction, 
            -1))
        self.interaction_memory.add_interaction_to_history(interaction)

        AppState.get_state().get_logger().info("%s - Enacted: %s" % (self.name, interaction))
Example #7
    def run(self):
        """
        Process PyGame events until halt is true.
        """

        self.halt = False

        print("Starting heartbeat.")
        while not self.halt:
            event = events.TickEvent()
            AppState.get_state().get_event_manager().post_event(event)
            AppState.get_state().get_clock().tick(settings.MAX_FPS)
Example #8
    def prepare_interaction(self):
        chosen = None
        self.color_old = self.color  # Temporarily change color to indicate this agent has to be controlled
        self.color = (255, 255, 0, 255)

        # Secondary pygame loop to process events until the user has made a decision
        while chosen == None:
            if pygame.key.get_pressed()[pygame.K_LALT]:
                # If left alt is pressed, use the regular controller(s)
                AppState.get_state().get_event_manager().post_event(
                    events.ControlEvent())
            else:
                # If left alt is not pressed, use this agent's controller
                interaction = self.get_interaction_from_input()
                if not interaction == None:
                    self.color = self.color_old
                    chosen = interaction

            # Draw views
            AppState.get_state().get_event_manager().post_event(
                events.DrawEvent())
            # Pygame tick control
            AppState.get_state().get_clock().tick(settings.MAX_FPS)

        # Post interaction preparation event
        AppState.state.get_event_manager().post_event(
            events.AgentPreparationEvent(self, chosen, -1))

        AppState.get_state().get_logger().info("%s - > %s" %
                                               (self.name, chosen))
        return chosen
Example #9
    def prepare_interaction(self):
        chosen = None
        self.color_old = self.color # Temporarily change color to indicate this agent has to be controlled
        self.color = (255,255,0,255)

        # Secondary pygame loop to process events until the user has made a decision
        while chosen == None:
            if pygame.key.get_pressed()[pygame.K_LALT]:
                # If left alt is pressed, use the regular controller(s)
                AppState.get_state().get_event_manager().post_event(events.ControlEvent())
            else:
                # If left alt is not pressed, use this agent's controller
                interaction = self.get_interaction_from_input()
                if not interaction == None:
                    self.color = self.color_old
                    chosen = interaction

            # Draw views
            AppState.get_state().get_event_manager().post_event(events.DrawEvent())
            # Pygame tick control
            AppState.get_state().get_clock().tick(settings.MAX_FPS)

        # Post interaction preparation event
        AppState.state.get_event_manager().post_event(events.AgentPreparationEvent(
            self, 
            chosen, 
            -1))

        AppState.get_state().get_logger().info("%s - > %s" % (self.name, chosen))
        return chosen
Example #10
 def draw_entities(self):
     world = AppState.get_state().get_world()
     
     # Create sprites for entities we do not have sprites for yet
     for entity in AppState.get_state().get_world().get_entities():
         if entity not in self.sprites.keys():
             self.sprites[entity] = Sprite(entity, self)
             self.group.add(self.sprites[entity])
     # Remove sprites for entities that were removed
     for entity in self.sprites.keys():
         if entity not in AppState.get_state().get_world().get_entities():
             self.group.remove(self.sprites[entity])
             del self.sprites[entity]
     
     self.group.draw(self.surface)
Example #11
    def get_cell_height(self):
        """
        Get the height of a cell on the canvas.

        :return: The height of a single cell on the canvas.
        :rtype: int
        """
        return round(float(self.surface.get_height()) / AppState.get_state().get_world().get_height())
Example #12
    def save_surface_to_file(self):
        # Create output directory if it does not exist
        if not self.created_renders_dir and not os.path.exists(settings.SIMULATION_RENDERS_DIR):
            os.makedirs(settings.SIMULATION_RENDERS_DIR)
            self.created_renders_dir = True

        path = os.path.join(settings.SIMULATION_RENDERS_DIR, "t%s.png" % AppState.get_state().get_t())
        pygame.image.save(self.surface, path)
Example #13
 def get_interaction_from_input(self):
     """
     Get the interaction the agent should enact from user input
     """
     for event in pygame.event.get():
         if event.type == pygame.QUIT: 
             quitEvent = events.QuitEvent()
             AppState.get_state().get_event_manager().post_event(quitEvent)
             return
         elif event.type == pygame.KEYDOWN:
             if event.key == pygame.K_UP:
                 return self.interaction_memory.find_interaction_by_name_and_result("Step")
             elif event.key == pygame.K_LEFT:
                 return self.interaction_memory.find_interaction_by_name_and_result("Turn Left")
             elif event.key == pygame.K_RIGHT:
                 return self.interaction_memory.find_interaction_by_name_and_result("Turn Right")
             elif event.key == pygame.K_SLASH:
                 return self.choose_from_list()
Example #14
    def process_input(self):
        """
        Process user input.
        """

        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                self.quit()
                return
            elif event.type == pygame.KEYDOWN:
                if event.key == pygame.K_ESCAPE:
                    self.quit()
                    return
                elif event.key == pygame.K_SPACE:
                    AppState.get_state().toggle_pause()
                    return
                elif event.key == pygame.K_s and pygame.key.get_pressed()[
                        pygame.K_LCTRL]:
                    self.save_agent()
                    return
                elif event.key == pygame.K_w and pygame.key.get_pressed()[
                        pygame.K_LCTRL]:
                    self.save_world()
                    return
                elif event.key == pygame.K_e and pygame.key.get_pressed()[
                        pygame.K_LCTRL]:
                    self.save_experiment()
                    return
                elif event.key == pygame.K_h:
                    self.help()
                    return
                elif event.key == pygame.K_r:
                    AppState.get_state().toggle_saving_simulation_renders()
                    if AppState.get_state().get_save_simulation_renders():
                        print "Now saving simulation renders to file."
                    else:
                        print "No longer saving simulation renders to file."
                    return

            if self.experiment_controller:
                if pygame.mouse.get_focused():
                    self.experiment_controller(event, pygame.mouse.get_pos())
                else:
                    self.experiment_controller(event, None)
Example #15
def init():
    """
    Initialize pygame.

    :returns: The surface of the pygame display.
    """

    print("Loading pygame modules.")
    pygame.display.init()
    AppState.get_state().set_clock(pygame.time.Clock())
    flags = pygame.DOUBLEBUF
    surface = pygame.display.set_mode((
        AppState.get_state().get_world().get_width() * settings.CELL_WIDTH,
        AppState.get_state().get_world().get_height() * settings.CELL_HEIGHT,
    ), flags)
    surface.set_alpha(None)
    pygame.display.set_caption('Enactive Agents v2')

    return surface
Example #16
    def save_surface_to_file(self):
        # Create output directory if it does not exist
        if not self.created_renders_dir and not os.path.exists(
                settings.SIMULATION_RENDERS_DIR):
            os.makedirs(settings.SIMULATION_RENDERS_DIR)
            self.created_renders_dir = True

        path = os.path.join(settings.SIMULATION_RENDERS_DIR,
                            "t%s.png" % AppState.get_state().get_t())
        pygame.image.save(self.surface, path)
Example #17
    def get_cell_height(self):
        """
        Get the height of a cell on the canvas.

        :return: The height of a single cell on the canvas.
        :rtype: int
        """
        return round(
            float(self.surface.get_height()) /
            AppState.get_state().get_world().get_height())
Example #18
    def select_intended_interaction(self):
        """
        Step 3 of the sequential system.

        The decisional mechanism; choose an interaction to enact (primitive
        or composite).

        The intended interaction is selected from the proposed interactions
        based on the weight of the activated interactions and the values of the
        proposed post interactions.
        """

        proposed = self.consider_alternative_interactions()
        proposed.sort(key=lambda x: x[1], reverse=True)
        proposed = map(lambda x: x[0], proposed)
        """
        Without alternatives:
        proposed = self.propose_interactions()
        proposed.sort(
            key = lambda x: x[1] * self.interaction_memory.get_valence(x[0], process_boredom = True), 
            reverse = True
        )
        proposed = map(lambda x: x[0], proposed)
        """

        if len(proposed) > 0 and self.interaction_memory.get_proclivity(
                proposed[0]) > 0:
            return proposed[0]
        elif len(proposed) == 0:
            # TODO: in Katja's implementation the activated interactions contain
            # some set of default interactions. The paper itself does not seem
            # to mention how to deal with an empty activated set.
            AppState.get_state().get_logger().info(
                "%s - No proposed interactions: exploring" % self.name)
            return random.choice(
                self.interaction_memory.get_primitive_interactions())
        else:
            AppState.get_state().get_logger().info(
                "%s - Negative proclivity: exploring" % self.name)
            return random.choice(
                self.interaction_memory.get_primitive_interactions())
Example #19
 def get_interaction_from_input(self):
     """
     Get the interaction the agent should enact from user input
     """
     for event in pygame.event.get():
         if event.type == pygame.QUIT:
             quitEvent = events.QuitEvent()
             AppState.get_state().get_event_manager().post_event(quitEvent)
             return
         elif event.type == pygame.KEYDOWN:
             if event.key == pygame.K_UP:
                 return self.interaction_memory.find_interaction_by_name_and_result(
                     "Step")
             elif event.key == pygame.K_LEFT:
                 return self.interaction_memory.find_interaction_by_name_and_result(
                     "Turn Left")
             elif event.key == pygame.K_RIGHT:
                 return self.interaction_memory.find_interaction_by_name_and_result(
                     "Turn Right")
             elif event.key == pygame.K_SLASH:
                 return self.choose_from_list()
Example #20
def init():
    """
    Initialize pygame.

    :returns: The surface of the pygame display.
    """

    print("Loading pygame modules.")
    pygame.display.init()
    AppState.get_state().set_clock(pygame.time.Clock())
    flags = pygame.DOUBLEBUF
    surface = pygame.display.set_mode(
        (
            AppState.get_state().get_world().get_width() * settings.CELL_WIDTH,
            AppState.get_state().get_world().get_height() * settings.CELL_HEIGHT,
        ), 
        flags)
    surface.set_alpha(None)
    pygame.display.set_caption('Enactive Agents v2')

    return surface
Example #21
    def select_intended_interaction(self):
        """
        Step 3 of the sequential system.

        The decisional mechanism; choose an interaction to enact (primitive
        or composite).

        The intended interaction is selected from the proposed interactions
        based on the weight of the activated interactions and the values of the
        proposed post interactions.
        """

        proposed = self.consider_alternative_interactions()
        proposed.sort(
            key = lambda x: x[1],
            reverse = True
        )
        proposed = map(lambda x: x[0], proposed)

        """
        Without alternatives:
        proposed = self.propose_interactions()
        proposed.sort(
            key = lambda x: x[1] * self.interaction_memory.get_valence(x[0], process_boredom = True), 
            reverse = True
        )
        proposed = map(lambda x: x[0], proposed)
        """

        if len(proposed) > 0 and self.interaction_memory.get_proclivity(proposed[0]) > 0:
            return proposed[0]
        elif len(proposed) == 0:
            # TODO: in Katja's implementation the activated interactions contain
            # some set of default interactions. The paper itself does not seem 
            # to mention how to deal with an empty activated set.
            AppState.get_state().get_logger().info("%s - No proposed interactions: exploring" % self.name)
            return random.choice(self.interaction_memory.get_primitive_interactions())
        else:
            AppState.get_state().get_logger().info("%s - Negative proclivity: exploring" % self.name)
            return random.choice(self.interaction_memory.get_primitive_interactions())
Example #22
    def prepare_interaction(self):
        if not self.enacting_interaction:
            # Decisional mechanism.
            # We are not currently enacting the primitives in a sequence of
            # interactions. Choose a new interaction to enact (steps 1-3).
            self.enacting_interaction = True
            self.enacting_interaction_step = 0
            self.enacted_sequence = []

            # Exploration
            if random.random() <= 0.1:
                # Choose a random primitive interaction (not a primitive perception interaction)
                self.intended_interaction = random.choice(filter(lambda x: isinstance(x, interaction.PrimitiveInteraction), self.interaction_memory.get_primitive_interactions()))
                AppState.get_state().get_logger().info("%s - EXPLORING" % (self.name))
            else:
                self.intended_interaction = self.select_intended_interaction()

            self.enacting_interaction_sequence = self.intended_interaction.unwrap()
            AppState.get_state().get_logger().info("%s - Intending: %s" % (self.name, self.intended_interaction))

        # Enact a primitive interaction from the sequence we are currently
        # enacting.
        intended_interaction = self.enacting_interaction_sequence[self.enacting_interaction_step]
        AppState.get_state().get_logger().info("%s - > %s" % (self.name, intended_interaction))

        # Step 4 of the sequential system, enact the interaction:
        # Post interaction preparation event
        AppState.state.get_event_manager().post_event(events.AgentPreparationEvent(
            self, 
            intended_interaction, 
            self.interaction_memory.get_valence(intended_interaction, process_boredom = True)))
        return (intended_interaction, intended_interaction)
Example #23
    def consider_alternative_interactions(self):
        """
        Add-on to the sequential system, between steps 2 and 3.

        Proposed post-interactions are scrutinized by looking at alternative
        interactions that have occurred when trying to activate that interaction.
        If the alternative interaction itself is also proposed, the context
        indicates the alternative is likely to occur. Thus, the agent
        anticipates that the alternative might happen instead of the intended
        interaction. The intended interaction's proclivity is temporarily
        adjusted to reflect this.
        """
        proposed = self.propose_interactions()

        proposed = map(
            lambda (proposed_interaction, weight):
            (proposed_interaction, weight * self.interaction_memory.
             get_valence(proposed_interaction, process_boredom=True)),
            proposed)

        n = 0

        proposed_interactions = map(
            lambda (proposed_interaction, proclivity): proposed_interaction,
            proposed)

        for (proposed_interaction, proclivity) in proposed:
            for alternative in self.interaction_memory.get_alternative_interactions(
                    proposed_interaction):
                if alternative in proposed_interactions:
                    AppState.get_state().get_logger().info(
                        "%s - Anticipating alternative %s for %s" %
                        (self.name, alternative, proposed_interaction))
                    proclivity += self.interaction_memory.get_proclivity(
                        alternative)
            proposed[n] = (proposed_interaction, proclivity)
            n += 1

        return proposed
Example #24
    def save_experiment(self):
        """
        Save the experiment to file.
        """

        print "---"
        print "Press [enter] to write the experiment to file, or [escape] to cancel."

        while True:
            event = pygame.event.wait()
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_ESCAPE:
                    print "Saving cancelled."
                    print "---"
                    return
                elif event.key == pygame.K_RETURN:
                    break

        try:
            import dill
        except ImportError:
            print "ERROR: Module 'dill' is required to save experiments."
        else:
            print "Saving the experiment to file..."

            # Create output directory if it does not exist
            if not os.path.exists(settings.EXPERIMENT_DIR):
                os.makedirs(settings.EXPERIMENT_DIR)

            # Pickle and save agents to file
            file_name = "%s.p" % strftime("%Y%m%dT%H%M%S")
            file_path = os.path.join(settings.EXPERIMENT_DIR, file_name)

            print " - Saving experiment to %s" % file_path
            dill.dump(AppState.get_state().get_experiment(),
                      open(file_path, "wb"))

            print "Experiment saved."

        print "---"
Example #25
def render_content(tab, state_json):
    state_json = str(state_json)
    state = AppState.from_json(json.loads(state_json))

    logger.log(logging.INFO, msg=f"Choose tab \n\ttab={tab}")

    if tab == id.TabValue.tab_value_rent:
        return views.create_rental_graph(state.x_axis_years, state.rent)
    elif tab == id.TabValue.tab_value_mortgage_payment:
        return views.create_mortgage_payment_graph(state.x_axis_years,
                                                   state.mortgage)
    elif tab == id.TabValue.tab_value_remaining_mortgage:
        return views.create_mortgage_principle_graph(state.x_axis_years,
                                                     state.mortgage)
    elif tab == id.TabValue.tab_value_equity_by_rent:
        return views.create_renting_investment_portfolio(
            state.x_axis_years, renting_capital=state.renting_capital)
    elif tab == id.TabValue.tab_value_asset_for_buy:
        return views.create_buying_investment_graph(
            state.x_axis_years, property_value=state.property_value)
    else:
        return None
Example #26
def main():
    """
    Main function of the application.
    """
    # Initialize the event manager.
    event_manager = events.EventManager()
    AppState.get_state().set_event_manager(event_manager)

    # Initialize and register the application heartbeat.
    heart_beat = HeartBeat()
    event_manager.register_listener(heart_beat)

    # Initialize and register the world.
    #experiment_ = experiment.experiment.Experiment.load_experiment("20161126T003019.p")
    experiment_ = experiment.basic.BasicVisionExperiment()
    AppState.get_state().set_experiment(experiment_)
    world = experiment_.get_world()
    event_manager.register_listener(world)
    AppState.get_state().set_world(world)

    # Initialize pygame.
    surface = init()

    # Initialize and register the view.
    main_view = view.View(surface)
    event_manager.register_listener(main_view)

    # Initialize the website trace history view.
    trace_view = agentevents.AgentEvents()
    event_manager.register_listener(trace_view)

    # Initialize and register the controller.
    main_controller = controller.Controller()
    event_manager.register_listener(main_controller)

    # Add the experiment controller to the controller
    main_controller.set_experiment_controller(
        lambda e, coords: experiment_.controller(
            e, main_view.window_coords_to_world_coords(coords)))

    # Start the webserver.
    webserver.trace_view = trace_view
    webserver.start()

    # Start the heartbeat.
    heart_beat.run()
Example #27
    def save_agent(self):
        """
        Save all agents to files.
        """
        print "---"
        print "Press [enter] to write all agents to file, or [escape] to cancel."

        while True:
            event = pygame.event.wait()
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_ESCAPE:
                    print "Saving cancelled."
                    print "---"
                    return
                elif event.key == pygame.K_RETURN:
                    break

        print "Saving agents to file..."

        # Create output directory if it does not exist
        if not os.path.exists(settings.AGENT_DIR):
            os.makedirs(settings.AGENT_DIR)

        # Pickle and save agents to file
        agents = AppState.get_state().get_world().get_agents()
        for agent in agents:
            file_name = "%s - %s.p" % (strftime("%Y%m%dT%H%M%S"),
                                       agent.get_name())
            file_path = os.path.join(settings.AGENT_DIR, file_name)

            print " - Saving %s to %s" % (agent.get_name(), file_path)
            cPickle.dump(agent, open(file_path, "wb"))

        print "Agents saved."

        print "---"
Example #28
    def prepare_interaction(self):
        if not self.enacting_interaction:
            # Decisional mechanism.
            # We are not currently enacting the primitives in a sequence of
            # interactions. Choose a new interaction to enact (steps 1-3).
            self.enacting_interaction = True
            self.enacting_interaction_step = 0
            self.enacted_sequence = []

            # Exploration
            if random.random() <= 0.1:
                # Choose a random primitive interaction (not a primitive perception interaction)
                self.intended_interaction = random.choice(
                    filter(
                        lambda x: isinstance(x, interaction.
                                             PrimitiveInteraction),
                        self.interaction_memory.get_primitive_interactions()))
                AppState.get_state().get_logger().info("%s - EXPLORING" %
                                                       (self.name))
            else:
                self.intended_interaction = self.select_intended_interaction()

            self.enacting_interaction_sequence = self.intended_interaction.unwrap(
            )
            AppState.get_state().get_logger().info(
                "%s - Intending: %s" % (self.name, self.intended_interaction))

        # Enact a primitive interaction from the sequence we are currently
        # enacting.
        intended_interaction = self.enacting_interaction_sequence[
            self.enacting_interaction_step]
        AppState.get_state().get_logger().info(
            "%s - > %s" % (self.name, intended_interaction))

        # Step 4 of the sequential system, enact the interaction:
        # Post interaction preparation event
        AppState.state.get_event_manager().post_event(
            events.AgentPreparationEvent(
                self, intended_interaction,
                self.interaction_memory.get_valence(intended_interaction,
                                                    process_boredom=True)))
        return (intended_interaction, intended_interaction)
Example #29
from appstate import AppState
import bloom

from krpc.krpc import KRPC
from twisted.internet.protocol import DatagramProtocol
from twisted.internet import reactor

# h1 = Hash(hashlib.sha1("test").digest())
# h2 = Hash(hashlib.sha1("test").digest())
# h3 = Hash(hashlib.sha1("test2").digest())
# print h2.distance(h3)

AppState.prepare()
reactor.listenUDP(AppState.thisNode.port(), KRPC())
#reactor.callLater(3, AppState.routingTable.refresh)
reactor.callLater(3, AppState.routingTable.refresh, (reactor))
reactor.run()
print('test')
Example #30
 def get_cell_height(self):
     return round(float(self.surface.get_height()) / AppState.get_state().get_world().get_height())
Example #31
    def enacted_interaction(self, interaction_, data):
        self.enacting_interaction_step += 1
        intended_primitive_interaction = data

        self.enacted_sequence.append(interaction_)

        # Learn interaction if it is not yet known
        if interaction_ not in self.interaction_memory.get_primitive_interactions():
            self.interaction_memory.add_interaction(interaction_)

        # Post enacted interaction event
        AppState.state.get_event_manager().post_event(events.AgentEnactionEvent(
            self, 
            interaction_, 
            self.interaction_memory.get_valence(interaction_)))
        self.interaction_memory.add_interaction_to_history(interaction_)

        if (not interaction_ == intended_primitive_interaction
                or self.enacting_interaction_step >= len(self.enacting_interaction_sequence)):
            # Failed or done enacting
            self.enacting_interaction = False

            # Reconstruct enacted interaction from hierarchy of intended
            # interaction
            enacted = self.intended_interaction.reconstruct_from_hierarchy(self.enacted_sequence)
            AppState.get_state().get_logger().info("%s - Enacted: %s" % (self.name, enacted))

            # Add the interaction as an alternative interaction if the intended interaction failed
            if enacted != self.intended_interaction:
                if self.interaction_memory.add_alternative_interaction(self.intended_interaction, enacted):
                    AppState.get_state().get_logger().info("%s - Interaction added as alternative" % self.name)

            # Step 5: add new or reinforce existing composite interactions
            learned_or_reinforced = []
            if isinstance(enacted, interaction.CompositeInteraction):
                learned_or_reinforced.append(enacted)

            if len(self.history) >= 1:
                previous = self.history[-1]
                # <interaction at t-1, enacted interaction>
                t1enacted = interaction.CompositeInteraction(previous, enacted)
                learned_or_reinforced.append(t1enacted)

                if len(self.history) >= 2:
                    penultimate = self.history[-2]
                    # <interaction at t-2, interaction at t-1>
                    t2t1 = interaction.CompositeInteraction(penultimate, previous)

                    # <<interaction at t-2, interaction at t-1>, enacted interaction>
                    t2t1_enacted = interaction.CompositeInteraction(t2t1, enacted)
                    learned_or_reinforced.append(t2t1_enacted)

                    # <interaction at t-2, <interaction at t-1, enacted interaction>>
                    t2_t1enacted = interaction.CompositeInteraction(penultimate, t1enacted)
                    learned_or_reinforced.append(t2_t1enacted)
            for composite in learned_or_reinforced:
                if composite not in self.interaction_memory.get_composite_interactions():
                    self.interaction_memory.add_interaction(composite)
                else:
                    self.interaction_memory.increment_weight(composite)
                    
            # Keep history of last 100 actions performed
            if len(self.history) > 100:
                self.history.pop(0)
            self.history.append(enacted)

            """
            According to the paper:

            for pre_interaction in self.context:
                composite = interaction.CompositeInteraction(pre_interaction, enacted)
                learned_or_reinforced.append(composite)
                if composite not in self.interaction_memory.get_composite_interactions():
                    self.interaction_memory.add_interaction(composite)
                else:
                    self.interaction_memory.increment_weight(composite)
            """

            # Step 6: update context
            self.update_context(enacted, learned_or_reinforced)
        else: 
            # Not done
            pass
Example #32
    def run(self):
        """
        Process PyGame events until halt is true.
        """

        self.halt = False

        print("Starting heartbeat.")
        time_elapsed = 0
        while True:

            AppState.get_state().get_event_manager().post_event(
                events.ControlEvent())

            ticked = False

            if AppState.get_state().is_running(
            ) and time_elapsed >= settings.SIMULATION_STEP_TIME:
                print "------- t = %s" % AppState.get_state().get_t()
                AppState.get_state().get_event_manager().post_event(
                    events.TickEvent())
                time_elapsed = 0
                ticked = True

            AppState.get_state().get_event_manager().post_event(
                events.DrawEvent(
                    ticked
                    and AppState.get_state().get_save_simulation_renders()))

            time_elapsed += AppState.get_state().get_clock().tick(
                settings.MAX_FPS)

            if ticked:
                AppState.get_state().increment_t()
Example #33
    def run(self, slow = True, halt_fun = None, metrics_fun = None):
        """
        Process PyGame events until halt is true.

        :param slow: whether the simulation should be slowed down so that
                     individual ticks and renders are visible.
        :param halt_fun: A callable taking as input the current
                         simulation time in ticks, and returning
                         a boolean indicating whether the simulation
                         should halt.
        :param metrics_fun: A callable returning a dictionary of named
                            metrics.
        """

        self.metrics = []

        self.halt = False

        print("Starting heartbeat.")
        time_elapsed = 0
        while not self.halt:
            if callable(halt_fun) and halt_fun(AppState.get_state().get_t()):
                self.halt = True
                continue

            AppState.get_state().get_event_manager().post_event(events.ControlEvent())

            ticked = False
            
            if not slow or (AppState.get_state().is_running() and time_elapsed >= settings.SIMULATION_STEP_TIME):
                AppState.get_state().get_logger().info("------- t = %s" % AppState.get_state().get_t())

                AppState.get_state().get_event_manager().post_event(events.TickEvent())
                time_elapsed = 0
                ticked = True

                if callable(metrics_fun):
                    self.metrics.append(metrics_fun())

            AppState.get_state().get_event_manager().post_event(events.DrawEvent(ticked and AppState.get_state().get_save_simulation_renders()))

            if slow:
                time_elapsed += AppState.get_state().get_clock().tick(settings.MAX_FPS)

            if ticked:
                AppState.get_state().increment_t()
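
One possible way to drive this version of run() headlessly is sketched below; the 1000-tick halt condition and the single "t" metric are made up for illustration, and the call assumes the event manager, world and clock have already been set up as in the surrounding examples.

# Hypothetical usage of the run() signature documented above.
heart_beat = HeartBeat()
heart_beat.run(
    slow=False,                                  # do not throttle to MAX_FPS
    halt_fun=lambda t: t >= 1000,                # stop after 1000 ticks
    metrics_fun=lambda: {"t": AppState.get_state().get_t()})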
Example #34
def run_experiment(experiment_, render = True, interactive = True, console_output = True, save_logs = True):
    """
    Run an experiment until it halts. Simulates the world defined 
    by the experiment and handles control events.
    
    :param experiment_: An object of type Experiment.
    :param render: A boolean indicating whether the simulation is
                   to be rendered to the screen.
    :param interactive: A boolean indicating whether interactive mode
                        is to be enabled. If interactive mode is on,
                        rendering should be on as well.
    :param console_output: A boolean indicating whether simulation
                           output is to be displayed in the console.
    :param save_logs: A boolean indicating whether simulation output
                      is to be saved in a log file.
    """

    if interactive:
        assert render, "render must be true if interactive mode is set"

    # Reset the app state
    AppState.get_state().reset()

    # Initialize the event manager.
    event_manager = events.EventManager()
    AppState.get_state().set_event_manager(event_manager)

    # Initialize and register the application heartbeat.
    heart_beat = HeartBeat()
    event_manager.register_listener(heart_beat)

    # Initialize and register the world.
    AppState.get_state().set_experiment(experiment_)
    world = experiment_.get_world()
    event_manager.register_listener(world)
    AppState.get_state().set_world(world)

    # Initialize pygame.
    surface = init()

    if render:
        # Initialize and register the view.
        main_view = view.View(surface)
        event_manager.register_listener(main_view)

    # Initialize the website trace history view.
    trace_view = agentevents.AgentEvents()
    event_manager.register_listener(trace_view)

    # Initialize and register the controller.
    main_controller = controller.Controller()
    event_manager.register_listener(main_controller)

    if interactive:
        # Add the experiment controller to the controller
        main_controller.set_experiment_controller(lambda e, coords: experiment_.controller(e, main_view.window_coords_to_world_coords(coords)))

    if console_output:
        # Enable console logger
        AppState.get_state().enable_console_logger()

    if save_logs:
        # Store experiment logs
        if not os.path.isdir(settings.RESULTS_DIR):
            os.makedirs(settings.RESULTS_DIR)
        file_path = os.path.join(settings.RESULTS_DIR, "%s - %s.log" % (strftime("%Y%m%dT%H%M%S"), experiment_.__class__.__name__))
        AppState.get_state().enable_file_logger(file_path)

    # Start the webserver.
    webserver.register({'traces': trace_view})
    webserver.start()

    # Start the heartbeat.
    heart_beat.run(slow = render, halt_fun = experiment_.halt, metrics_fun = experiment_.calculate_metrics)

    if len(heart_beat.metrics) > 0:
        # Store experiment results
        if not os.path.isdir(settings.RESULTS_DIR):
            os.makedirs(settings.RESULTS_DIR)

        file_path = os.path.join(settings.RESULTS_DIR, "%s - %s.json" % (strftime("%Y%m%dT%H%M%S"), experiment_.__class__.__name__))
        with open(file_path, 'w') as f:
            json.dump(heart_beat.metrics, f, indent=4, sort_keys=True)
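
A hypothetical headless invocation of run_experiment() is sketched below; it assumes the chosen experiment class provides the halt() and calculate_metrics() callables that this function hands to the heartbeat, which is not shown in this listing.

# Hypothetical call; BasicVisionExperiment appears in Example #26, but whether
# it implements halt()/calculate_metrics() is not shown here.
run_experiment(experiment.basic.BasicVisionExperiment(),
               render=False,
               interactive=False,
               console_output=True,
               save_logs=False)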
Example #35
def render_content(state_json):
    state = AppState.from_json(json.loads(state_json))
    return views.create_summary_graph(x_axis_years=state.x_axis_years,
                                      renting_capital=state.renting_capital,
                                      property_value=state.property_value)
Example #36
def update_output(
    # Rent
    initial_rent_n_submit,
    initial_rent_n_blur,
    inflation_rate_n_submit,
    inflation_rate_n_blur,
    # Property
    property_sale_price_n_submit,
    property_sale_price_n_blur,
    property_tax_n_submit,
    property_tax_n_blur,
    condo_fee_n_submit,
    condo_fee_n_blur,
    insurance_n_submit,
    insurance_n_blur,
    utility_cost_n_submit,
    utility_cost_n_blur,
    property_appreciation_n_submit,
    property_appreciation_n_blur,
    # Mortgage
    mortgage_load_n_submit,
    mortgage_load_n_blur,
    mortgage_interest_rate_n_submit,
    mortgage_interest_rate_n_blur,
    mortgage_terms,
    mortgage_payments_per_year,
    # Purchase Upfront Cost
    mortgage_down_payment_n_submit,
    mortgage_down_payment_n_blur,
    welcome_tax_n_submit,
    welcome_tax_n_blur,
    legal_fee_n_submit,
    legal_fee_n_blur,
    # Other investments
    investment_roi_n_submit,
    investment_roi_n_blur,
    initial_rent,
    inflation_rate,
    property_tax,
    condo_fee,
    insurance,
    utility_cost,
    property_appreciation,
    mortgage_loan,
    mortgage_interest_rate,
    mortgage_down_payment,
    welcome_tax,
    legal_fee,
    other_investment_roi,
):
    logger.log(
        logging.INFO,
        msg="Update output: " + f"\n\tinitial_rent={initial_rent}" +
        f"\n\tinflation_rate={inflation_rate}" +
        f"\n\tproperty_tax={property_tax}" + f"\n\tcondo_fee={condo_fee}" +
        f"\n\tinsurance={insurance}" + f"\n\tutility_cost={utility_cost}" +
        f"\n\tproperty_appreciation={property_appreciation}" +
        f"\n\tmortgage_loan={mortgage_loan}" +
        f"\n\tmortgage_interest_rate={mortgage_interest_rate}" +
        f"\n\tmortgage_terms={mortgage_terms}" +
        f"\n\tmortgage_payments_per_year={mortgage_payments_per_year}" +
        f"\n\tmortgage_down_payment={mortgage_down_payment}" +
        f"\n\twelcome_tax={welcome_tax}" + f"\n\tlegal_fee={legal_fee}" +
        f"\n\tother_investment_roi={other_investment_roi}")

    inflation_rate = inflation_rate / 100
    mortgage_interest_rate = mortgage_interest_rate / 100
    property_appreciation = property_appreciation / 100
    other_investment_roi = other_investment_roi / 100

    number_of_years = mortgage_terms

    monthly_property_owning_cost = property_tax + condo_fee + insurance + utility_cost

    mortgage = Mortgage.create(loan=mortgage_loan,
                               annual_interest_rate=mortgage_interest_rate,
                               amortization_period=number_of_years,
                               annual_payment_count=mortgage_payments_per_year)

    rent = Rent.create_rent(initial_monthly_rent=initial_rent,
                            inflation_rate=inflation_rate,
                            number_of_years=number_of_years)

    initial_capital = mortgage_down_payment + welcome_tax + legal_fee
    renting_capital = RentingCapital.create(
        initial_capital=initial_capital,
        return_on_investment=other_investment_roi,
        monthly_property_owning_cost=monthly_property_owning_cost,
        mortgage=mortgage,
        rent=rent,
        number_of_years=number_of_years)

    property_initial_value = mortgage_down_payment + mortgage_loan
    property_value = PropertyValue.create(
        initial_value=property_initial_value,
        appreciation_rate=property_appreciation,
        mortgage=mortgage,
        number_of_years=number_of_years,
        real_estate_commission=0.05)

    cache = AppState(number_of_years=number_of_years,
                     rent=rent,
                     property_value=property_value,
                     renting_capital=renting_capital,
                     mortgage=mortgage)

    return cache.dump()
Example #37
    def save_agent(self):
        """
        Save all agents to files.
        """
        import json
        import utilities.customjsonencoder
        try:
            import dill
        except ImportError:
            print "ERROR: Module 'dill' is required to save agents."
            return

        print "---"
        print "Press [enter] to write all agents to a pickle file, [shift]+[enter] to write all agents to both a pickle and json file, or [escape] to cancel."

        export_json = False

        while True:
            event = pygame.event.wait()
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_ESCAPE:
                    print "Saving cancelled."
                    print "---"
                    return
                elif event.key == pygame.K_RETURN:
                    if pygame.key.get_pressed()[
                            pygame.K_LSHIFT] or pygame.key.get_pressed()[
                                pygame.K_RSHIFT]:
                        export_json = True
                    break

        print "Saving agents to file..."

        # Create output directory if it does not exist
        if not os.path.exists(settings.AGENT_DIR):
            os.makedirs(settings.AGENT_DIR)

        # Pickle and save agents to file
        agents = AppState.get_state().get_world().get_agents()
        for agent in agents:
            if export_json:
                file_name = "%s - %s.json" % (strftime("%Y%m%dT%H%M%S"),
                                              agent.get_name())
                file_path = os.path.join(settings.AGENT_DIR, file_name)

                print " - Saving %s to %s" % (agent.get_name(), file_path)
                with open(file_path, 'w') as f:
                    json.dump(
                        agent,
                        f,
                        cls=utilities.customjsonencoder.CustomJSONEncoder,
                        indent=4,
                        sort_keys=True)

            file_name = "%s - %s.p" % (strftime("%Y%m%dT%H%M%S"),
                                       agent.get_name())
            file_path = os.path.join(settings.AGENT_DIR, file_name)

            print " - Saving %s to %s" % (agent.get_name(), file_path)
            dill.dump(agent, open(file_path, "wb"))

        print "Agents saved."

        print "---"
Example #38
    def enacted_interaction(self, interaction_, data):
        self.enacting_interaction_step += 1
        intended_primitive_interaction = data

        self.enacted_sequence.append(interaction_)

        # Learn interaction if it is not yet known
        if interaction_ not in self.interaction_memory.get_primitive_interactions(
        ):
            self.interaction_memory.add_interaction(interaction_)

        # Post enacted interaction event
        AppState.state.get_event_manager().post_event(
            events.AgentEnactionEvent(
                self, interaction_,
                self.interaction_memory.get_valence(interaction_)))
        self.interaction_memory.add_interaction_to_history(interaction_)

        if (not interaction_ == intended_primitive_interaction
                or self.enacting_interaction_step >= len(
                    self.enacting_interaction_sequence)):
            # Failed or done enacting
            self.enacting_interaction = False

            # Reconstruct enacted interaction from hierarchy of intended
            # interaction
            enacted = self.intended_interaction.reconstruct_from_hierarchy(
                self.enacted_sequence)
            AppState.get_state().get_logger().info("%s - Enacted: %s" %
                                                   (self.name, enacted))

            # Add the interaction as an alternative interaction if the intended interaction failed
            if enacted != self.intended_interaction:
                if self.interaction_memory.add_alternative_interaction(
                        self.intended_interaction, enacted):
                    AppState.get_state().get_logger().info(
                        "%s - Interaction added as alternative" % self.name)

            # Step 5: add new or reinforce existing composite interactions
            learned_or_reinforced = []
            if isinstance(enacted, interaction.CompositeInteraction):
                learned_or_reinforced.append(enacted)

            if len(self.history) >= 1:
                previous = self.history[-1]
                # <interaction at t-1, enacted interaction>
                t1enacted = interaction.CompositeInteraction(previous, enacted)
                learned_or_reinforced.append(t1enacted)

                if len(self.history) >= 2:
                    penultimate = self.history[-2]
                    # <interaction at t-2, interaction at t-1>
                    t2t1 = interaction.CompositeInteraction(
                        penultimate, previous)

                    # <<interaction at t-2, interaction at t-1>, enacted interaction>
                    t2t1_enacted = interaction.CompositeInteraction(
                        t2t1, enacted)
                    learned_or_reinforced.append(t2t1_enacted)

                    # <interaction at t-2, <interaction at t-1, enacted interaction>>
                    t2_t1enacted = interaction.CompositeInteraction(
                        penultimate, t1enacted)
                    learned_or_reinforced.append(t2_t1enacted)
            for composite in learned_or_reinforced:
                if composite not in self.interaction_memory.get_composite_interactions(
                ):
                    self.interaction_memory.add_interaction(composite)
                else:
                    self.interaction_memory.increment_weight(composite)

            # Keep history of last 100 actions performed
            if len(self.history) > 100:
                self.history.pop(0)
            self.history.append(enacted)
            """
            According to the paper:

            for pre_interaction in self.context:
                composite = interaction.CompositeInteraction(pre_interaction, enacted)
                learned_or_reinforced.append(composite)
                if composite not in self.interaction_memory.get_composite_interactions():
                    self.interaction_memory.add_interaction(composite)
                else:
                    self.interaction_memory.increment_weight(composite)
            """

            # Step 6: update context
            self.update_context(enacted, learned_or_reinforced)
        else:
            # Not done
            pass