Example #1
    def visualize_world(self, brain):
        """ 
        Show what's going on in the world.
        """
        if self.print_features:
            projections, activities = brain.cortex.get_index_projections(to_screen=True)
            wtools.print_pixel_array_features(
                    projections, 
                    self.fov_span ** 2 * 2, 
                    0,  # self.num_actions
                    self.fov_span, self.fov_span, 
                    world_name=self.name)

        # Periodically show the state history and inputs as perceived by BECCA
        print ''.join(["world is ", str(self.timestep), " timesteps old"])
        fig = plt.figure(11)
        plt.clf()
        plt.plot(self.column_history, 'k.')
        plt.title(''.join(['Column history for ', self.name]))
        plt.xlabel('time step')
        plt.ylabel('position (pixels)')
        fig.show()
        fig.canvas.draw()

        fig = plt.figure(12)
        sensed_image = np.reshape(
                0.5 * (self.sensors[:len(self.sensors)/2] - 
                       self.sensors[len(self.sensors)/2:] + 1), 
                (self.fov_span, self.fov_span))
        plt.gray()
        plt.imshow(sensed_image, interpolation='nearest')
        plt.title("Image sensed")
        fig.show()
        fig.canvas.draw()
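
The `sensed_image` expression that appears in every example decodes a split sensor vector: the first half of `self.sensors` and the second half look like complementary (on/off) channels, and `0.5 * (first_half - second_half + 1)` maps their difference from [-1, 1] back into [0, 1] before reshaping it into a `fov_span` x `fov_span` image. A minimal standalone sketch of that decoding, assuming sensor values in [0, 1]; the `decode_sensors` helper is made up for illustration and is not part of BECCA:

import numpy as np

def decode_sensors(sensors, fov_span):
    """Rebuild a fov_span x fov_span grayscale image from a split sensor vector.

    Assumes the first half of `sensors` holds the 'on' channels and the
    second half the complementary 'off' channels, each in [0, 1].
    """
    sensors = np.asarray(sensors, dtype=float)
    half = len(sensors) // 2
    # The difference lies in [-1, 1]; rescale to [0, 1] for display.
    image = 0.5 * (sensors[:half] - sensors[half:] + 1.0)
    return image.reshape((fov_span, fov_span))

# Round trip: encode a random 5 x 5 patch as 50 sensor values, then decode it.
fov_span = 5
original = np.random.RandomState(0).rand(fov_span * fov_span)
sensors = np.concatenate([original, 1.0 - original])
assert np.allclose(decode_sensors(sensors, fov_span), original)
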
Example #2
    def visualize(self, agent):
        """ Show what is going on in BECCA and in the world """
        self.row_history.append(self.row_position)
        self.column_history.append(self.column_position)
        if self.animate:
            print ''.join([
                str(self.row_position), ', ',
                str(self.column_position), '-row and col position  ',
                str(self.reward), '-reward'
            ])
        # Periodically display the history and inputs as perceived by BECCA
        if (self.timestep % self.VISUALIZE_PERIOD) != 0:
            return

        print ' '.join(["world is", str(self.timestep), "timesteps old."])
        fig = plt.figure(11)
        plt.clf()
        plt.plot(self.row_history, 'k.')
        plt.title("Row history")
        plt.xlabel('time step')
        plt.ylabel('position (pixels)')
        fig.show()
        fig.canvas.draw()

        fig = plt.figure(12)
        plt.clf()
        plt.plot(self.column_history, 'k.')
        plt.title("Column history")
        plt.xlabel('time step')
        plt.ylabel('position (pixels)')
        fig.show()
        fig.canvas.draw()

        fig = plt.figure(13)
        sensed_image = np.reshape(
            0.5 * (self.sensors[:len(self.sensors) / 2] -
                   self.sensors[len(self.sensors) / 2:] + 1),
            (self.fov_span, self.fov_span))
        plt.gray()
        plt.imshow(sensed_image, interpolation='nearest')
        plt.title("Image sensed")
        fig.show()
        fig.canvas.draw()

        # Periodically show the entire feature set
        if self.print_feature_set:
            (feature_set, feature_activities) = agent.get_index_projections()
            wtools.print_pixel_array_features(feature_set,
                                              self.num_sensors,
                                              self.num_actions,
                                              self.fov_span,
                                              self.fov_span,
                                              directory='log',
                                              world_name=self.name)
Example #3
    def visualize(self, agent):
        """ 
        Show what is going on in BECCA and in the world 
        """
        self.row_history.append(self.row_position)
        self.column_history.append(self.column_position)
        if self.animate:
            print ''.join([str(self.row_position), ', ', 
                           str(self.column_position), 
                           '-row and col position  ', 
                           str(self.reward), '-reward'])
        # Periodically display the history and inputs as perceived by BECCA
        if (self.timestep % self.VISUALIZE_PERIOD) != 0:
            return

        print ' '.join(["world is", str(self.timestep), "timesteps old."])
        fig = plt.figure(11)
        plt.clf()
        plt.plot(self.row_history, 'k.')
        plt.title("Row history")
        plt.xlabel('time step')
        plt.ylabel('position (pixels)')
        fig.show()
        fig.canvas.draw()

        fig = plt.figure(12)
        plt.clf()
        plt.plot(self.column_history, 'k.')
        plt.title("Column history")
        plt.xlabel('time step')
        plt.ylabel('position (pixels)')
        fig.show()
        fig.canvas.draw()

        fig = plt.figure(13)
        sensed_image = np.reshape(0.5 * (
                self.sensors[:len(self.sensors)/2] - 
                self.sensors[len(self.sensors)/2:] + 1), 
                (self.fov_span, self.fov_span))
        plt.gray()
        plt.imshow(sensed_image, interpolation='nearest')
        plt.title("Image sensed")
        fig.show()
        fig.canvas.draw()

        # Periodically show the entire feature set
        if self.print_feature_set:
            (feature_set, feature_activities) = agent.get_index_projections()
            wtools.print_pixel_array_features(feature_set, self.num_sensors,
                                              0,
                                              self.fov_span, self.fov_span,
                                              directory='log', 
                                              world_name=self.name)  
Example #4
    def visualize(self, agent):
        if self.animate:
            print ''.join([
                'column_position: ',
                str(self.column_position), '  self.reward: ',
                str(self.reward), '  self.column_step: ',
                str(self.column_step)
            ])
        if not self.graphing:
            return

        # Periodically show the state history and inputs as perceived by BECCA
        self.column_history.append(self.column_position)
        if (self.timestep % self.VISUALIZE_PERIOD) == 0:
            # Periodically show the agent's internal state and reward history
            agent.visualize()

            print ''.join(["world is ", str(self.timestep), " timesteps old"])
            fig = plt.figure(11)
            plt.clf()
            plt.plot(self.column_history, 'k.')
            plt.title(''.join(['Column history for ', self.name]))
            plt.xlabel('time step')
            plt.ylabel('position (pixels)')
            fig.show()
            fig.canvas.draw()

            fig = plt.figure(12)
            sensed_image = np.reshape(
                0.5 * (self.sensors[:len(self.sensors) / 2] -
                       self.sensors[len(self.sensors) / 2:] + 1),
                (self.fov_span, self.fov_span))
            plt.gray()
            plt.imshow(sensed_image, interpolation='nearest')
            plt.title("Image sensed")
            fig.show()
            fig.canvas.draw()
            # Periodically visualize the entire feature set
            if self.print_feature_set:
                (feature_set, feature_activities) = \
                        agent.get_index_projections()
                wtools.print_pixel_array_features(feature_set,
                                                  self.num_sensors,
                                                  self.num_actions,
                                                  self.fov_span,
                                                  self.fov_span,
                                                  directory='log',
                                                  world_name=self.name)
Example #5
    def visualize(self, agent):
        if self.animate:
            print ''.join(['column_position: ', str(self.column_position), 
                           '  self.reward: ', str(self.reward), 
                           '  self.column_step: ', str(self.column_step)])
        if not self.graphing:
            return

        # Periodically show the state history and inputs as perceived by BECCA
        self.column_history.append(self.column_position)
        if (self.timestep % self.VISUALIZE_PERIOD) == 0:
            # Periodically show the agent's internal state and reward history
            agent.visualize() 

            print ''.join(["world is ", str(self.timestep), " timesteps old"])
            fig = plt.figure(11)
            plt.clf()
            plt.plot(self.column_history, 'k.')
            plt.title(''.join(['Column history for ', self.name]))
            plt.xlabel('time step')
            plt.ylabel('position (pixels)')
            fig.show()
            fig.canvas.draw()

            fig = plt.figure(12)
            sensed_image = np.reshape(
                    0.5 * (self.sensors[:len(self.sensors)/2] - 
                           self.sensors[len(self.sensors)/2:] + 1), 
                    (self.fov_span, self.fov_span))
            plt.gray()
            plt.imshow(sensed_image, interpolation='nearest')
            plt.title("Image sensed")
            fig.show()
            fig.canvas.draw()
            # Periodically visualize the entire feature set
            if self.print_feature_set:
                (feature_set, feature_activities) = \
                        agent.get_index_projections()
                wtools.print_pixel_array_features(feature_set, self.num_sensors,
                                                  self.num_actions, 
                                                  self.fov_span, self.fov_span,
                                                  directory='log', 
                                                  world_name=self.name)
Example #6
    def visualize_world(self, brain):
        """ 
        Show what's going on in the world.
        """
        if self.print_features:
            projections, activities = brain.cortex.get_index_projections(
                to_screen=True)
            wtools.print_pixel_array_features(
                projections,
                self.fov_span**2 * 2,
                0,  # self.num_actions
                self.fov_span,
                self.fov_span,
                world_name=self.name)

        # Periodically show the state history and inputs as perceived by BECCA
        print ''.join(["world is ", str(self.timestep), " timesteps old"])
        fig = plt.figure(11)
        plt.clf()
        plt.plot(self.column_history, 'k.')
        plt.title(''.join(['Column history for ', self.name]))
        plt.xlabel('time step')
        plt.ylabel('position (pixels)')
        fig.show()
        fig.canvas.draw()

        fig = plt.figure(12)
        sensed_image = np.reshape(
            0.5 * (self.sensors[:len(self.sensors) / 2] -
                   self.sensors[len(self.sensors) / 2:] + 1),
            (self.fov_span, self.fov_span))
        plt.gray()
        plt.imshow(sensed_image, interpolation='nearest')
        plt.title("Image sensed")
        fig.show()
        fig.canvas.draw()
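
All six examples refresh their plots with the same non-blocking pattern: reuse a fixed figure number (plt.figure(11), plt.figure(12), ...) so each call redraws the same window, clear it with plt.clf(), then push the update with fig.show() followed by fig.canvas.draw() so the simulation loop keeps running. Below is a stripped-down sketch of the same idea outside of BECCA, using matplotlib's interactive mode (so the explicit fig.show() is not needed); the refresh_history_plot helper and the driving loop are made up for illustration:

import matplotlib.pyplot as plt

plt.ion()  # interactive mode, so drawing does not block the loop

def refresh_history_plot(history, title):
    """Redraw a scatter of positions over time in a reused figure window."""
    fig = plt.figure(11)   # fixed number: the same window is reused every call
    plt.clf()              # wipe the previous frame
    plt.plot(history, 'k.')
    plt.title(title)
    plt.xlabel('time step')
    plt.ylabel('position (pixels)')
    fig.canvas.draw()      # force an immediate redraw
    plt.pause(0.001)       # let the GUI event loop process the update

history = []
for timestep in range(100):
    history.append(timestep % 10)   # stand-in for column_position
    if timestep % 25 == 0:          # stand-in for VISUALIZE_PERIOD
        refresh_history_plot(history, 'Column history')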