def init_variables(self, info):
    """Initialise DQN training state from the game ``info`` dict.

    Only ``'max_linear_velocity'`` and ``'resolution'`` are read here; the
    other keys the framework provides (game_time, goal, number_of_robots,
    penalty_area, codewords, robot_height, robot_radius, field, team_info,
    axle_length, ball_radius, ...) are available but unused by this agent.
    """
    self.max_linear_velocity = info['max_linear_velocity']
    self.resolution = info['resolution']

    # Frame/image handling.
    self.colorChannels = 3  # nf
    self.end_of_frame = False
    self.image = Received_Image(self.resolution, self.colorChannels)

    # Training hyper-parameters.
    self.D = []                    # replay memory
    self.update = 100              # target-network update interval
    self.epsilon = 1.0             # initial exploration rate
    self.final_epsilon = 0.05      # epsilon floor
    self.dec_epsilon = 0.05        # epsilon decrement per generation
    self.step_epsilon = 20000      # iterations per generation
    self.observation_steps = 5000  # warm-up iterations before training starts
    self.save_every_steps = 5000   # checkpoint interval
    self.num_actions = 11          # size of the discrete action set
    self._frame = 0
    self._iterations = 0
    self.minibatch_size = 64
    self.gamma = 0.99
    self.sqerror = 100             # initial squared-error value

    # Online network and frozen target network.
    # 2nd argument: False trains from scratch; pass CHECKPOINT to resume.
    self.Q = NeuralNetwork(None, False, False)
    self.Q_ = NeuralNetwork(self.Q, False, True)

    self.wheels = [0] * 10
    return
# Example #2 (score: 0)
        def init_variables(self, info):
            """Initialise DQN training state from the game ``info`` dict.

            Reads ``'resolution'`` and ``'max_linear_velocity'``; the
            remaining keys the framework documents (game_time,
            number_of_robots, field, goal, penalty_area, goal_area,
            ball_radius, ball_mass, robot_size, robot_height, axle_length,
            robot_body_mass, wheel_radius, wheel_mass, max_torque,
            codewords, ...) are available but unused by this agent.
            """
            self.resolution = info['resolution']
            self.max_linear_velocity = info['max_linear_velocity']

            # Reward bookkeeping.
            self.total_distance = 0
            self.ball_touch = 0
            self.distance_buffer = []     # distances used for the reward

            # Frame/image handling.
            self.colorChannels = 3        # nf
            self.end_of_frame = False
            self.image = Received_Image(self.resolution, self.colorChannels)
            self.received_frame = Frame()

            # Training hyper-parameters.
            self.D = []                   # replay memory
            self.update = 100             # target-network update interval
            self.epsilon = 1.0            # initial exploration rate
            self.final_epsilon = 0.05     # epsilon floor
            self.dec_epsilon = 0.05       # epsilon decrement per generation
            self.step_epsilon = 5000      # iterations per generation
            self.observation_steps = 1000 # warm-up iterations before training starts
            self.save_every_steps = 1000  # checkpoint interval
            self.num_actions = 11         # size of the discrete action set
            self._frame = 0
            self._iterations = 0
            self.minibatch_size = 64
            self.gamma = 0.99
            self.sqerror = 100            # initial squared-error value

            # Online network and frozen target network.
            # 2nd argument: False trains from scratch; pass CHECKPOINT to resume.
            self.Q = NeuralNetwork(None, False, False)
            self.Q_ = NeuralNetwork(self.Q, False, True)

            self.wheels = [0] * 10
            return
 def init_variables(self, info):
     """Initialise inference-time state from the game ``info`` dict.

     Only ``'max_linear_velocity'`` and ``'resolution'`` are read; the
     network weights are restored from CHECKPOINT rather than trained.
     """
     self.max_linear_velocity = info['max_linear_velocity']
     self.resolution = info['resolution']

     # Frame/image handling.
     self.colorChannels = 3  # nf in dqn_main.py
     self.end_of_frame = False
     self.image = Received_Image(self.resolution, self.colorChannels)
     self._frame = 0

     # 2nd argument: CHECKPOINT loads saved weights (False would train from scratch).
     self.Q = NeuralNetwork(None, CHECKPOINT, False)

     self.wheels = [0] * 10
     return
        def init_variables(self, info):
            """Initialise inference-time state from the game ``info`` dict.

            Reads ``'resolution'`` and ``'max_linear_velocity'``; the
            remaining framework keys (game_time, number_of_robots, field,
            goal, penalty_area, goal_area, ball_radius, ball_mass,
            robot_size, robot_height, axle_length, robot_body_mass,
            wheel_radius, wheel_mass, max_torque, codewords, ...) are
            available but unused. Network weights are restored from
            CHECKPOINT rather than trained.
            """
            self.resolution = info['resolution']
            self.max_linear_velocity = info['max_linear_velocity']

            # Frame/image handling.
            self.colorChannels = 3  # nf in dqn_main.py
            self.end_of_frame = False
            self.image = Received_Image(self.resolution, self.colorChannels)
            self._frame = 0

            # 2nd argument: CHECKPOINT loads saved weights (False would train from scratch).
            self.Q = NeuralNetwork(None, CHECKPOINT, False)

            self.wheels = [0] * 10
            return