Example #1
    def __init__(self, name, team, index, config_file=None):
        self.last_frame_time = None
        self.config_file = config_file
        self.index = index
        self.load_config_file()
        self.inp = InputFormatter(team, index)
        self.reward_manager = reward_manager.RewardManager()
        # Run TensorFlow on the CPU only; no GPU devices are made visible to this session.
        config = tf.ConfigProto(device_count={'GPU': 0})
        self.sess = tf.Session(config=config)
        self.actions_handler = action_factory.get_handler(
            control_scheme=dynamic_action_handler.super_split_scheme)
        self.state_dim = input_formatter.get_state_dim()
        self.num_actions = self.actions_handler.get_logit_size()
        print('num_actions', self.num_actions)
        self.model = self.get_model_class()(
            self.sess,
            self.state_dim,
            self.num_actions,
            player_index=self.index,
            action_handler=self.actions_handler,
            config_file=config_file,
            is_training=False)

        self.model.summary_writer = tf.summary.FileWriter(
            self.model.get_event_path('random_packet', is_replay=True))

        # Evaluate a single frame per step when running live.
        self.model.batch_size = 1
        self.model.mini_batch_size = 1

        self.model.is_graphing = self.is_graphing
        self.model.is_online_training = self.is_online_training

        # self.model.apply_feature_creation(TensorflowFeatureCreator())

        try:
            self.model.create_model(self.model.input_placeholder)
        except TypeError as e:
            raise Exception('failed to create model') from e

        if self.model.is_training and self.model.is_online_training:
            self.model.create_reinforcement_training_model()

        self.model.create_savers()

        self.model.initialize_model()
        if self.is_graphing:
            # Rotating buffer of recent real rewards, used for live graphing.
            self.rotating_real_reward_buffer = live_data_util.RotatingBuffer(
                self.index + 10)
Example #2
    def end_file(self):
        self.batch_process()
        if self.file_frame_count == 0:
            # No frames were processed for this file; avoid dividing by zero.
            return
        per_frame_reward = self.file_reward / float(self.file_frame_count)
        if self.current_file not in self.eval_compare:
            self.eval_compare[self.current_file] = []
        self.eval_compare[self.current_file].append(per_frame_reward)
        print('Reward for file:', self.file_reward)
        print('Reward per frame:', per_frame_reward)

        # Reset the per-file accumulators and reward state for the next file.
        self.file_reward = 0
        self.file_frame_count = 0
        self.reward_manager = reward_manager.RewardManager()
Example #3
    def start_new_file(self):
        self.file_number += 1
        self.last_action = None
        self.reward_manager = reward_manager.RewardManager()
Example #4
    def __init__(self):
        self.file_reward = 0
        self.file_frame_count = 0
        self.total_reward = 0
        self.frame_count = 0
        self.reward_manager = reward_manager.RewardManager()
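
Taken together, Examples #2-#4 show a per-file reward-accumulation lifecycle: the constructor zeroes the counters and creates a RewardManager, start_new_file resets that state before each replay file, and end_file reports the per-frame average and resets again. The sketch below only illustrates how a caller might drive that loop; replay_files, frames_in, and reward_for_frame are hypothetical stand-ins for whatever actually supplies frames and their rewards in the real trainer.

def run_eval(trainer, replay_files, frames_in, reward_for_frame):
    # Drive the per-file loop implied by Examples #2-#4.
    # replay_files, frames_in, and reward_for_frame are hypothetical
    # placeholders, not part of the original codebase.
    for path in replay_files:
        trainer.start_new_file()        # Example #3: reset per-file state
        trainer.current_file = path
        for frame in frames_in(path):
            reward = reward_for_frame(trainer, frame)
            trainer.file_reward += reward
            trainer.total_reward += reward
            trainer.file_frame_count += 1
            trainer.frame_count += 1
        trainer.end_file()              # Example #2: report per-frame reward, then reset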