Beispiel #1
0
    def __init__(self, trainer_config_path, model_path):
        """
        Build the 'Learner' brain description and load a trained PPO policy.

        :param trainer_config_path: Path to a YAML trainer configuration file
            containing a top-level 'Learner' section.
        :param model_path: Directory holding the trained model checkpoint.
        """
        # Fixed observation/action spec for the 'Learner' brain: one 84x84
        # RGB camera, a 3-element vector observation, and two discrete action
        # branches of size 3 each.
        self.brain = BrainParameters(
            brain_name='Learner',
            camera_resolutions=[{
                'height': 84,
                'width': 84,
                'blackAndWhite': False
            }],
            num_stacked_vector_observations=1,
            vector_action_descriptions=['', ''],
            vector_action_space_size=[3, 3],
            vector_action_space_type=0,  # corresponds to discrete
            vector_observation_space_size=3)

        # Use a context manager so the config file handle is closed, and
        # yaml.safe_load rather than yaml.load: the config is plain data and
        # full load can construct arbitrary Python objects.
        with open(trainer_config_path) as config_file:
            self.trainer_params = yaml.safe_load(config_file)['Learner']
        self.trainer_params['keep_checkpoints'] = 0
        self.trainer_params['model_path'] = model_path
        self.trainer_params['use_recurrent'] = False

        # load=True restores weights from model_path; is_training=False
        # configures the policy for inference only.
        self.policy = PPOPolicy(brain=self.brain,
                                seed=0,
                                trainer_params=self.trainer_params,
                                is_training=False,
                                load=True)
Beispiel #2
0
    def __init__(self):
        """
        Load the agent's trained PPO policy and initialize anything needed.

        Reads the trainer configuration and model from fixed absolute paths
        inside the evaluation container (/aaio/data).
        """

        # Load the configuration and model using ABSOLUTE PATHS
        self.configuration_file = '/aaio/data/trainer_config.yaml'
        self.model_path = '/aaio/data/1-Food/Learner'

        # Fixed observation/action spec for the 'Learner' brain: one 84x84
        # RGB camera, a 3-element vector observation, and two discrete action
        # branches of size 3 each.
        self.brain = BrainParameters(
            brain_name='Learner',
            camera_resolutions=[{
                'height': 84,
                'width': 84,
                'blackAndWhite': False
            }],
            num_stacked_vector_observations=1,
            vector_action_descriptions=['', ''],
            vector_action_space_size=[3, 3],
            vector_action_space_type=0,  # corresponds to discrete
            vector_observation_space_size=3)

        # Use a context manager so the config file handle is closed, and
        # yaml.safe_load rather than yaml.load: the config is plain data and
        # full load can construct arbitrary Python objects.
        with open(self.configuration_file) as config_file:
            self.trainer_params = yaml.safe_load(config_file)['Learner']
        self.trainer_params['keep_checkpoints'] = 0
        self.trainer_params['model_path'] = self.model_path
        self.trainer_params['use_recurrent'] = False

        # load=True restores weights from model_path; is_training=False
        # configures the policy for inference only.
        self.policy = PPOPolicy(brain=self.brain,
                                seed=0,
                                trainer_params=self.trainer_params,
                                is_training=False,
                                load=True)
Beispiel #3
0
    def __init__(self):
        """
        Load the agent's trained PPO policy plus an auxiliary Keras model,
        and initialize a frame memory buffer.

        Reads the trainer configuration and model from fixed absolute paths
        inside the evaluation container.
        """

        # Load the configuration and model using ABSOLUTE PATHS
        self.configuration_file = '/aaio/AnimalAI-Olympics/examples/configs/trainer_config.yaml'
        self.model_path = '/aaio/AnimalAI-Olympics/examples/models/train_example/Learner'

        # Fixed observation/action spec for the 'Learner' brain: one 84x84
        # RGB camera, a 3-element vector observation, and two discrete action
        # branches of size 3 each.
        self.brain = BrainParameters(brain_name='Learner',
                                     camera_resolutions=[{'height': 84, 'width': 84, 'blackAndWhite': False}],
                                     num_stacked_vector_observations=1,
                                     vector_action_descriptions=['', ''],
                                     vector_action_space_size=[3, 3],
                                     vector_action_space_type=0,  # corresponds to discrete
                                     vector_observation_space_size=3
                                     )
        # Use a context manager so the config file handle is closed, and
        # yaml.safe_load rather than yaml.load: the config is plain data and
        # full load can construct arbitrary Python objects.
        with open(self.configuration_file) as config_file:
            self.trainer_params = yaml.safe_load(config_file)['Learner']
        self.trainer_params['keep_checkpoints'] = 0
        self.trainer_params['model_path'] = self.model_path
        self.trainer_params['use_recurrent'] = False

        # load=True restores weights from model_path; is_training=False
        # configures the policy for inference only.
        self.policy = PPOPolicy(brain=self.brain,
                                seed=0,
                                trainer_params=self.trainer_params,
                                is_training=False,
                                load=True)

        self.memory = np.zeros((15, 84, 84, 3))  # keep 15 frames for prediction
        self.internal_model = load_model("model.h5")
Beispiel #4
0
def load_demonstration(file_path):
    """
    Loads and parses a demonstration file.

    :param file_path: Location of demonstration file (.demo).
    :return: Tuple of (BrainParameters, list of BrainInfos, expected number
        of steps) containing the demonstration data.
    :raises FileNotFoundError: if file_path does not point to an existing file.
    :raises ValueError: if the file does not have a '.demo' extension.
    """

    # First 32 bytes of file dedicated to meta-data.
    INITIAL_POS = 33

    if not os.path.isfile(file_path):
        raise FileNotFoundError(
            "The demonstration file {} does not exist.".format(file_path))
    file_extension = pathlib.Path(file_path).suffix
    if file_extension != '.demo':
        raise ValueError(
            "The file is not a '.demo' file. Please provide a file with the "
            "correct extension.")

    brain_params = None
    brain_infos = []
    # Context manager ensures the file handle is closed (the original
    # open(...).read() leaked it).
    with open(file_path, "rb") as demo_file:
        data = demo_file.read()
    next_pos, pos, obs_decoded = 0, 0, 0
    total_expected = 0
    # Each record is a varint length prefix followed by a protobuf message:
    # record 0 = demonstration meta-data, record 1 = brain parameters,
    # records 2+ = per-step agent observations.
    while pos < len(data):
        next_pos, pos = _DecodeVarint32(data, pos)
        if obs_decoded == 0:
            meta_data_proto = DemonstrationMetaProto()
            meta_data_proto.ParseFromString(data[pos:pos + next_pos])
            total_expected = meta_data_proto.number_steps
            pos = INITIAL_POS  # jump past the fixed-size meta-data header
        if obs_decoded == 1:
            brain_param_proto = BrainParametersProto()
            brain_param_proto.ParseFromString(data[pos:pos + next_pos])
            brain_params = BrainParameters.from_proto(brain_param_proto)
            pos += next_pos
        if obs_decoded > 1:
            agent_info = AgentInfoProto()
            agent_info.ParseFromString(data[pos:pos + next_pos])
            brain_info = BrainInfo.from_agent_proto([agent_info], brain_params)
            brain_infos.append(brain_info)
            # Stop once the advertised number of steps has been collected.
            if len(brain_infos) == total_expected:
                break
            pos += next_pos
        obs_decoded += 1
    return brain_params, brain_infos, total_expected
Beispiel #5
0
    def __init__(self):
        """
        Load the agent's trained PPO policy and initialize anything needed.

        Discovers the model directory under /aaio/data via glob, then
        configures optional recurrent state and map-encoding parameters
        from the trainer configuration.
        """

        # Load the configuration and model using ABSOLUTE PATHS
        self.configuration_file = '/aaio/data/trainer_config.yaml'
        # Pick the first (assumed only) */Learner model directory.
        self.model_path = glob.glob('/aaio/data/*/Learner')[0]
        print('model_path', self.model_path)

        # Fixed observation/action spec for the 'Learner' brain: one 84x84
        # RGB camera, a 3-element vector observation, and two discrete action
        # branches of size 3 each.
        self.brain = BrainParameters(
            brain_name='Learner',
            camera_resolutions=[{
                'height': 84,
                'width': 84,
                'blackAndWhite': False
            }],
            num_stacked_vector_observations=1,
            vector_action_descriptions=['', ''],
            vector_action_space_size=[3, 3],
            vector_action_space_type=0,  # corresponds to discrete
            vector_observation_space_size=3)
        # Use a context manager so the config file handle is closed, and
        # yaml.safe_load rather than yaml.load: the config is plain data and
        # full load can construct arbitrary Python objects.
        with open(self.configuration_file) as config_file:
            self.trainer_params = yaml.safe_load(config_file)['Learner']
        self.trainer_params['keep_checkpoints'] = 0
        self.trainer_params['model_path'] = self.model_path

        # load=True restores weights from model_path; is_training=False
        # configures the policy for inference only.
        self.policy = PPOPolicy(brain=self.brain,
                                seed=0,
                                trainer_params=self.trainer_params,
                                is_training=False,
                                load=True)
        # Recurrent state is lazily initialized on first use.
        self.memory_in = None
        self.use_recurrent = self.trainer_params['model_architecture'][
            'use_recurrent']
        self._arena_map = None
        # Map-based architectures carry an extra square map encoding whose
        # side length comes from the config; others have none.
        if self.trainer_params['model_architecture']['architecture'] in [
                'map', 'wba_prize'
        ]:
            self._map_side = self.trainer_params['model_architecture'][
                'map_encoding']['map_side']
        else:
            self._map_side = None
Beispiel #6
0
    def __init__(self, trainer_config_path, model_path):
        """
        Build the 'Learner' brain (optionally extended with a visited-map
        camera), load a trained PPO policy, and set up a lidar estimator.

        :param trainer_config_path: Path to a YAML trainer configuration file
            containing a top-level 'Learner' section.
        :param model_path: Directory holding the trained model checkpoint.
        """
        # Fixed observation/action spec for the 'Learner' brain: one 84x84
        # RGB camera, a 3-element vector observation, and two discrete action
        # branches of size 3 each.
        self.brain = BrainParameters(
            brain_name='Learner',
            camera_resolutions=[{
                'height': 84,
                'width': 84,
                'blackAndWhite': False
            }],
            num_stacked_vector_observations=1,
            vector_action_descriptions=['', ''],
            vector_action_space_size=[3, 3],
            vector_action_space_type=0,  # corresponds to discrete
            vector_observation_space_size=3)

        # Optionally extend the brain with an extra camera for the visited
        # map; extra_brain_info carries the matching auxiliary state.
        if ENABLE_VISITED_MAP_IMAGE:
            self.brain = add_extra_camera_parameter(
                self.brain, USE_FIXED_VISITED_MAP_COORDINATE,
                USE_LIDAR_VECTOR_INFO)
            self.extra_brain_info = ExtraBrainInfo()
        else:
            self.extra_brain_info = None

        # Use a context manager so the config file handle is closed, and
        # yaml.safe_load rather than yaml.load: the config is plain data and
        # full load can construct arbitrary Python objects.
        with open(trainer_config_path) as config_file:
            self.trainer_params = yaml.safe_load(config_file)['Learner']
        self.trainer_params['keep_checkpoints'] = 0
        self.trainer_params['model_path'] = model_path

        # load=True restores weights from model_path; is_training=False
        # configures the policy for inference only.
        self.policy = PPOPolicy(brain=self.brain,
                                seed=0,
                                trainer_params=self.trainer_params,
                                is_training=False,
                                load=True)

        self.lidar_estimator = MultiLidarEstimator(
            save_dir="saved_lidar",  # directory for saved lidar data
            n_arenas=1)