Example #1
Trains a behavioral-cloning policy for the LLVM inliner from pre-collected TFRecord traces, then saves the trained and collect policies.
def train_eval(get_signature_spec_fn=None,
               agent_name='behavioral_cloning',
               num_iterations=100,
               batch_size=64,
               train_sequence_length=1):
    """Train for LLVM inliner."""
    root_dir = os.path.expanduser(FLAGS.root_dir)
    root_dir = os.path.normpath(root_dir)

    # Initialize trainer and policy saver. Note: get_signature_spec_fn must be
    # supplied by the caller; the None default is not callable.
    time_step_spec, action_spec = get_signature_spec_fn()
    tf_agent = agent_creators.create_agent(agent_name, time_step_spec,
                                           action_spec)
    llvm_trainer = trainer.Trainer(root_dir=root_dir, agent=tf_agent)
    policy_dict = {
        'saved_policy': tf_agent.policy,
        'saved_collect_policy': tf_agent.collect_policy,
    }
    saver = policy_saver.PolicySaver(policy_dict=policy_dict)

    # Build a function that yields batched training examples from TFRecord files.
    tfrecord_iterator_fn = data_reader.create_tfrecord_iterator_fn(
        agent_name=agent_name,
        time_step_spec=time_step_spec,
        action_spec=action_spec,
        batch_size=batch_size,
        train_sequence_length=train_sequence_length)

    # Train on the pre-collected traces, if a data path was provided.
    if FLAGS.data_path:
        dataset_iter = tfrecord_iterator_fn(FLAGS.data_path)
        monitor_dict = {}  # no extra metrics to report during training
        llvm_trainer.train(dataset_iter, monitor_dict, num_iterations)

    # Save final policy.
    saver.save(root_dir)
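
For context, a train_eval like this is typically wired up as an absl app entry point, with root_dir and data_path exposed as command-line flags. The sketch below shows one plausible wiring; the flag definitions, the stub _signature_specs function, and its specs are assumptions inferred from the FLAGS references above, not part of the original snippet.

from absl import app
from absl import flags
import tensorflow as tf
from tf_agents.specs import tensor_spec
from tf_agents.trajectories import time_step as ts

# Hypothetical flags matching the FLAGS.* references in train_eval.
flags.DEFINE_string('root_dir', None, 'Directory for checkpoints and policies.')
flags.DEFINE_string('data_path', None, 'Directory with training TFRecords.')
FLAGS = flags.FLAGS


def _signature_specs():
    # Stub specs for illustration: one scalar feature, a binary decision.
    observation_spec = {
        'feature': tf.TensorSpec(dtype=tf.int64, shape=(), name='feature'),
    }
    action_spec = tensor_spec.BoundedTensorSpec(
        shape=(), dtype=tf.int64, minimum=0, maximum=1,
        name='inlining_decision')
    return ts.time_step_spec(observation_spec), action_spec


def main(_):
    train_eval(get_signature_spec_fn=_signature_specs)  # train_eval as above


if __name__ == '__main__':
    app.run(main)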
Example #2
Unit test verifying that PolicySaver writes each policy in policy_dict to its own subdirectory, complete with the SavedModel artifacts and an output_spec.json describing the output tensor.
    def test_save_policy(self):
        # Build a minimal agent whose policy and collect_policy will be saved.
        test_agent = behavioral_cloning_agent.BehavioralCloningAgent(
            self._time_step_spec, self._action_spec, self._network,
            tf.compat.v1.train.AdamOptimizer())
        policy_dict = {
            'saved_policy': test_agent.policy,
            'saved_collect_policy': test_agent.collect_policy
        }
        test_policy_saver = policy_saver.PolicySaver(policy_dict=policy_dict)

        root_dir = self.get_temp_dir()
        test_policy_saver.save(root_dir)

        # One subdirectory per entry in policy_dict.
        sub_dirs = tf.io.gfile.listdir(root_dir)
        self.assertCountEqual(['saved_policy', 'saved_collect_policy'],
                              sub_dirs)

        # Each policy directory must contain the SavedModel graph, its
        # variables, and an output_spec.json describing the model output.
        for sub_dir in ['saved_policy', 'saved_collect_policy']:
            self.assertTrue(
                tf.io.gfile.exists(
                    os.path.join(root_dir, sub_dir, 'saved_model.pb')))
            self.assertTrue(
                tf.io.gfile.exists(
                    os.path.join(root_dir, sub_dir,
                                 'variables/variables.data-00000-of-00001')))
            output_signature_fn = os.path.join(root_dir, sub_dir,
                                               'output_spec.json')
            self.assertTrue(tf.io.gfile.exists(output_signature_fn))
            self.assertEqual([{
                'logging_name': 'inlining_decision',
                'tensor_spec': {
                    'name': 'StatefulPartitionedCall',
                    'port': 0,
                    'type': 'int64_t',
                    'shape': [1],
                }
            }], json.loads(tf.io.gfile.GFile(output_signature_fn).read()))
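
The files checked above are ordinary TensorFlow SavedModel artifacts, so a saved policy can be loaded back with the standard SavedModel API. A minimal sketch, using a hypothetical path in place of the test's temp directory:

import tensorflow as tf

# '/tmp/policies' is a hypothetical stand-in for the root_dir used in the test.
loaded = tf.saved_model.load('/tmp/policies/saved_policy')
# Serving signatures reflect the specs the policy was exported with.
print(list(loaded.signatures.keys()))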
Example #3
Trains a PPO policy for the LLVM inliner by alternating policy deployment, compilation-based data collection, and training; optionally warm-starts from an existing policy.
def train_eval(agent_name='ppo',
               warmstart_policy_dir=None,
               num_policy_iterations=0,
               num_iterations=100,
               batch_size=64,
               train_sequence_length=1,
               deploy_policy_name='saved_policy'):
    """Train for LLVM inliner."""
    root_dir = FLAGS.root_dir

    # Initialize trainer and policy saver.
    time_step_spec, action_spec = config.create_signature_specs(config.CONFIG)
    tf_agent = agent_creators.create_agent(agent_name, time_step_spec,
                                           action_spec)
    llvm_trainer = trainer.Trainer(root_dir=root_dir, agent=tf_agent)
    policy_dict = {
        'saved_policy': tf_agent.policy,
        'saved_collect_policy': tf_agent.collect_policy,
    }
    saver = policy_saver.PolicySaver(policy_dict=policy_dict)

    # Optionally warm-start by copying all weights from a previously trained
    # policy into the fresh agent (tau=1.0 performs a full hard copy).
    if warmstart_policy_dir:
        warmstart_policy = policy_loader.load(warmstart_policy_dir)
        tf_agent.policy.update(policy=warmstart_policy,
                               tau=1.0,
                               tau_non_trainable=None,
                               sort_variables_by_name=False)

    # Read the corpus index: one module name per line, each with a matching
    # .bc (LLVM bitcode) and .cmd (compile command) file next to it.
    with open(os.path.join(FLAGS.data_path, 'module_paths'), 'r') as f:
        module_paths = [
            os.path.join(FLAGS.data_path, name.rstrip('\n')) for name in f
        ]
    file_paths = [(path + '.bc', path + '.cmd') for path in module_paths]

    runner = inlining_runner.InliningRunner(
        clang_path=FLAGS.clang_path, llvm_size_path=FLAGS.llvm_size_path)

    sequence_example_iterator_fn = (
        data_reader.create_sequence_example_iterator_fn(
            agent_name=agent_name,
            config=config.CONFIG,
            batch_size=batch_size,
            train_sequence_length=train_sequence_length))

    data_collector = local_data_collector.LocalDataCollector(
        file_paths=file_paths,
        num_workers=FLAGS.num_workers,
        num_modules=FLAGS.num_modules,
        runner=runner.collect_data,
        parser=sequence_example_iterator_fn)

    for policy_iteration in range(num_policy_iterations):
        # Snapshot the current policy so collection workers can load it.
        policy_path = os.path.join(root_dir, 'policy', str(policy_iteration))
        saver.save(policy_path)

        # Compile modules under the snapshot policy, then train on the traces.
        dataset_iter = data_collector.collect_data(
            policy_path=os.path.join(policy_path, deploy_policy_name))
        llvm_trainer.train(dataset_iter, num_iterations)

        data_collector.on_dataset_consumed(dataset_iter)

    # Save final policy.
    saver.save(root_dir)
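
For reference, the corpus layout consumed by the module_paths block above is a plain-text index file under FLAGS.data_path plus a .bc/.cmd pair per listed module. A hypothetical sketch that builds such a layout:

import os

data_path = '/tmp/corpus'  # hypothetical corpus root
os.makedirs(os.path.join(data_path, 'lib'), exist_ok=True)

# Index file: one module name per line, relative to data_path.
with open(os.path.join(data_path, 'module_paths'), 'w') as f:
    f.write('lib/foo\n')

# Each listed module needs '<name>.bc' (LLVM bitcode) and '<name>.cmd'
# (its compile command) alongside it; empty placeholders stand in here.
open(os.path.join(data_path, 'lib', 'foo.bc'), 'wb').close()
open(os.path.join(data_path, 'lib', 'foo.cmd'), 'w').close()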