Example #1
    def make_policy():
      """Returns one copy of the model."""
      artifact = {}
      if cfg.instruction_repr == 'language':
        trainable_encoder = cfg.trainable_encoder
        print('The encoder is trainable: {}'.format(trainable_encoder))
        embedding = tf.get_variable(
            name='word_embedding',
            shape=(cfg.vocab_size, cfg.embedding_size),
            dtype=tf.float32,
            trainable=trainable_encoder)
        _, goal_embedding = encoder(
            self.word_inputs,
            embedding,
            cfg.encoder_n_unit,
            trainable=trainable_encoder)
        artifact['embedding'] = embedding
      elif cfg.instruction_repr == 'one_hot':
        print('Goal input for one-hot max len {}'.format(
            cfg.max_sequence_length))
        one_hot_goal = tf.one_hot(self.word_inputs, cfg.max_sequence_length)
        one_hot_goal.set_shape([None, cfg.max_sequence_length])
        layer_cfg = [cfg.max_sequence_length // 8, cfg.encoder_n_unit]
        goal_embedding = stack_dense_layer(one_hot_goal, layer_cfg)
      else:
        raise ValueError('Unrecognized instruction type: {}'.format(
            cfg.instruction_repr))
      artifact['goal_embedding'] = goal_embedding
      all_q = self.build_q_factor_discrete(cfg, goal_embedding)

      predict_action = tf.argmax(all_q, axis=-1)
      action = tf.placeholder(shape=None, dtype=tf.int32)
      action_onehot = tf.one_hot(
          action, cfg.ac_dim[0], dtype=tf.float32)
      q = tf.reduce_sum(
          tf.multiply(all_q, action_onehot), axis=1)
      artifact.update(
          {
              'all_q': all_q,
              'predict_action': predict_action,
              'action_ph': action,
              'action_onehot': action_onehot,
              'q': q
          }
      )
      return artifact
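
For reference, a minimal usage sketch of the artifact dictionary returned above (not from the original source). It assumes a TF1-style graph and session, that it runs in the same scope that defines make_policy (so self and cfg resolve as in the snippet), and that self.word_inputs is an int32 placeholder for tokenized instructions; the dummy input shape below is an assumption.

    import numpy as np
    import tensorflow.compat.v1 as tf  # TF1 graph mode, as the code above assumes
    tf.disable_v2_behavior()

    artifact = make_policy()

    with tf.Session() as sess:
      sess.run(tf.global_variables_initializer())
      # Dummy tokenized instruction; the real shape depends on how
      # self.word_inputs is declared in the enclosing class.
      dummy_tokens = np.zeros((1, cfg.max_sequence_length), dtype=np.int32)
      q_values, greedy_action = sess.run(
          [artifact['all_q'], artifact['predict_action']],
          feed_dict={self.word_inputs: dummy_tokens})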
Example #2
        def make_policy():
            """Build one copy of the model."""
            artifact = {}
            if cfg.instruction_repr == 'language':
                trainable_encoder = cfg.trainable_encoder
                print('The encoder is trainable: {}'.format(trainable_encoder))
                embedding = tf.get_variable(name='word_embedding',
                                            shape=(cfg.vocab_size,
                                                   cfg.embedding_size),
                                            dtype=tf.float32,
                                            trainable=trainable_encoder)
                _, goal_embedding = encoder(self.word_inputs,
                                            embedding,
                                            cfg.encoder_n_unit,
                                            trainable=trainable_encoder)
                artifact['embedding'] = embedding
            elif cfg.instruction_repr == 'one_hot':
                print('Goal input for one-hot max len {}'.format(
                    cfg.max_sequence_length))
                one_hot_goal = tf.one_hot(self.word_inputs,
                                          cfg.max_sequence_length)
                one_hot_goal.set_shape([None, cfg.max_sequence_length])
                layer_cfg = [cfg.max_sequence_length // 8, cfg.encoder_n_unit]
                goal_embedding = stack_dense_layer(one_hot_goal, layer_cfg)
            else:
                raise ValueError('Unrecognized instruction type: {}'.format(
                    cfg.instruction_repr))
            artifact['goal_embedding'] = goal_embedding

            if cfg.action_type == 'perfect':
                print('using perfect action Q function...')
                all_q, predict_object, predict_object_action = self.build_q_perfect(
                    cfg, goal_embedding)
                predict_action = tf.stack(
                    [predict_object, predict_object_action], axis=1)
                action = tf.placeholder(shape=(None, 2), dtype=tf.int32)
                stacked_indices = tf.concat([
                    tf.expand_dims(tf.range(0,
                                            tf.shape(action)[0]), axis=1),
                    action
                ],
                                            axis=1)
                q = tf.gather_nd(all_q, stacked_indices)
                artifact.update({
                    'all_q': all_q,
                    'predict_object': predict_object,
                    'predict_object_action': predict_object_action,
                    'predict_action': predict_action,
                    'action_ph': action,
                    'q': q,
                })
            elif cfg.action_type == 'discrete':
                print('using discrete action Q function...')
                ac_dim = cfg.per_input_ac_dim[0]
                all_q = self.build_q_discrete(goal_embedding, ac_dim)
                predict_action = tf.argmax(all_q, axis=-1)
                action = tf.placeholder(shape=None, dtype=tf.int32)
                action_onehot = tf.one_hot(action, ac_dim, dtype=tf.float32)
                q = tf.reduce_sum(tf.multiply(all_q, action_onehot), axis=1)
                artifact.update({
                    'all_q': all_q,
                    'predict_action': predict_action,
                    'action_ph': action,
                    'action_onehot': action_onehot,
                    'q': q,
                })
            else:
                raise ValueError('Unrecognized action type: {}'.format(
                    cfg.action_type))
            return artifact
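
The 'perfect' branch above picks one Q-value per example by prepending the batch index to each (object, action) pair before tf.gather_nd. The following NumPy sketch (toy shapes and values invented for illustration, not from the original source) reproduces that indexing so the effect of stacked_indices can be checked in isolation:

    import numpy as np

    # all_q: (batch, n_objects, n_actions); each row of `action` is an
    # (object, action) pair for the corresponding example in the batch.
    all_q = np.arange(2 * 3 * 4, dtype=np.float32).reshape(2, 3, 4)
    action = np.array([[1, 2], [0, 3]])

    batch_idx = np.arange(all_q.shape[0])[:, None]                 # (batch, 1)
    stacked_indices = np.concatenate([batch_idx, action], axis=1)  # (batch, 3)

    # Equivalent of tf.gather_nd(all_q, stacked_indices): one scalar per example.
    q = all_q[stacked_indices[:, 0], stacked_indices[:, 1], stacked_indices[:, 2]]
    print(q)  # [ 6. 15.]  -> all_q[0, 1, 2] and all_q[1, 0, 3]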