def predict_fn(model_target, batch):
    """Embeds `batch` with the model, optionally collapsing via `reduce_fn`."""
    embeddings = models.predict_step(
        model_target,
        batch,
        bos_token=model.bos_token,
        output_head=output_head)
    if not reduce_fn:
        return embeddings
    return reduce_fn(embeddings)
def predict_fn(model_target, inputs):
    """Embeds `inputs` with the model, optionally collapsing via `reduce_fn`."""
    embeddings = models.predict_step(
        model_target,
        inputs,
        preprocess_fn=model.preprocess,
        output_head=output_head)
    if not reduce_fn:
        return embeddings
    return reduce_fn(embeddings)
def predict_fn(model_target, inputs):
    """Embeds `inputs`; the reduction also receives `inputs` for masking."""
    embeddings = models.predict_step(
        model_target,
        inputs,
        preprocess_fn=model.preprocess,
        output_head=output_head)
    if not reduce_fn:
        return embeddings
    # The raw inputs are forwarded so the reduction can do
    # padding-aware aggregation.
    return reduce_fn(embeddings, inputs)
def test_output_head(self, output_head, multiple_heads):
    """Checks predict_step's output structure for single vs. multiple heads."""
    domain = domains.FixedLengthDiscreteDomain(vocab_size=2, length=2)
    inputs = domain.sample_uniformly(8)
    lm = lm_cls(domain=domain, pmap=False)
    outputs = models.predict_step(
        lm.optimizer.target,
        inputs,
        preprocess_fn=lm.preprocess,
        output_head=output_head)
    if not multiple_heads:
        # A single head returns just the logits tensor.
        expected_shape = (inputs.shape[0], inputs.shape[1], lm.vocab_size)
        self.assertEqual(outputs.shape, expected_shape)
    else:
        # Multiple heads return a dict with one entry per requested head.
        self.assertIsInstance(outputs, dict)
        self.assertLen(outputs, len(output_head))