def output_spec(self):
    """Declare the model outputs: class predictions and image gradients."""
    spec = {}
    spec['preds'] = lit_types.MulticlassPreds(vocab=self.LABELS)
    spec['grads'] = lit_types.ImageGradients(
        align='image', grad_target_field_key='grad_target')
    return spec
def output_spec(self):
    """Declare the model outputs: binary probabilities and token embeddings."""
    probas = lit_types.MulticlassPreds(parent='label', vocab=['0', '1'],
                                       null_idx=0)
    embeddings = lit_types.TokenEmbeddings(align='tokens')
    return {'probas': probas, 'input_embs': embeddings}
def output_spec(self):
    """Outputs: class predictions, image gradients, and the gradient target."""
    spec = {}
    spec['preds'] = lit_types.MulticlassPreds(vocab=self.labels, autosort=True)
    spec['grads'] = lit_types.ImageGradients(
        align='image', grad_target_field_key='grad_target')
    spec['grad_target'] = lit_types.CategoryLabel(vocab=self.labels)
    return spec
def output_spec(self) -> lit_types.Spec:
    """Outputs: tokens, class probabilities, and the classifier embedding."""
    spec = {}
    spec["tokens"] = lit_types.Tokens()
    spec["probas"] = lit_types.MulticlassPreds(
        parent="label", vocab=self.LABELS, null_idx=0)
    spec["cls_emb"] = lit_types.Embeddings()
    return spec
def test_is_compatible(self):
    """CorpusBLEU should accept GeneratedText fields and reject all others."""
    # Renamed from `corpusblue_metrics`: the metric is BLEU, not "blue".
    corpus_bleu_metrics = metrics.CorpusBLEU()
    # Only compatible with GeneratedText spec.
    self.assertTrue(corpus_bleu_metrics.is_compatible(types.GeneratedText()))
    self.assertFalse(
        corpus_bleu_metrics.is_compatible(types.MulticlassPreds(vocab=[''])))
    self.assertFalse(
        corpus_bleu_metrics.is_compatible(types.RegressionScore()))
def test_is_compatible(self):
    """MulticlassMetrics is compatible with MulticlassPreds and nothing else."""
    metric = metrics.MulticlassMetrics()
    # Only compatible with MulticlassPreds spec.
    self.assertTrue(metric.is_compatible(types.MulticlassPreds(vocab=[''])))
    self.assertFalse(metric.is_compatible(types.RegressionScore()))
    self.assertFalse(metric.is_compatible(types.GeneratedText()))
def output_spec(self):
    """Outputs: probabilities, the [CLS] embedding, its gradients, and the gradient target class."""
    spec = {}
    spec['probas'] = lit_types.MulticlassPreds(
        parent='label', vocab=['0', '1'], null_idx=0)
    spec['cls_emb'] = lit_types.Embeddings()
    spec['cls_grad'] = lit_types.Gradients(
        grad_for='cls_emb', grad_target='grad_class')
    spec['grad_class'] = lit_types.CategoryLabel()
    return spec
def output_spec(self):
    """Outputs: probabilities plus token embeddings and their gradients."""
    spec = {}
    spec['probas'] = lit_types.MulticlassPreds(
        parent='label', vocab=['0', '1'], null_idx=0)
    spec['input_embs'] = lit_types.TokenEmbeddings(align='tokens')
    spec['input_embs_grad'] = lit_types.TokenGradients(
        align='tokens', grad_for='input_embs', grad_target='grad_class')
    spec['tokens'] = lit_types.Tokens()
    spec['grad_class'] = lit_types.CategoryLabel(vocab=['0', '1'])
    return spec
def output_spec(self):
    """Outputs: tokens, coreference edges, and the predicted answer."""
    # TODO(lit-dev): also return the embeddings for each span on datasets
    # with a fixed number of targets; for Winogender this would be
    # {occupation, other participant, pronoun}
    spec = {}
    spec['tokens'] = lit_types.Tokens(parent='text')
    spec['coref'] = lit_types.EdgeLabels(align='tokens')
    spec['pred_answer'] = lit_types.MulticlassPreds(
        vocab=winogender.ANSWER_VOCAB, parent='answer')
    return spec
def output_spec(self) -> Spec:
    """Build the output spec: tokens, head outputs, embeddings, and optional gradient/attention fields.

    Several fields are conditional on the model configuration
    (`text_b_name`, `is_regression`, `compute_grads`); fields are added in
    a fixed order.
    """
    ret = {"tokens": lit_types.Tokens()}
    # Per-segment token fields, each aligned to its source text field.
    ret["tokens_" + self.config.text_a_name] = lit_types.Tokens(
        parent=self.config.text_a_name)
    if self.config.text_b_name:
        ret["tokens_" + self.config.text_b_name] = lit_types.Tokens(
            parent=self.config.text_b_name)
    # Head output: a scalar score for regression, class probabilities otherwise.
    if self.is_regression:
        ret["score"] = lit_types.RegressionScore(parent=self.config.label_name)
    else:
        ret["probas"] = lit_types.MulticlassPreds(
            parent=self.config.label_name,
            vocab=self.config.labels,
            null_idx=self.config.null_label_idx)
    ret["cls_emb"] = lit_types.Embeddings()
    # Average embeddings, one per layer including embeddings.
    for i in range(1 + self.model.config.num_hidden_layers):
        ret[f"layer_{i}/avg_emb"] = lit_types.Embeddings()
    # NOTE(review): "cls_grad" is always emitted and targets "grad_class",
    # but "grad_class" itself is only added below when compute_grads is set
    # — confirm consumers tolerate the missing target field otherwise.
    ret["cls_grad"] = lit_types.Gradients(
        grad_for="cls_emb", grad_target_field_key="grad_class")
    # The input_embs_ and grad_class fields are used for Integrated Gradients.
    ret["input_embs_" + self.config.text_a_name] = lit_types.TokenEmbeddings(
        align="tokens_" + self.config.text_a_name)
    if self.config.text_b_name:
        ret["input_embs_" + self.config.text_b_name] = lit_types.TokenEmbeddings(
            align="tokens_" + self.config.text_b_name)
    # Gradients, if requested.
    if self.config.compute_grads:
        ret["grad_class"] = lit_types.CategoryLabel(required=False,
                                                    vocab=self.config.labels)
        # One token-gradient field per text segment, each tied to its own
        # embeddings field and the shared grad_class target.
        ret["token_grad_" + self.config.text_a_name] = lit_types.TokenGradients(
            align="tokens_" + self.config.text_a_name,
            grad_for="input_embs_" + self.config.text_a_name,
            grad_target_field_key="grad_class")
        if self.config.text_b_name:
            ret["token_grad_" + self.config.text_b_name] = lit_types.TokenGradients(
                align="tokens_" + self.config.text_b_name,
                grad_for="input_embs_" + self.config.text_b_name,
                grad_target_field_key="grad_class")
    # Attention heads, one field for each layer (1-indexed keys here,
    # unlike the 0-indexed avg_emb fields above).
    for i in range(self.model.config.num_hidden_layers):
        ret[f"layer_{i+1}/attention"] = lit_types.AttentionHeads(
            align_in="tokens", align_out="tokens")
    return ret
def output_spec(self) -> lit_types.Spec:
    """Give the output specifications."""
    spec = {
        "tokens": lit_types.Tokens(),
        "probas": lit_types.MulticlassPreds(parent="label", vocab=self.LABELS),
        "cls_emb": lit_types.Embeddings(),
    }
    # Gradients, if requested.
    if self.compute_grads:
        spec["token_grad_sentence"] = lit_types.TokenGradients(align="tokens")
    # Attention heads, one field for each layer.
    for layer in range(self.model.config.num_hidden_layers):
        spec[f"layer_{layer}/attention"] = lit_types.AttentionHeads(
            align=("tokens", "tokens"))
    return spec
def output_spec(self) -> lit_types.Spec:
    """Tagging outputs plus one attention field per transformer layer."""
    out = {}
    out["tokens"] = lit_types.Tokens()
    out["bio_tags"] = lit_types.SequenceTags(align="tokens")
    out["token_ids"] = lit_types.SequenceTags(align="tokens")
    out["grads"] = lit_types.TokenGradients(align="tokens")
    out["probas"] = lit_types.MulticlassPreds(parent="bio_tags",
                                              vocab=self.LABELS)
    for layer in range(self.model.config.num_hidden_layers):
        out[f'layer_{layer}/attention'] = lit_types.AttentionHeads(
            align=("tokens", "tokens"))
    return out
def testTfxModel(self):
    """Run one prediction through TFXModel and verify output shape and specs."""
    input_spec = {'input_0': lit_types.Scalar()}
    output_spec = {
        'output_0': lit_types.MulticlassPreds(vocab=['0', '1'],
                                              parent='input_0')
    }
    config = tfx_model.TFXModelConfig(self._path, input_spec, output_spec)
    lit_model = tfx_model.TFXModel(config)
    predictions = list(lit_model.predict([{'input_0': 0.5}]))
    self.assertLen(predictions, 1)
    prediction = predictions[0]
    self.assertListEqual(list(prediction.keys()), ['output_0'])
    # Two-class output: both scores must be plain floats.
    self.assertLen(prediction['output_0'], 2)
    self.assertIsInstance(prediction['output_0'][0], float)
    self.assertIsInstance(prediction['output_0'][1], float)
    self.assertDictEqual(lit_model.input_spec(), input_spec)
    self.assertDictEqual(lit_model.output_spec(), output_spec)
def output_spec(self):
    """Coreference outputs plus a scalar carried along for plotting."""
    # TODO(lit-dev): also return the embeddings for each span on datasets
    # with a fixed number of targets; for Winogender this would be
    # {occupation, other participant, pronoun}
    spec = {}
    spec['tokens'] = lit_types.Tokens(parent='text')
    spec['coref'] = lit_types.EdgeLabels(align='tokens')
    spec['pred_answer'] = lit_types.MulticlassPreds(
        vocab=winogender.ANSWER_VOCAB, parent='answer')
    # TODO(b/172975096): allow plotting of scalars from input data,
    # so we don't need to add this to the predictions.
    spec['pf_bls'] = lit_types.Scalar()
    return spec
def setUp(self):
    """Create shared fixtures: two examples, their predictions, and a binary pred spec."""
    # Zero-argument super() (Python 3) replaces the explicit
    # super(ClassifcationMarginTest, self) form, which hard-coded the
    # (typo'd) class name and would break silently if the class is renamed.
    super().setUp()
    self.inputs = [{'s': 'hi', 'n': 2}, {'s': 'bye', 'n': 1}]
    self.preds = [[0.3, 0.7], [0.6, 0.4]]
    self.pred_spec = types.MulticlassPreds(vocab=['0', '1'], null_idx=0)
def output_spec(self):
    """A single output field: binary class probabilities."""
    probas = lit_types.MulticlassPreds(vocab=['0', '1'])
    return {'probas': probas}
def output_spec(self):
    """Predicted species as a distribution over the species vocabulary."""
    species_preds = lit_types.MulticlassPreds(
        parent='species', vocab=VOCABS['species'])
    return {'predicted_species': species_preds}