def run_with_metadata(
    self,
    indexed_inputs: Sequence[IndexedInput],
    model: lit_model.Model,
    dataset: lit_dataset.IndexedDataset,
    model_outputs: Optional[List[JsonDict]] = None,
    config: Optional[JsonDict] = None) -> List[JsonDict]:
  """Compute metrics over indexed inputs, one result dict per field pair.

  For every compatible (prediction field, label field) pair, gathers the
  parallel per-example columns (labels, predictions, ids, metadata) and
  delegates to `compute_with_metadata`.

  Args:
    indexed_inputs: examples, each with 'data', 'id', and optional 'meta'.
    model: the model under evaluation.
    dataset: the indexed dataset the examples came from.
    model_outputs: optional precomputed predictions; computed here if None.
    config: optional per-prediction-key config dicts, keyed by pred field.

  Returns:
    A list of {'pred_key', 'label_key', 'metrics'} dicts for the frontend.
  """
  if model_outputs is None:
    model_outputs = list(model.predict_with_metadata(indexed_inputs))

  # TODO(lit-team): pre-compute this mapping in constructor?
  # This would require passing a model name to this function so we can
  # reference a pre-computed list.
  model_spec = model.spec()
  dataset_spec = dataset.spec()
  compatible_fields = map_pred_keys(dataset_spec, model_spec.output,
                                    self.is_compatible)

  def _json_safe(metrics_dict):
    # NaN is not a valid JSON value, so replace with None which will be
    # serialized as null.
    # TODO(lit-team): move this logic into serialize.py somewhere instead?
    return {k: (None if np.isnan(v) else v) for k, v in metrics_dict.items()}

  results = []
  for pred_field, label_field in compatible_fields.items():
    # Extract the parallel per-example columns this metric needs.
    labels = [ex['data'][label_field] for ex in indexed_inputs]
    preds = [mo[pred_field] for mo in model_outputs]
    ids = [ex['id'] for ex in indexed_inputs]
    metas = [ex.get('meta', {}) for ex in indexed_inputs]
    field_config = config.get(pred_field) if config else None
    # Compute metrics, as dict(str -> float).
    raw_metrics = self.compute_with_metadata(
        labels,
        preds,
        label_spec=dataset_spec[label_field],
        pred_spec=model_spec.output[pred_field],
        indices=ids,
        metas=metas,
        config=field_config)
    # Format for frontend.
    results.append({
        'pred_key': pred_field,
        'label_key': label_field,
        'metrics': _json_safe(raw_metrics),
    })
  return results
def run(self,
        inputs: List[JsonDict],
        model: lit_model.Model,
        dataset: lit_dataset.Dataset,
        model_outputs: Optional[List[JsonDict]] = None,
        config: Optional[JsonDict] = None):
  """Run the wrapped metrics with per-input margins injected as config.

  For each compatible prediction key, computes a margin per input (from the
  caller-supplied config, if any) and forwards everything to the wrapped
  `self._metrics.run` with those margins as its config.

  Args:
    inputs: input examples.
    model: the model under evaluation.
    dataset: the dataset the examples came from.
    model_outputs: optional precomputed predictions, passed through.
    config: optional per-prediction-key margin config.

  Returns:
    Whatever the wrapped metrics component returns.
  """
  # Get margin for each input for each pred key and add them to a config dict
  # to pass to the wrapped metrics.
  compatible_fields = map_pred_keys(dataset.spec(), model.spec().output,
                                    self.is_compatible)
  margin_config = {}
  for key in compatible_fields:
    key_config = config.get(key) if config else None
    margin_config[key] = [
        get_margin_for_input(key_config, example) for example in inputs
    ]
  return self._metrics.run(inputs, model, dataset, model_outputs,
                           margin_config)
def run(self,
        inputs: List[JsonDict],
        model: lit_model.Model,
        dataset: lit_dataset.Dataset,
        model_outputs: Optional[List[JsonDict]] = None,
        config: Optional[JsonDict] = None):
  """Compute metrics over raw inputs, one result dict per field pair.

  For every compatible (prediction field, label field) pair, gathers the
  parallel label and prediction columns and delegates to `compute`.

  Args:
    inputs: input examples (flat dicts keyed by field name).
    model: the model under evaluation.
    dataset: the dataset the examples came from.
    model_outputs: optional precomputed predictions; computed here if None.
    config: optional per-prediction-key config dicts, keyed by pred field.

  Returns:
    A list of {'pred_key', 'label_key', 'metrics'} dicts for the frontend.
  """
  if model_outputs is None:
    model_outputs = list(model.predict(inputs))

  model_spec = model.spec()
  dataset_spec = dataset.spec()
  compatible_fields = map_pred_keys(dataset_spec, model_spec.output,
                                    self.is_compatible)

  results = []
  for pred_field, label_field in compatible_fields.items():
    # Extract the parallel label and prediction columns.
    labels = [ex[label_field] for ex in inputs]
    preds = [mo[pred_field] for mo in model_outputs]
    field_config = config.get(pred_field) if config else None
    # Compute metrics, as dict(str -> float).
    raw_metrics = self.compute(
        labels,
        preds,
        label_spec=dataset_spec[label_field],
        pred_spec=model_spec.output[pred_field],
        config=field_config)
    # NaN is not a valid JSON value, so replace with None which will be
    # serialized as null.
    # TODO(lit-team): move this logic into serialize.py somewhere instead?
    safe_metrics = {
        k: (None if np.isnan(v) else v) for k, v in raw_metrics.items()
    }
    # Format for frontend.
    results.append({
        'pred_key': pred_field,
        'label_key': label_field,
        'metrics': safe_metrics,
    })
  return results