Example #1
    def evaluate_generator(self,
                           feed_dict_generator,
                           metrics,
                           transformers=[],
                           labels=None,
                           outputs=None,
                           weights=[],
                           per_task_metrics=False):

        if labels is None:
            raise ValueError("labels must be provided to evaluate_generator.")
        # Infer the number of tasks and classes from the model's output layers.
        n_tasks = len(self.outputs)
        n_classes = self.outputs[0].out_tensor.get_shape()[-1].value
        evaluator = GeneratorEvaluator(self,
                                       feed_dict_generator,
                                       transformers,
                                       labels=labels,
                                       outputs=outputs,
                                       weights=weights,
                                       n_tasks=n_tasks,
                                       n_classes=n_classes)
        if not per_task_metrics:
            scores = evaluator.compute_model_performance(metrics)
            return scores
        else:
            scores, per_task_scores = evaluator.compute_model_performance(
                metrics, per_task_metrics=per_task_metrics)
            return scores, per_task_scores
Example #2
  def evaluate_generator(self,
                         generator: Iterable[Tuple[Any, Any, Any]],
                         metrics: List[Metric],
                         transformers: List[Transformer] = [],
                         per_task_metrics: bool = False):
    """Evaluate the performance of this model on the data produced by a generator.

    Parameters
    ----------
    generator: generator
      this should generate batches, each represented as a tuple of the form
      (inputs, labels, weights).
    metrics: list of deepchem.metrics.Metric
      Evaluation metrics to compute.
    transformers: list of dc.trans.Transformers
      Transformers that the input data has been transformed by.  The output
      is passed through these transformers to undo the transformations.
    per_task_metrics: bool
      If True, return per-task scores.

    Returns
    -------
    dict
      Maps each metric name to the corresponding score.
    """
    evaluator = GeneratorEvaluator(self, generator, transformers)
    return evaluator.compute_model_performance(metrics, per_task_metrics)
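A minimal usage sketch for the signature in Example #2, assuming a small dc.models.MultitaskRegressor and its default_generator (the dataset shapes and the brief fit call are illustrative additions, not taken from the examples):

import numpy as np
import deepchem as dc

# Toy regression dataset: 20 samples, 5 features, 1 task (illustrative values).
X = np.random.rand(20, 5)
y = np.random.rand(20, 1)
dataset = dc.data.NumpyDataset(X, y)

model = dc.models.MultitaskRegressor(n_tasks=1, n_features=5)
model.fit(dataset, nb_epoch=1)

# default_generator yields (inputs, labels, weights) batches, which is the
# form evaluate_generator expects.
generator = model.default_generator(dataset, pad_batches=False)
metric = dc.metrics.Metric(dc.metrics.mae_score)
scores = model.evaluate_generator(generator, [metric])
print(scores)  # e.g. {'mae_score': ...}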
Example #3
  def evaluate_generator(self,
                         feed_dict_generator,
                         metrics,
                         transformers=[],
                         labels=None,
                         outputs=None,
                         weights=[],
                         per_task_metrics=False):

    if labels is None:
      raise ValueError("labels must be provided to evaluate_generator.")
    evaluator = GeneratorEvaluator(
        self,
        feed_dict_generator,
        transformers,
        labels=labels,
        outputs=outputs,
        weights=weights)
    if not per_task_metrics:
      scores = evaluator.compute_model_performance(metrics)
      return scores
    else:
      scores, per_task_scores = evaluator.compute_model_performance(
          metrics, per_task_metrics=per_task_metrics)
      return scores, per_task_scores
Example #4
  def evaluate_generator(self,
                         feed_dict_generator,
                         metrics,
                         transformers=[],
                         labels=None,
                         outputs=None,
                         weights=[],
                         per_task_metrics=False):

    if labels is None:
      raise ValueError("labels must be provided to evaluate_generator.")
    n_tasks = len(self.outputs)
    n_classes = self.outputs[0].out_tensor.get_shape()[-1].value
    evaluator = GeneratorEvaluator(
        self,
        feed_dict_generator,
        transformers,
        labels=labels,
        outputs=outputs,
        weights=weights,
        n_tasks=n_tasks,
        n_classes=n_classes)
    if not per_task_metrics:
      scores = evaluator.compute_model_performance(metrics)
      return scores
    else:
      scores, per_task_scores = evaluator.compute_model_performance(
          metrics, per_task_metrics=per_task_metrics)
      return scores, per_task_scores
Example #5
import numpy as np
import deepchem as dc
from deepchem.utils.evaluate import GeneratorEvaluator


def test_generator_evaluator_dc_metric_multitask():
  """Test generator evaluator on a generator."""
  X = np.random.rand(10, 5)
  y = np.random.rand(10, 1)
  dataset = dc.data.NumpyDataset(X, y)
  model = dc.models.MultitaskRegressor(1, 5)
  generator = model.default_generator(dataset, pad_batches=False)
  evaluator = GeneratorEvaluator(model, generator, [])
  metric = dc.metrics.Metric(dc.metrics.mae_score)
  multitask_scores = evaluator.compute_model_performance(metric)
  assert isinstance(multitask_scores, dict)
  assert len(multitask_scores) == 1
  assert multitask_scores['mae_score'] > 0
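For the per_task_metrics path exercised by Examples #1-#4, a hypothetical companion test under the same assumptions as above (two regression tasks; the exact layout of the per-task result dict may vary by DeepChem version, so the assertions are kept loose):

def test_generator_evaluator_per_task_metrics():
  """Hypothetical companion test: per-task scores on a 2-task regressor."""
  X = np.random.rand(10, 5)
  y = np.random.rand(10, 2)
  dataset = dc.data.NumpyDataset(X, y)
  model = dc.models.MultitaskRegressor(2, 5)
  generator = model.default_generator(dataset, pad_batches=False)
  evaluator = GeneratorEvaluator(model, generator, [])
  metric = dc.metrics.Metric(dc.metrics.mae_score)
  # With per_task_metrics=True the evaluator returns both the aggregate and
  # the per-task scores.
  multitask_scores, per_task_scores = evaluator.compute_model_performance(
      metric, per_task_metrics=True)
  assert isinstance(multitask_scores, dict)
  assert isinstance(per_task_scores, dict)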