def evaluate(self, x, y=None, batch_size=32, **kwargs):
    """Evaluate the best model for the given data.

    # Arguments
        x: Any allowed types according to the input node. Testing data.
        y: Any allowed types according to the head. Testing targets.
            Defaults to None.
        batch_size: Int. Number of samples per batch.
            If unspecified, batch_size will default to 32.
        **kwargs: Any arguments supported by keras.Model.evaluate.

    # Returns
        Scalar test loss (if the model has a single output and no
        metrics) or list of scalars (if the model has multiple outputs
        and/or metrics). The attribute model.metrics_names will give you
        the display labels for the scalar outputs.
    """
    self._check_data_format((x, y))
    if isinstance(x, tf.data.Dataset):
        # Split a combined (features, targets) dataset into the two
        # streams the adapters expect.
        dataset = x
        x = dataset.map(lambda x, y: x)
        y = dataset.map(lambda x, y: y)
    x = self._adapt(x, self.inputs, batch_size)
    y = self._adapt(y, self._heads, batch_size)
    dataset = tf.data.Dataset.zip((x, y))
    # Apply the preprocessing pipeline of the best trial before scoring.
    pipeline = self.tuner.get_best_pipeline()
    dataset = pipeline.transform(dataset)
    model = self.tuner.get_best_model()
    return utils.evaluate_with_adaptive_batch_size(
        model=model, batch_size=batch_size, x=dataset, **kwargs
    )
def evaluate(self, x, y=None, batch_size=32, verbose=1, **kwargs):
    """Score the best discovered model on held-out data.

    # Arguments
        x: Any allowed types according to the input node. Testing data.
        y: Any allowed types according to the head. Testing targets.
            Defaults to None.
        batch_size: Number of samples per batch. If unspecified,
            batch_size will default to 32.
        verbose: Verbosity mode. 0 = silent, 1 = progress bar.
            Controls the verbosity of
            [keras.Model.evaluate](http://tensorflow.org/api_docs/python/tf/keras/Model#evaluate)
        **kwargs: Any arguments supported by keras.Model.evaluate.

    # Returns
        Scalar test loss (if the model has a single output and no
        metrics) or list of scalars (if the model has multiple outputs
        and/or metrics). The attribute model.metrics_names will give you
        the display labels for the scalar outputs.
    """
    self._check_data_format((x, y))
    if isinstance(x, tf.data.Dataset):
        # A combined dataset carries both features and targets; pull
        # them apart so each side can be adapted independently.
        combined = x
        x = combined.map(lambda feat, tgt: feat)
        y = combined.map(lambda feat, tgt: tgt)
    adapted_x = self._adapt(x, self.inputs, batch_size)
    adapted_y = self._adapt(y, self._heads, batch_size)
    zipped = tf.data.Dataset.zip((adapted_x, adapted_y))
    # Run the winning trial's preprocessing pipeline before evaluation.
    best_pipeline = self.tuner.get_best_pipeline()
    transformed = best_pipeline.transform(zipped)
    best_model = self.tuner.get_best_model()
    return utils.evaluate_with_adaptive_batch_size(
        model=best_model,
        batch_size=batch_size,
        x=transformed,
        verbose=verbose,
        **kwargs,
    )