def get_metric(self, metric_name, **kwargs):
        """Look up a metric by dotted name across this set of validation results.

        Two namespaces are supported:
        * ``statistics.<key>`` -- read from the overall ``self.statistics`` dict.
        * ``<expectation_type>.<...>`` -- delegated to the first matching
          per-expectation result, cached under ``(metric_name, metric_kwargs_id)``.

        Raises:
            UnavailableMetricError: if the metric cannot be resolved.
        """
        name_parts = metric_name.split(".")
        metric_kwargs_id = get_metric_kwargs_id(metric_name, kwargs)

        # Overall-statistics namespace: must be exactly "statistics.<key>".
        if name_parts[0] == "statistics":
            if len(name_parts) != 2:
                raise UnavailableMetricError("Unrecognized metric {}".format(metric_name))
            return self.statistics.get(name_parts[1])

        # Expectation-defined metrics: the name starts with the expectation type.
        if name_parts[0].lower().startswith("expect_"):
            cache_key = (metric_name, metric_kwargs_id)
            if cache_key in self._metrics:
                return self._metrics[cache_key]

            found_value = None
            for validation_result in self.results:
                try:
                    if name_parts[0] == validation_result.expectation_config.expectation_type:
                        found_value = validation_result.get_metric(metric_name, **kwargs)
                        break
                except UnavailableMetricError:
                    # This result could not provide the metric; keep scanning.
                    pass
            # NOTE(review): a metric whose value is legitimately None is treated as
            # "not found" here and falls through to the raise below -- confirm intended.
            if found_value is not None:
                self._metrics[cache_key] = found_value
                return found_value

        raise UnavailableMetricError("Metric {} with metric_kwargs_id {} is not available.".format(metric_name,
                                                                                                   metric_kwargs_id))
    def get_metric(self, metric_name, **kwargs):
        """Resolve a metric defined by this single expectation validation result.

        Recognized forms are ``<expectation_type>.success``,
        ``<expectation_type>.result.<key>`` and
        ``<expectation_type>.result.details.<key>``. The supplied kwargs must
        produce the same metric_kwargs_id as this expectation's configuration.

        Raises:
            UnavailableMetricError: for any name or kwargs that cannot be resolved.
        """
        if not self.expectation_config:
            raise UnavailableMetricError("No ExpectationConfig found in this ExpectationValidationResult. Unable to "
                                         "return a metric.")

        name_parts = metric_name.split(".")
        metric_kwargs_id = get_metric_kwargs_id(metric_name, kwargs)

        if name_parts[0] == self.expectation_config.expectation_type:
            # Reject requests whose kwargs do not match this result's configuration.
            curr_metric_kwargs = get_metric_kwargs_id(metric_name, self.expectation_config.kwargs)
            if metric_kwargs_id != curr_metric_kwargs:
                raise UnavailableMetricError("Requested metric_kwargs_id (%s) does not match the configuration of this "
                                             "ExpectationValidationResult (%s)." % (metric_kwargs_id or "None",
                                                                                    curr_metric_kwargs or "None"))

            part_count = len(name_parts)
            if part_count < 2:
                raise UnavailableMetricError("Expectation-defined metrics must include a requested metric.")
            if part_count == 2:
                # The only two-part metric is "<expectation_type>.success".
                if name_parts[1] != "success":
                    raise UnavailableMetricError("Metric name must have more than two parts for keys other than "
                                                 "success.")
                return self.success
            if name_parts[1] == "result":
                try:
                    if part_count == 3:
                        return self.result.get(name_parts[2])
                    if name_parts[2] == "details":
                        return self.result["details"].get(name_parts[3])
                except KeyError:
                    # Surface a missing "details" key as an unavailable metric.
                    raise UnavailableMetricError("Unable to get metric {} -- KeyError in "
                                                 "ExpectationValidationResult.".format(metric_name))
        raise UnavailableMetricError("Unrecognized metric name {}".format(metric_name))