    # Note: these methods assume module-level imports of `copy` and
    # `numpy as np`, plus a project-local `utils` module providing the
    # helpers used below (assert_non_empty, across_runs_preprocess, etc.).

    def __call__(self, curves):
        """Computes normalized dispersion across runs.

        Args:
          curves: A list of learning curves, each a 2D numpy array where
            curve[0, :] is the timepoint variable and curve[1, :] is the
            dependent variable.

        Returns:
          Dispersion across runs, computed at each of the eval_points.
          (Numpy array with length n_eval_points.)
        """
        utils.assert_non_empty(curves)

        # Perform preprocessing for across-runs metrics.
        eval_point_values = utils.across_runs_preprocess(
            curves, self.eval_points, self.window_size, self.lowpass_thresh)

        # Compute dispersion across curves.
        result = self._dispersion_fn(eval_point_values)

        if self.baseline == 'curve_range':
            curve_ranges = utils.curve_range(curves)
            result = result / np.median(curve_ranges)
        elif self.baseline:
            result = result / self.baseline

        return result
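
    # Shape sketch (hedged; assumes across_runs_preprocess returns an array of
    # shape (n_runs, n_eval_points) and that self._dispersion_fn reduces over
    # the run axis, e.g. an IQR with axis=0):
    #   eval_point_values = np.array([[1., 2.],
    #                                 [3., 4.],
    #                                 [5., 6.]])   # 3 runs, 2 eval points
    #   # The dispersion function yields one value per column (length 2),
    #   # which is then optionally divided by the baseline.
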
    def __call__(self, curves):
        """Computes the energy of the signal above a given frequency threshold.

        Normalized by the total energy of the signal at positive frequencies
        (the DC component is excluded from the normalizer).

        Args:
          curves: A list of learning curves, each a 2D numpy array where
            curve[0, :] is the timepoint variable and curve[1, :] is the
            dependent variable.

        Returns:
          Amount of energy above the given frequency threshold, normalized by
          the total energy at positive frequencies. (A list with one value per
          curve.)
        """
        utils.assert_non_empty(curves)

        energies = []
        for curve in curves:
            data = curve[1, :]
            power_spectrum = np.abs(np.fft.fft(data))**2
            time_step = curve[0, 1] - curve[0, 0]
            # TODO(scychan) above assumes equal spacing
            freqs = np.fft.fftfreq(data.size, time_step)
            energy_above_thresh = (
                np.sum(power_spectrum[freqs > self.thresh]) /
                np.sum(power_spectrum[freqs > 0]))
            energies.append(energy_above_thresh)

        return energies
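
    # Worked example: a pure low-frequency signal should yield a fraction
    # near 0 for a high threshold.
    #   t = np.arange(0, 1, 0.01)                         # step 0.01 -> fftfreq spans +/-50 Hz
    #   curve = np.stack([t, np.sin(2 * np.pi * 2 * t)])  # 2 Hz sinusoid
    #   # With self.thresh = 10.0, nearly all spectral energy sits at 2 Hz,
    #   # so energy_above_thresh for this curve is close to 0.
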
    def __call__(self, curves):
        """Computes median performance.

        Args:
          curves: A list of learning curves, each a 2D numpy array where
            curve[0, :] is the timepoint variable and curve[1, :] is the
            dependent variable.

        Returns:
          Median performance, computed in a window at each eval_point for each
          run. (Numpy array with size n_run x n_eval_points.)
        """
        utils.assert_non_empty(curves)

        # Determine eval_points and window_size, if needed.
        eval_points = copy.deepcopy(self.eval_points)
        window_size = copy.deepcopy(self.window_size)
        if eval_points is None or window_size is None:
            if window_size is None:
                valid_eval_points = utils.get_all_valid_eval_points(curves, 1)
                window_size = (
                    valid_eval_points.max() - valid_eval_points.min() + 1)
            if eval_points is None:
                eval_points = utils.get_all_valid_eval_points(
                    curves, window_size)

        curves = self._normalize(curves)

        perf = utils.apply_window_fn(curves, eval_points, np.median,
                                     window_size)
        return perf
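
    # Windowing sketch (hedged; the exact window alignment, e.g. centered vs.
    # trailing, is determined by utils.apply_window_fn):
    #   curves = [np.array([[0, 1, 2, 3, 4], [1., 9., 2., 8., 3.]])]
    #   # With eval_points=[2] and window_size=5, a centered window covers all
    #   # five values, giving a median of 3.0 and perf of shape (1, 1).
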
    def __call__(self, rollout_sets):
        """Computes dispersion across rollouts.

        Args:
          rollout_sets: A list of rollout sets, with length n_rollout_sets.
            Each element of the list corresponds to the performance values of
            one set of rollouts that we will measure dispersion across (e.g.
            for a single model checkpoint). Each element is a 2D numpy array
            where rollout_set[0, :] is just an index variable (e.g.
            range(0, n_rollouts)) and rollout_set[1, :] are the performances
            per rollout.

        Returns:
          Dispersion across rollouts, computed for each rollout set.
          (1-D Numpy array with length n_rollout_sets.)
        """
        utils.assert_non_empty(rollout_sets)

        dispersions = []
        for rollout_set in rollout_sets:
            dispersion = self._dispersion_fn(rollout_set[1, :])
            dispersions.append(dispersion)

        dispersions = np.array(dispersions)

        if self.baseline:
            if self.baseline == 'median_perf':
                divisor = utils.median_rollout_performance(rollout_sets)
            else:
                divisor = self.baseline
            dispersions /= divisor
        return dispersions
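
    # Example (hedged; assumes _dispersion_fn is an IQR-style statistic):
    #   rollout_sets = [np.stack([np.arange(4), [1., 2., 3., 4.]])]
    #   # The IQR of [1, 2, 3, 4] is 1.5 (linear interpolation), so with no
    #   # baseline the result would be array([1.5]).
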
    def __call__(self, rollout_sets):
        """Computes CVaR across rollouts of a fixed policy.

        Args:
          rollout_sets: A list of rollout sets, with length n_rollout_sets.
            Each element of the list corresponds to the performance values of
            one set of rollouts for a fixed policy (e.g. for a single model
            checkpoint). Each element is a 2D numpy array where
            rollout_set[0, :] is just an index variable (e.g.
            range(0, n_rollouts)) and rollout_set[1, :] are the performances
            per rollout.

        Returns:
          CVaR across rollouts, computed for each rollout set.
          (1-D Numpy array with length n_rollout_sets.)
        """
        utils.assert_non_empty(rollout_sets)

        if self.baseline is not None:
            if self.baseline == 'median_perf':
                divisor = utils.median_rollout_performance(rollout_sets)
            else:
                divisor = self.baseline
            rollout_sets = utils.divide_by_baseline(rollout_sets, divisor)

        cvar_list = []
        # Compute CVaR within each rollout set.
        for rollout_set in rollout_sets:
            dependent_var = rollout_set[1, :]
            cvar = utils.compute_cvar(dependent_var, self.tail, self.alpha)
            cvar_list.append(cvar)

        return np.array(cvar_list)
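
    # CVaR refresher (hedged; assumes utils.compute_cvar follows the standard
    # definition): for tail='lower', CVaR_alpha is the mean of the worst
    # alpha-fraction of rollouts. E.g. for performances [1., 2., 3., 4.] and
    # alpha=0.5, the lower-tail CVaR is mean([1., 2.]) = 1.5.
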
    def __call__(self, curves):
        """Computes CVaR for a list of curves.

        Args:
          curves: A list of learning curves, each a 2D numpy array where
            curve[0, :] is the timepoint variable and curve[1, :] is the
            dependent variable.

        Returns:
          For self.target in ['diffs', 'raw', 'drawdown']:
            A 1-D numpy array of CVaR values, one per curve in the input
            (length = number of curves in the input).
          For self.target == 'across':
            A 1-D numpy array of CVaR values, one per eval point
            (length = number of eval points).
        """
        utils.assert_non_empty(curves)

        if self.baseline == 'curve_range':
            curve_ranges = utils.curve_range(curves)
            curves = utils.divide_by_baseline(curves, curve_ranges)
        elif self.baseline:
            curves = utils.divide_by_baseline(curves, self.baseline)

        cvar_list = []
        if self.target == 'across':
            # Compute CVaR across curves (at each eval point)
            eval_point_vals = utils.across_runs_preprocess(
                curves, self.eval_points, self.window_size,
                self.lowpass_thresh)
            n_eval_points = eval_point_vals.shape[1]
            for i_point in range(n_eval_points):
                cvar = utils.compute_cvar(eval_point_vals[:, i_point],
                                          self.tail, self.alpha)
                cvar_list.append(cvar)
        else:
            # Compute CVaR within curves (one per curve).
            for curve in curves:
                dependent_var = curve[1, :]
                if self.target == 'raw':
                    pass
                elif self.target == 'diffs':
                    normalized_diffs = utils.differences([curve])[0]
                    dependent_var = normalized_diffs[1, :]
                elif self.target == 'drawdown':
                    dependent_var = utils.compute_drawdown(dependent_var)

                cvar = utils.compute_cvar(dependent_var, self.tail, self.alpha)
                cvar_list.append(cvar)

        return np.array(cvar_list)
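
    # Target semantics, as implemented above:
    #   'raw'      - CVaR of the raw performance values of each curve.
    #   'diffs'    - CVaR of the normalized first differences, i.e. the worst
    #                short-term changes between timepoints.
    #   'drawdown' - CVaR of the drawdown series (drop from the running peak).
    #   'across'   - CVaR across curves at each eval point, after windowed
    #                preprocessing.
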
    def __call__(self, curves):
        """Computes dispersion within runs.

        Args:
          curves: A list of learning curves, each a 2D numpy array where
            curve[0, :] is the timepoint variable and curve[1, :] is the
            dependent variable.

        Returns:
          Dispersion within runs, computed at each eval_point for each run.
          (Numpy array with size n_run x n_eval_points.)
        """
        utils.assert_non_empty(curves)

        # Detrend by differencing.
        if self.detrend:
            diff_curves = utils.differences(curves)
        else:
            diff_curves = curves

        dispersions = []
        # Process each curve separately, since run lengths may differ.
        for curve, diff_curve in zip(curves, diff_curves):
            eval_points = copy.deepcopy(self.eval_points)
            window_size = copy.deepcopy(self.window_size)

            # Determine eval_points and window_size, if needed (based on
            # diff_curve).
            if self.eval_points is None or self.window_size is None:
                if self.window_size is None:
                    valid_eval_points = utils.get_all_valid_eval_points(
                        [diff_curve], 1)
                    window_size = (
                        valid_eval_points.max() - valid_eval_points.min() + 1)
                if self.eval_points is None:
                    eval_points = utils.get_all_valid_eval_points(
                        [diff_curve], window_size)

            # Compute dispersion for the curve.
            diffcurve_dispers = utils.apply_window_fn(
                [diff_curve], eval_points, self._dispersion_fn, window_size)

            if self.baseline == 'curve_range':
                curve_range = utils.curve_range([curve])[0]
                diffcurve_dispers = diffcurve_dispers / curve_range
            elif self.baseline:
                diffcurve_dispers = diffcurve_dispers / self.baseline
            dispersions.extend(diffcurve_dispers)

        return np.array(dispersions)
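
    # Detrending sketch: differencing removes a monotone trend so that the
    # window function measures local variability rather than overall
    # improvement, e.g. y = [0., 1., 2., 4.] has differences [1., 1., 2.].
    # (utils.differences may additionally normalize by the timestep; the exact
    # behavior depends on that helper.)
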
    def __call__(self, curves):
        """Compute maximum drawdown."""
        utils.assert_non_empty(curves)

        if self.baseline is not None:
            curves = utils.subtract_baseline(curves, self.baseline)
        if self.mean_normalize:
            curves = utils.mean_normalization(curves)

        mdd = np.empty(len(curves))
        for i, curve in enumerate(curves):
            dependent_vals = curve[1, :]
            drawdown = utils.compute_drawdown(dependent_vals)
            mdd[i] = np.max(drawdown)
        return mdd
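
    # Drawdown sketch (hedged; assumes utils.compute_drawdown uses the
    # standard running-peak definition, drawdown[t] = max(y[:t + 1]) - y[t]):
    #   for y = [1., 3., 2., 5., 0.] the drawdowns are [0, 0, 1, 0, 5],
    #   so the maximum drawdown is 5.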