Example No. 1
def test_HistoryMatching_select_expectations():
    "test the select_expectations method of HistoryMatching"

    # correct functionality

    expectations = (np.array([2., 10.]), np.array([0., 0.]), np.array([[1., 2.]]))
    hm = HistoryMatching(obs=[1., 1.], expectations=expectations)

    expectations_new = hm._select_expectations()

    for a, b in zip(expectations, expectations_new):
        assert_allclose(a, b)

    gp = GaussianProcess(np.reshape(np.linspace(0., 1.), (-1, 1)),
                         np.linspace(0., 1.))
    np.random.seed(57483)
    gp.learn_hyperparameters()
    coords = np.array([[0.1], [1.]])
    obs = [1., 0.01]
    expectations = gp.predict(coords)

    hm = HistoryMatching(gp=gp, obs=obs, coords=coords)

    expectations_new = hm._select_expectations()

    for a, b in zip(expectations, expectations_new):
        assert_allclose(a, b)

    # ncoords somehow not set

    hm.ncoords = None
    with pytest.raises(ValueError):
        hm._select_expectations()

    # both coords and expectations set

    hm = HistoryMatching(gp=gp,
                         obs=obs,
                         coords=coords,
                         expectations=expectations)

    with pytest.raises(ValueError):
        hm._select_expectations()

    # if no expectations provided, fails

    hm = HistoryMatching(obs=obs)

    with pytest.raises(ValueError):
        hm._select_expectations()
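Taken together, these cases pin down a simple selection rule. A minimal sketch of what _select_expectations plausibly does (a hypothetical reconstruction from the tests above, not the package's actual implementation):

def _select_expectations_sketch(hm):
    # Hypothetical reconstruction of the rule the tests above imply.
    if hm.expectations is not None and hm.coords is not None:
        raise ValueError("cannot provide both coords and expectations")
    if hm.ncoords is None:
        raise ValueError("ncoords is not set, so no expectations are available")
    if hm.expectations is not None:
        return hm.expectations           # use the precomputed expectations as-is
    return hm.gp.predict(hm.coords)      # otherwise evaluate the emulator at coords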
Example No. 2
def test_HistoryMatching_init():
    "test the init method of HistoryMatching"

    # basic functionality

    hm = HistoryMatching()

    assert hm.gp == None
    assert hm.obs == None
    assert hm.coords == None
    assert hm.expectations == None
    assert hm.ndim == None
    assert hm.ncoords == None
    assert_allclose(hm.threshold, 3.)
    assert hm.I == None
    assert hm.NROY == None
    assert hm.RO == None

    # with gp/coords, obs, and threshold

    gp = GaussianProcess(np.reshape(np.linspace(0., 1.), (-1, 1)),
                         np.linspace(0., 1.))
    coords = np.array([[0.2], [0.4]])
    hm = HistoryMatching(gp=gp,
                         obs=1.,
                         coords=coords,
                         expectations=None,
                         threshold=5.)

    assert hm.gp == gp
    assert_allclose(hm.obs, [1., 0.])
    assert_allclose(hm.coords, coords)
    assert hm.expectations == None
    assert hm.ndim == 1
    assert hm.ncoords == len(coords)
    assert_allclose(hm.threshold, 5.)
    assert hm.I == None
    assert hm.NROY == None
    assert hm.RO == None

    # with obs, expectations, coords, and threshold

    expectations = (np.array([1.]), np.array([0.2]), np.array([[0.1]]))
    hm = HistoryMatching(gp=None,
                         obs=[1., 0.1],
                         coords=None,
                         expectations=expectations,
                         threshold=5.)

    assert hm.gp == None
    assert_allclose(hm.obs, [1., 0.1])
    assert hm.coords == None
    for a, b in zip(hm.expectations, expectations):
        assert_allclose(a, b)
    assert hm.ndim == None
    assert hm.ncoords == len(expectations[0])
    assert_allclose(hm.threshold, 5.)
    assert hm.I == None
    assert hm.NROY == None
    assert hm.RO == None
Example No. 3
def test_HistoryMatching_get_implausibility():
    "test the get_implausibility method of HistoryMatching"

    # correct functionality

    expectations = (np.array([2., 10.]), np.array([0., 0.]), np.array([[1., 2.]]))
    hm = HistoryMatching(obs=[1., 1.], expectations=expectations)
    I = hm.get_implausibility()

    assert_allclose(I, [1., 9.])
    assert_allclose(hm.I, [1., 9.])

    I = hm.get_implausibility(1.)

    assert_allclose(I, [1. / np.sqrt(2.), 9. / np.sqrt(2.)])
    assert_allclose(hm.I, [1. / np.sqrt(2.), 9. / np.sqrt(2.)])

    gp = GaussianProcess(np.reshape(np.linspace(0., 1.), (-1, 1)),
                         np.linspace(0., 1.))
    np.random.seed(57483)
    gp.learn_hyperparameters()
    coords = np.array([[0.1], [1.]])
    obs = [1., 0.01]
    mean, unc, _ = gp.predict(coords)
    I_exp = np.abs(mean - obs[0]) / np.sqrt(unc + obs[1])

    hm = HistoryMatching(gp=gp, obs=obs, coords=coords)
    I = hm.get_implausibility()

    assert_allclose(I, I_exp)
    assert_allclose(hm.I, I_exp)

    # no observations

    hm = HistoryMatching(expectations=expectations)

    with pytest.raises(ValueError):
        hm.get_implausibility()

    # negative variance for model discrepancy

    hm = HistoryMatching(obs=[1., 1.], expectations=expectations)

    with pytest.raises(AssertionError):
        hm.get_implausibility(-1.)
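Unpacking the numbers in the assertions above shows the implausibility formula this test encodes; the general form with a model discrepancy term is inferred from the expected values:

import numpy as np

# Inferred metric: I(x) = |E[f(x)] - z| / sqrt(Var[f(x)] + v_obs + v_disc)
mean = np.array([2., 10.])    # emulator means from `expectations`
var = np.array([0., 0.])      # emulator variances from `expectations`
z, v_obs = 1., 1.             # observation value and variance from obs=[1., 1.]

print(np.abs(mean - z) / np.sqrt(var + v_obs))        # [1. 9.]
print(np.abs(mean - z) / np.sqrt(var + v_obs + 1.))   # [1. 9.] / sqrt(2), with unit discrepancy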
Example No. 4
    def _eval_metric(self):
        """
        Evaluate MICE criterion on all candidate points and select new design point

        This internal method computes the MICE criterion on all candidate points and returns
        the index of the point with the maximum value. It does so by first fitting a base GP
        to all points in the current design, and then fitting a dummy GP to all candidate
        design points using the parameter values determined from the base GP fit. The MICE
        criterion does not depend on the target values, since the parameters are determined
        via the base GP and the MICE criterion only depends on the uncertainty of the
        candidate GP (which is independent of the target values). These fit GPs are then used
        to compute the MICE criterion for each candidate point, and the method returns the
        index of the point that had the maximum value of the MICE criterion.

        :returns: Index of the candidate with the maximum MICE score (integer with
                  ``0 <= index < n_cand``)
        :rtype: int
        """

        numtries = 10

        for i in range(numtries):
            try:
                self.gp = GaussianProcess(self.inputs,
                                          self.targets,
                                          nugget=self.nugget)
                self.gp = fit_GP_MAP(self.gp)

                self.gp_fast = MICEFastGP(self.candidates,
                                          np.ones(self.n_cand),
                                          nugget=np.exp(self.gp.theta[-2]) *
                                          self.nugget_s)
                self.gp_fast.theta = self.gp.theta
                break
            except FloatingPointError:
                if i < numtries - 1:
                    continue
                else:
                    raise FloatingPointError(
                        "Unable to find parameters suitable for both GPs")
            except LinAlgError:
                if i < numtries - 1:
                    continue
                else:
                    raise LinAlgError(
                        "Unable to find parameters suitable for both GPs")

        results = []

        for point in range(self.n_cand):
            results.append(self._MICE_criterion(point))

        return np.argmax(results)
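_MICE_criterion itself is not shown in this example. A hypothetical sketch of the per-candidate score, using only calls visible above (the real implementation uses MICEFastGP for speed; this slow version conditions a dummy GP on the remaining candidates):

def mice_score_sketch(gp, candidates, point, nugget_s):
    # Hypothetical slow version of the MICE criterion for one candidate:
    # ratio of the base GP's predictive variance at the candidate to the
    # variance of a dummy GP conditioned on the *other* candidates.
    x = np.reshape(candidates[point], (1, -1))
    others = np.delete(candidates, point, axis=0)

    _, var_base, _ = gp.predict(x, deriv=False)

    # Targets are irrelevant here: the predictive variance does not depend on them.
    gp_others = GaussianProcess(others, np.ones(len(others)),
                                nugget=np.exp(gp.theta[-2]) * nugget_s)
    gp_others.theta = gp.theta   # reuse hyperparameters from the base fit
    _, var_others, _ = gp_others.predict(x, deriv=False)

    return float(var_base / var_others)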
Example No. 5
def test_HistoryMatching_set_gp():
    'test the set_gp method of HistoryMatching'

    # correct functionality

    gp = GaussianProcess(np.reshape(np.linspace(0., 1.), (-1, 1)),
                         np.linspace(0., 1.))

    hm = HistoryMatching()
    hm.set_gp(gp)

    assert hm.gp == gp

    # bad type for GP

    with pytest.raises(TypeError):
        hm.set_gp(gp=1.)
Example No. 6
def demo_1D():
    # Create a gaussian process
    x_training = np.array([
        [0.],
        [10.],
        [20.],
        [30.],
        [43.],
        [50.]
    ])

    y_training = get_y_simulated_1D(x_training)

    gp = GaussianProcess(x_training, y_training)
    np.random.seed(47)
    gp = fit_GP_MAP(gp)

    # Define observation
    obs = [-0.8, 0.0004]

    # Coords to predict
    n_rand = 2000
    x_predict_min = -3
    x_predict_max = 53
    x_predict = np.random.rand(n_rand)
    x_predict = np.sort(x_predict, axis=0)
    x_predict *= (x_predict_max - x_predict_min)
    x_predict += x_predict_min
    x_predict = x_predict[:,None]

    coords = x_predict

    # Compute GPE expectations
    expectations = gp.predict(coords)

    # Compute implausibility
    hm = HistoryMatching(obs=obs, expectations=expectations)
    I = hm.get_implausibility()
    NROY = hm.get_NROY()
    RO = hm.get_RO()

    print("Fraction of points ruled out {:6}".format(str(float(len(RO))/float(n_rand))))

    # Plotting

    if makeplots:
        fig, axs = plt.subplots(2, 1, sharex=True)
        fig.subplots_adjust(hspace=0)
        x_hist_plot = [min(x_predict)[0], max(x_predict)[0]]
        y_hist_plot = [obs[0], obs[0]]
        y_hist_err = 3*np.sqrt(obs[1])
        y_hist_up = [val + y_hist_err for val in y_hist_plot]
        y_hist_dn = [val - y_hist_err for val in y_hist_plot]

        axs[0].plot(                # Horizontal line at value of y_obs
            x_hist_plot,
            y_hist_plot,
            color = 'black',
            label = 'observation'
        )

        axs[0].fill_between(        # Error bounds on y_obs
            x_hist_plot,
            y_hist_dn,
            y_hist_up,
            color='black',
            alpha=0.25
        )

        axs[0].plot(                # Simulator output
            coords,
            get_y_simulated_1D(coords),
            color = 'black',
            label = 'simulator'
        )

        axs[0].scatter(             # Training Data
            x_training,
            y_training,
            marker = '.',
            color  = 'black',
            label  = 'Training Data',
            s      = 100
        )

        axs[0].plot(                # GPE expectation
            coords,
            expectations[0],
            color = 'red',
            label = 'GPE'
        )

        axs[0].fill_between(        # GPE uncertainty
            coords[:,0],
            expectations[0] - 3*np.sqrt(expectations[1]),
            expectations[0] + 3*np.sqrt(expectations[1]),
            color = 'red',
            alpha = 0.5
        )

        axs[1].scatter(             # Implausibility
            coords,
            I,
            marker='.',
            color='black'
        )

        axs[1].scatter(
            coords[NROY],
            I[NROY],
            marker='.',
            color='green'
        )

        axs[1].plot(                # implausibility Threshold
            x_hist_plot,
            [3,3],
            color = 'green',
            label = 'implausibility threshold'
        )

        axs[0].set(
            ylabel='Model Output f(x)'
        )
        axs[1].set(
            xlabel='Input Parameter x',
            ylabel='Implausibility I(x)',
            ylim=(-1, 21)
        )


        plt.savefig('histmatch_1D.png', bbox_inches = 'tight')
Example No. 7
def demo_2D():
    # Create a Gaussian Process
    x_training = np.array([
        [0., 0.],
        [1.5, 1.5],
        [3., 3.],
        [0., 1.5],
        [1.5, 0.],
        [0., 3.],
        [3., 0.],
        [3., 1.5],
        [1.5, 3.]
    ])

    y_training = get_y_simulated_2D(x_training)

    gp = GaussianProcess(x_training, y_training)
    np.random.seed(47)
    gp = fit_GP_MAP(gp)

    # Define observation
    obs = [0.1, 0.0004]

    # Coords to predict
    n_rand = 2000
    a_predict_min = 0
    a_predict_max = np.pi
    b_predict_min = a_predict_min
    b_predict_max = a_predict_max
    a_predict = np.random.rand(n_rand) * (a_predict_max - a_predict_min) + a_predict_min
    b_predict = np.random.rand(n_rand) * (b_predict_max - b_predict_min) + b_predict_min
    x_predict = np.concatenate((a_predict[:,None], b_predict[:,None]), axis=1)
    x_predict = np.concatenate((x_predict, x_training), axis=0)

    coords = x_predict

    # Compute GPE expectations
    expectations = gp.predict(coords)

    # Compute implausibility
    hm = HistoryMatching(obs=obs, expectations=expectations)
    I = hm.get_implausibility()
    NROY = hm.get_NROY()
    RO = hm.get_RO()

    print("Fraction of points ruled out {:6}".format(str(float(len(RO))/float(n_rand))))

    # Plotting
    if makeplots:
        from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 (registers the '3d' projection)
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')

        ax.scatter(         # Training data
            x_training[:, 0],
            x_training[:, 1],
            y_training,
            color='black',
            marker='.',
            s=100
        )

        # ax.scatter(       # GPE prediction
        #     coords[:, 0],
        #     coords[:, 1],
        #     expectations[0],
        #     color='red',
        #     marker='.',
        #     s=2
        # )

        ax.scatter(         # GPE uncertainty bounds, ruled-out points
            coords[:, 0][RO],
            coords[:, 1][RO],
            expectations[0][RO] + 3*np.sqrt(expectations[1][RO]),
            color='red',
            marker='.',
            s=1
        )
        ax.scatter(
            coords[:, 0][RO],
            coords[:, 1][RO],
            expectations[0][RO] - 3*np.sqrt(expectations[1][RO]),
            color='red',
            marker='.',
            s=1
        )
        ax.scatter(         # GPE uncertainty bounds, NROY points
            coords[:, 0][NROY],
            coords[:, 1][NROY],
            expectations[0][NROY] + 3*np.sqrt(expectations[1][NROY]),
            color='green',
            marker='.',
            s=1
        )
        ax.scatter(
            coords[:, 0][NROY],
            coords[:, 1][NROY],
            expectations[0][NROY] - 3*np.sqrt(expectations[1][NROY]),
            color='green',
            marker='.',
            s=1
        )

        ax.set(
            xlabel='Input parameter a',
            ylabel='Input parameter b',
            zlabel='Model output f(a, b)'
        )



        plt.savefig('histmatch_2D.png', bbox_inches = "tight")
Example No. 8
def test_sanity_checks():
    "test basic functioning of HistoryMatching"

    # Create a gaussian process
    x_training = np.array([[0.], [10.], [20.], [30.], [43.], [50.]])

    y_training = get_y_simulated_1D(x_training)

    gp = GaussianProcess(x_training, y_training)
    np.random.seed(47)
    gp.learn_hyperparameters()

    # Define observation and implausibility threshold
    obs = [-0.8, 0.0004]

    # Coords to predict
    n_rand = 2000
    x_predict_min = -3
    x_predict_max = 53
    x_predict = np.random.rand(n_rand)
    x_predict = np.sort(x_predict, axis=0)
    x_predict *= (x_predict_max - x_predict_min)
    x_predict += x_predict_min
    x_predict = x_predict[:, None]

    coords = x_predict

    expectations = gp.predict(coords)

    # Create History Matching Instance
    print("---TEST INPUTS---")
    print("No Args")
    hm = HistoryMatching()
    hm.status()

    print("Obs Only a - list")
    hm = HistoryMatching(obs=obs)
    hm.status()

    print("Obs only b - single-element list")
    hm = HistoryMatching(obs=[3.])
    hm.status()

    print("Obs only c - single-value")
    hm = HistoryMatching(obs=3.)
    hm.status()

    print("gp Only")
    hm = HistoryMatching(gp=gp)
    hm.status()

    print("Coords only a - 2d ndarray")
    hm = HistoryMatching(coords=coords)
    hm.status()

    print("Coords only b - 1d ndarray")
    hm = HistoryMatching(coords=np.random.rand(n_rand))
    hm.status()

    print("Coords only c - list")
    hm = HistoryMatching(coords=[a for a in range(n_rand)])
    hm.status()

    print("Expectation only")
    hm = HistoryMatching(expectations=expectations)
    hm.status()

    print("Threshold Only")
    hm = HistoryMatching(threshold=3.)
    hm.status()

    print("---TEST ASSIGNMENT---")
    print("Assign gp")
    hm = HistoryMatching(obs=obs)
    hm.status()
    hm.set_gp(gp)
    hm.status()

    print("Assign Obs")
    hm = HistoryMatching(gp=gp)
    hm.status()
    hm.set_obs(obs)
    hm.status()

    print("Assign Coords")
    hm = HistoryMatching()
    hm.status()
    hm.set_coords(coords)
    hm.status()

    print("Assign Expectations")
    hm = HistoryMatching()
    hm.status()
    hm.set_expectations(expectations)
    hm.status()

    print("Assign Threshold")
    hm = HistoryMatching()
    hm.status()
    hm.set_threshold(3.)
    hm.status()

    print("---TEST IMPLAUSABILIY---")
    print("implausibility test a - no vars")
    hm = HistoryMatching(obs=obs, gp=gp, coords=coords)
    I = hm.get_implausibility()

    print("implausibility test b - single value")
    hm = HistoryMatching(obs=obs, gp=gp, coords=coords)
    I = hm.get_implausibility(7.)
Example No. 9
def fit_GP_MAP(*args, n_tries=15, theta0=None, method="L-BFGS-B", **kwargs):
    """
    Fit one or more Gaussian Processes by attempting to minimize the negative log-posterior

    Fits the hyperparameters of one or more Gaussian Processes by attempting to minimize
    the negative log-posterior multiple times from a given starting location and using
    a particular minimization method. The best result found among all of the attempts is
    returned, unless all attempts to fit the parameters result in an error (see below).

    The arguments to the method can either be an existing ``GaussianProcess`` or
    ``MultiOutputGP`` instance, or a list of arguments to be passed to the ``__init__``
    method of ``GaussianProcess`` or ``MultiOutputGP`` if more than one output is detected.
    Keyword arguments for creating a new ``GaussianProcess`` or ``MultiOutputGP`` object can
    either be passed as part of the ``*args`` list or as keywords (if present in ``**kwargs``, they
    will be extracted and passed separately to the ``__init__`` method).

    If the method encounters an overflow (this can result because the parameter values stored are
    the logarithm of the actual hyperparameters to enforce positivity) or a linear algebra error
    (occurs when the covariance matrix cannot be inverted, even with additional noise added along
    the diagonal if adaptive noise was selected), the iteration is skipped. If all attempts to find
    optimal hyperparameters result in an error, then the method raises an exception.

    The ``theta0`` parameter is the point at which the first iteration will start. If more than
    one attempt is made, subsequent attempts will use random starting points. If you are fitting
    multiple outputs, then this argument can take any of the following forms: (1) ``None`` (random
    start points for all emulators), (2) a list of numpy arrays or ``None`` entries with length
    ``n_emulators``, (3) a numpy array of shape ``(n_params,)`` or ``(n_emulators, n_params)``,
    which will use either the same start point for all emulators or a separate start point for
    each emulator. Note that if you use a numpy array, all emulators must have the same number
    of parameters, while using a list allows more flexibility.

    The user can specify the details of the minimization method, using any of the gradient-based
    optimizers available in ``scipy.optimize.minimize``. Any additional parameters beyond the method
    specification can be passed as keyword arguments.

    The function returns a fit ``GaussianProcess`` or ``MultiOutputGP`` instance, either the original
    one passed to the function, or the new one created from the included arguments.

    :param ``*args``: Either a single ``GaussianProcess`` or ``MultiOutputGP`` instance,
                      or arguments to be passed to the ``__init__`` method when creating a new
                      ``GaussianProcess`` or ``MultiOutputGP`` instance.
    :param n_tries: Number of attempts to minimize the negative log-posterior function.
                    Must be a positive integer (optional, default is 15)
    :type n_tries: int
    :param theta0: Initial starting point for the first iteration. If present, must be
                   array-like with shape ``(n_params,)`` based on the specific
                   ``GaussianProcess`` being fit. If a ``MultiOutputGP`` is being fit
                   it must be a list of length ``n_emulators`` with each entry as either
                   ``None`` or a numpy array of shape ``(n_params,)``, or a numpy array
                   with shape ``(n_emulators, n_params)`` (note that if the various emulators
                   have different numbers of parameters, the numpy array option will not work).
                   If ``None`` is given, then a random value is chosen. (Default is ``None``)
    :type theta0: None or ndarray
    :param method: Minimization method to be used. Can be any gradient-based optimization
                   method available in ``scipy.optimize.minimize``. (Default is ``'L-BFGS-B'``)
    :type method: str
    :param ``**kwargs``: Additional keyword arguments to be passed to ``GaussianProcess.__init__``,
                         ``MultiOutputGP.__init__``, or the minimization routine. Relevant parameters
                         for the GP classes are automatically split out from those used in the
                         minimization function. See available parameters in the corresponding functions
                         for details.
    :returns: Fit GP or Multi-Output GP instance
    :rtype: GaussianProcess or MultiOutputGP
    """

    if len(args) == 1:
        gp = args[0]
        if isinstance(gp, MultiOutputGP):
            return _fit_MOGP_MAP(gp, n_tries, theta0, method, **kwargs)
        elif isinstance(gp, GaussianProcess):
            return _fit_single_GP_MAP(gp, n_tries, theta0, method, **kwargs)
        else:
            raise TypeError(
                "single arg to fit_GP_MAP must be a GaussianProcess or MultiOutputGP instance"
            )
    elif len(args) < 2:
        raise TypeError(
            "missing required inputs/targets arrays to GaussianProcess")
    else:
        gp_kwargs = {}
        for key in [
                "mean", "kernel", "priors", "nugget", "inputdict", "use_patsy"
        ]:
            if key in kwargs:
                gp_kwargs[key] = kwargs[key]
                del kwargs[key]
        try:
            gp = GaussianProcess(*args, **gp_kwargs)
            return _fit_single_GP_MAP(gp, n_tries, theta0, method, **kwargs)
        except AssertionError:
            gp = MultiOutputGP(*args, **gp_kwargs)
            return _fit_MOGP_MAP(gp, n_tries, theta0, method, **kwargs)
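A usage sketch covering both call forms described in the docstring (toy data, illustrative only):

import numpy as np

x = np.linspace(0., 1., 20)[:, None]   # inputs, shape (n, D)
y = np.sin(10.*x).ravel()              # targets, shape (n,)

# Form 1: fit an existing GaussianProcess instance
gp = fit_GP_MAP(GaussianProcess(x, y), n_tries=5)

# Form 2: pass the constructor arguments directly; GP-specific keywords
# such as nugget are split out from the minimizer keywords automatically
gp2 = fit_GP_MAP(x, y, nugget="adaptive", n_tries=5, method="L-BFGS-B")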
Example No. 10
    def __init__(self,
                 inputs,
                 targets,
                 mean=None,
                 kernel=SquaredExponential(),
                 priors=None,
                 nugget="adaptive",
                 inputdict={},
                 use_patsy=True):
        """
        Create a new multi-output GP Emulator
        """

        # check input types and shapes, reshape as appropriate for the case of a single emulator
        inputs = np.array(inputs)
        targets = np.array(targets)
        if len(inputs.shape) == 1:
            inputs = np.reshape(inputs, (-1, 1))
        if len(targets.shape) == 1:
            targets = np.reshape(targets, (1, -1))
        elif not (len(targets.shape) == 2):
            raise ValueError("targets must be either a 1D or 2D array")
        if not (len(inputs.shape) == 2):
            raise ValueError("inputs must be either a 1D or 2D array")
        if not (inputs.shape[0] == targets.shape[1]):
            raise ValueError(
                "the first dimension of inputs must be the same length as the second dimension of targets (or first if targets is 1D))"
            )

        self.n_emulators = targets.shape[0]
        self.n = inputs.shape[0]
        self.D = inputs.shape[1]

        if mean is None or isinstance(mean, str) or issubclass(
                type(mean), MeanBase):
            mean = self.n_emulators * [mean]

        assert isinstance(
            mean, list
        ), "mean must be None, a string, a mean function, or a list of None/string/mean functions"
        assert len(mean) == self.n_emulators

        if isinstance(kernel, str):
            if kernel == "SquaredExponential":
                kernel = SquaredExponential()
            elif kernel == "Matern52":
                kernel = Matern52()
            else:
                raise ValueError(
                    "provided kernel '{}' not a supported kernel type".format(
                        kernel))
        if issubclass(type(kernel), Kernel):
            kernel = self.n_emulators * [kernel]

        assert isinstance(
            kernel, list
        ), "kernel must be a Kernal subclass or a list of Kernel subclasses"
        assert len(kernel) == self.n_emulators

        if priors is None:
            priors = []
        assert isinstance(
            priors, list), "priors must be a list of lists of Priors/None"

        if len(priors) == 0:
            priors = self.n_emulators * [[]]

        if not isinstance(priors[0], list):
            priors = self.n_emulators * [priors]

        assert len(priors) == self.n_emulators

        if isinstance(nugget, (str, float)):
            nugget = self.n_emulators * [nugget]

        assert isinstance(
            nugget, list
        ), "nugget must be a string, float, or a list of strings and floats"
        assert len(nugget) == self.n_emulators

        self.emulators = [
            GaussianProcess(inputs, single_target, m, k, p, n, inputdict,
                            use_patsy)
            for (single_target, m, k, p,
                 n) in zip(targets, mean, kernel, priors, nugget)
        ]
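A usage sketch for this constructor (assuming the class is the package's MultiOutputGP): two outputs observed at the same five input points.

import numpy as np

inputs = np.linspace(0., 1., 5)[:, None]       # shape (n, D) = (5, 1)
targets = np.vstack([np.sin(inputs).ravel(),   # shape (n_emulators, n) = (2, 5)
                     np.cos(inputs).ravel()])

mogp = MultiOutputGP(inputs, targets, kernel="SquaredExponential", nugget="adaptive")
assert mogp.n_emulators == 2 and mogp.n == 5 and mogp.D == 1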
Example No. 11
    def predict(self,
                testing,
                unc=True,
                deriv=True,
                include_nugget=True,
                processes=None):
        """
        Make a prediction for a set of input vectors

        Makes predictions for each of the emulators on a given set of input vectors. The
        input vectors must be passed as a ``(n_predict, D)`` or ``(D,)`` shaped array-like
        object, where ``n_predict`` is the number of different prediction points under
        consideration and ``D`` is the number of inputs to the emulator. If the prediction
        inputs array has shape ``(D,)``, then the method assumes ``n_predict == 1``.
        The prediction points are passed to each emulator and the predictions are collected
        into an ``(n_emulators, n_predict)`` shaped numpy array as the first return value
        from the method.

        Optionally, the emulator can also calculate the uncertainties in the predictions
        (as a variance) and the derivatives with respect to each input parameter. If the
        uncertainties are computed, they are returned as the second output from the method
        as an ``(n_emulators, n_predict)`` shaped numpy array. If the derivatives are
        computed, they are returned as the third output from the method as an
        ``(n_emulators, n_predict, D)`` shaped numpy array. Finally, if uncertainties
        are computed, the ``include_nugget`` flag determines if the uncertainties should
        include the nugget. By default, this is set to ``True``.

        As with the fitting, this computation can be done independently for each emulator
        and thus can be done in parallel.

        :param testing: Array-like object holding the points where predictions will be made.
                        Must have shape ``(n_predict, D)`` or ``(D,)`` (for a single prediction)
        :type testing: ndarray
        :param unc: (optional) Flag indicating if the uncertainties are to be computed.
                    If ``False`` the method returns ``None`` in place of the uncertainty
                    array. Default value is ``True``.
        :type unc: bool
        :param deriv: (optional) Flag indicating if the derivatives are to be computed.
                      If ``False`` the method returns ``None`` in place of the derivative
                      array. Default value is ``True``.
        :type deriv: bool
        :param include_nugget: (optional) Flag indicating if the nugget should be included
                               in the predictive variance. Only relevant if ``unc = True``.
                               Default is ``True``.
        :type include_nugget: bool
        :param processes: (optional) Number of processes to use when making the predictions.
                          Must be a positive integer or ``None`` to use the number of
                          processors on the computer (default is ``None``)
        :type processes: int or None
        :returns: Tuple of numpy arrays holding the predictions, uncertainties, and derivatives,
                  respectively. Predictions and uncertainties have shape ``(n_emulators, n_predict)``
                  while the derivatives have shape ``(n_emulators, n_predict, D)``. If
                  the ``unc`` or ``deriv`` flags are set to ``False``, then those arrays
                  are replaced by ``None``.
        :rtype: tuple
        """

        testing = np.array(testing)
        if testing.shape == (self.D, ):
            testing = np.reshape(testing, (1, self.D))
        assert len(testing.shape) == 2, "testing must be a 2D array"
        assert testing.shape[
            1] == self.D, "second dimension of testing must be the same as the number of input parameters"
        if processes is not None:
            processes = int(processes)
            assert processes > 0, "number of processes must be a positive integer"

        if platform.system() == "Windows":
            predict_vals = [
                GaussianProcess.predict(gp, testing, unc, deriv,
                                        include_nugget)
                for gp in self.emulators
            ]
        else:
            with Pool(processes) as p:
                predict_vals = p.starmap(
                    GaussianProcess.predict,
                    [(gp, testing, unc, deriv, include_nugget)
                     for gp in self.emulators])

        # repackage predictions into numpy arrays

        predict_unpacked, unc_unpacked, deriv_unpacked = [
            np.array(t) for t in zip(*predict_vals)
        ]

        if not unc:
            unc_unpacked = None
        if not deriv:
            deriv_unpacked = None

        return PredictResult(mean=predict_unpacked,
                             unc=unc_unpacked,
                             deriv=deriv_unpacked)
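A usage sketch for predict, continuing from the mogp constructed in Example No. 10 and fitting it first with the routine from Example No. 9:

import numpy as np

mogp = fit_GP_MAP(mogp)                      # fit before predicting

testing = np.linspace(0., 1., 10)[:, None]   # shape (n_predict, D) with D == 1
mean, unc, deriv = mogp.predict(testing)     # unpacks the returned PredictResult
# mean, unc: (n_emulators, n_predict); deriv: (n_emulators, n_predict, D)

mean_only = mogp.predict(testing, unc=False, deriv=False)[0]  # the other slots are None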