Example #1
File: train.py Project: NICTA/dora
def condition(X, y, kernelFn, hyper_opt_config_copys):
    assert len(y.shape) == 1  # y must be 1-D, shape (n,)
    h_kernel, noise_std = hyper_opt_config_copys
    # Bind the kernel hyperparameters into a two-argument covariance function
    kernel = lambda x1, x2: kernelFn(x1, x2, h_kernel)
    noise_vector = predict.noise_vector(X, noise_std)
    # Cholesky factor of the noise-augmented training covariance
    L = linalg.cholesky(X, kernel, noise_vector)
    # Regression weights: alpha solves the conditioned system for y via L
    alpha = predict.alpha(y, L)
    return types.RegressionParams(X, L, alpha, kernel, y, noise_std)
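A minimal usage sketch for condition: the squared-exponential kernel, the sample data, and the hyperparameter tuple below are hypothetical stand-ins for illustration, not part of dora.

import numpy as np

# Hypothetical isotropic squared-exponential kernel; h is a length scale.
def sq_exp(x1, x2, h):
    d2 = ((x1[:, None, :] - x2[None, :, :]) ** 2).sum(axis=-1)
    return np.exp(-0.5 * d2 / h ** 2)

X = np.random.rand(30, 2)                        # 30 inputs in 2-D
y = np.sin(3 * X[:, 0]) + 0.05 * np.random.randn(30)

# hyper_opt_config_copys unpacks as (kernel hyperparameter, noise std)
regressor = condition(X, y, sq_exp, (0.5, 0.05))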
Example #2
File: train.py Project: NICTA/dora
def add_data(newX, newY, regressor, query=None, insertionID=None):
    assert isinstance(regressor, types.RegressionParams)
    assert not query or isinstance(query, types.QueryParams)
    assert len(newX.shape) == 2  # newX must be (n_new, d)
    assert len(newY.shape) == 1  # newY must be (n_new,)

    if insertionID is None:  # No insertionID provided; append data to the end.
        # Compute the new rows and columns of the covariance matrix
        Kxn = regressor.kernel(regressor.X, newX)
        Knn = regressor.kernel(newX, newX)
        nn_noise_std = predict.noise_vector(newX, regressor.noise_std)
        # Update the stored regression parameters in place
        regressor.X = np.vstack((regressor.X, newX))
        regressor.y = np.hstack((regressor.y, newY))
        regressor.L = chol_up(regressor.L, Kxn, Knn, nn_noise_std)
        # Sadly, recomputing alpha is still expensive. Osborne's thesis
        # (Appendix B) could speed this step up too, perhaps by a factor of 2.
        regressor.alpha = predict.alpha(regressor.y, regressor.L)

        # Optionally update the query
        if query is not None:
            Kxsn = regressor.kernel(newX, query.Xs)
            query.K_xxs = np.vstack((query.K_xxs, Kxsn))
    else:
        # Compute the new rows and columns of the covariance matrix
        Kx1n = regressor.kernel(regressor.X[:insertionID, :], newX)
        Knx2 = regressor.kernel(newX, regressor.X[insertionID:, :])
        Knn = regressor.kernel(newX, newX)
        nn_noise_std = predict.noise_vector(newX, regressor.noise_std)
        regressor.X = np.vstack(
            (regressor.X[:insertionID, :], newX, regressor.X[insertionID:, :]))
        regressor.y = np.hstack(
            (regressor.y[:insertionID], newY, regressor.y[insertionID:]))
        regressor.L = chol_up_insert(regressor.L, Kx1n, Knx2, Knn,
                                     nn_noise_std, insertionID)
        # Sadly, recomputing alpha is still expensive. Osborne's thesis
        # (Appendix B) could speed this step up too, perhaps by a factor of 2.
        regressor.alpha = predict.alpha(regressor.y, regressor.L)

        if query is not None:
            Kxsn = regressor.kernel(newX, query.Xs)
            query.K_xxs = np.vstack((query.K_xxs[:insertionID, :], Kxsn,
                                     query.K_xxs[insertionID:, :]))
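A sketch of the incremental update, assuming a regressor built with condition as in Example #1 (the kernel and data are the hypothetical ones from that sketch):

# Appending two new observations: chol_up extends the existing
# Cholesky factor instead of refactorising the full covariance.
newX = np.random.rand(2, 2)
newY = np.sin(3 * newX[:, 0])
add_data(newX, newY, regressor)

# Inserting at a specific row index instead goes through chol_up_insert.
add_data(newX, newY, regressor, insertionID=5)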
Example #3
File: train.py Project: NICTA/dora
def criterion(sigma, noise):
    k = lambda x1, x2: cov_fn(x1, x2, sigma)
    X_noise = predict.noise_vector(X, noise)
    L = linalg.cholesky(X, k, X_noise)
    a = predict.alpha(Y, L)
    if optCrition == "logMarg":
        val = negative_log_marginal_likelihood(Y, L, a)
    elif optCrition == "crossVal":
        val = negative_log_prob_cross_val(Y, L, a)
    else:
        raise ValueError("Unknown optimisation criterion: " + optCrition)
    if verbose:
        print("[" + str(val) + "]  ", sigma, noise)
    return val
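The closure is shaped for an off-the-shelf optimiser. Below is a sketch of driving it with SciPy, assuming criterion is in scope; the log-space reparameterisation is an assumed convention for keeping the hyperparameters positive, not dora's own training loop:

import numpy as np
from scipy.optimize import minimize

# Optimise in log-space so both hyperparameters stay positive.
def objective(theta):
    sigma, noise = np.exp(theta)
    return criterion(sigma, noise)

res = minimize(objective, x0=np.log([1.0, 0.1]), method="Nelder-Mead")
sigma_opt, noise_opt = np.exp(res.x)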
Example #4
File: train.py Project: NICTA/dora
def remove_data(regressor, remID, query=None):
    assert isinstance(regressor, types.RegressionParams)
    assert not query or isinstance(query, types.QueryParams)

    regressor.X = np.delete(regressor.X, remID, axis=0)
    regressor.y = np.delete(regressor.y, remID, axis=0)
    # A Cholesky downdate (chol_down) would be cheaper, but the factor
    # is currently rebuilt from scratch below:
    # regressor.L = chol_down(regressor.L, remID)

    noise_vector = predict.noise_vector(regressor.X, regressor.noise_std)
    regressor.L = linalg.cholesky(regressor.X, regressor.kernel, noise_vector)
    regressor.alpha = predict.alpha(regressor.y, regressor.L)

    # Optionally update the query
    if query is not None:
        query.K_xxs = np.delete(query.K_xxs, remID, axis=0)
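A short usage sketch, assuming the regressor from the earlier examples:

# Drop the observation at index 3; X, y, L and alpha are all refreshed
# in place, as is the query's covariance block if one is supplied.
remove_data(regressor, 3)
assert regressor.X.shape[0] == regressor.y.shape[0]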
Example #5
File: train.py Project: NICTA/dora
def criterion(sigma, noise):
    k = lambda x1, x2: cov_fn(x1, x2, sigma)
    val = 0
    # Accumulate the criterion over each pre-computed fold
    for f in range(folds.n_folds):
        Xf = folds.X[f]
        Yf = folds.flat_y[f]
        Xf_noise = predict.noise_vector(Xf, noise)
        Lf = linalg.cholesky(Xf, k, Xf_noise)
        af = predict.alpha(Yf, Lf)
        if optCrition == "logMarg":
            val += negative_log_marginal_likelihood(Yf, Lf, af)
        elif optCrition == "crossVal":
            val += negative_log_prob_cross_val(Yf, Lf, af)
        else:
            raise ValueError("Unknown optimisation criterion: " + optCrition)
    if verbose:
        print("[" + str(val) + "]  ", sigma, noise)
    return val
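This variant sums the criterion over pre-split folds. Below is a sketch of the assumed folds container; the field names match what the closure reads, but this constructor itself is hypothetical:

import numpy as np

class Folds:
    # Minimal container with the fields the closure reads:
    # n_folds, X[f], flat_y[f].
    def __init__(self, X, y, n_folds):
        self.n_folds = n_folds
        self.X = np.array_split(X, n_folds)
        self.flat_y = np.array_split(y, n_folds)

folds = Folds(X, y, n_folds=5)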