Example #1
def test_predict(X, training_result, params):

    # Build the kernel function and an SVM prediction algorithm that match
    # the floating-point precision of X.
    fptype = getFPType(X)
    kf = daal_kernel(params.kernel, fptype, gamma=params.gamma)

    svm_predict = svm_prediction(fptype=fptype,
                                 method='defaultDense',
                                 kernel=kf)
    if params.n_classes == 2:
        prdct = svm_predict
    else:
        # Wrap the two-class SVM in a one-against-one multi-class classifier.
        prdct = multi_class_classifier_prediction(nClasses=params.n_classes,
                                                  fptype=fptype,
                                                  maxIterations=params.maxiter,
                                                  accuracyThreshold=params.tol,
                                                  pmethod='voteBased',
                                                  tmethod='oneAgainstOne',
                                                  prediction=svm_predict)

    res = prdct.compute(X, training_result.model)

    if params.n_classes == 2:
        # The two-class SVM returns signed decision values; threshold at zero
        # to obtain the predicted labels.
        y_predict = np.greater(res.prediction.ravel(), 0)
    else:
        y_predict = res.prediction.ravel()

    return y_predict
Example #2
def pca_transform_daal(pca_result,
                       X,
                       n_components,
                       fit_n_samples,
                       eigenvalues,
                       eigenvectors,
                       whiten=False,
                       scale_eigenvalues=False):

    fptype = getFPType(X)

    tr_data = {}
    tr_data['mean'] = pca_result.dataForTransform['mean']

    if whiten:
        # Pass the (optionally rescaled) eigenvalues so the transform whitens
        # the projected data.
        if scale_eigenvalues:
            tr_data['eigenvalue'] = (fit_n_samples - 1) \
                * pca_result.eigenvalues
        else:
            tr_data['eigenvalue'] = pca_result.eigenvalues
    elif scale_eigenvalues:
        # No whitening: pass a constant (n_samples - 1) so only that scaling
        # is applied.
        tr_data['eigenvalue'] = np.full((1, pca_result.eigenvalues.size),
                                        fit_n_samples - 1,
                                        dtype=X.dtype)

    transform_algorithm = pca_transform(fptype=fptype,
                                        nComponents=n_components)
    transform_result = transform_algorithm.compute(X, pca_result.eigenvectors,
                                                   tr_data)
    return transform_result.transformedData
Example #3
def df_regr_fit(X,
                y,
                n_trees=100,
                seed=12345,
                n_features_per_node=0,
                max_depth=0,
                min_impurity=0,
                bootstrap=True):

    fptype = getFPType(X)

    # Use all features per node unless a valid smaller count was requested.
    features_per_node = X.shape[1]
    if 0 < n_features_per_node <= features_per_node:
        features_per_node = n_features_per_node

    # A seeded MT2203 engine keeps bootstrapping and feature sampling
    # reproducible across runs.
    engine = engines_mt2203(seed=seed, fptype=fptype)

    algorithm = decision_forest_regression_training(
        fptype=fptype,
        method='defaultDense',
        nTrees=n_trees,
        observationsPerTreeFraction=1.,
        featuresPerNode=features_per_node,
        maxTreeDepth=max_depth,
        minObservationsInLeafNode=1,
        engine=engine,
        impurityThreshold=min_impurity,
        varImportance='MDI',
        resultsToCompute='',
        memorySavingMode=False,
        bootstrap=bootstrap)

    df_regr_result = algorithm.compute(X, y)

    return df_regr_result
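
A minimal usage sketch for df_regr_fit, assuming daal4py's decision_forest_regression_prediction algorithm is used for inference and that getFPType is the same helper used throughout this listing; the synthetic data and hyperparameters are illustrative only:

import numpy as np
from daal4py import decision_forest_regression_prediction

# Illustrative synthetic regression data.
X = np.random.rand(1000, 10)
y = np.random.rand(1000, 1)

train_result = df_regr_fit(X, y, n_trees=50, max_depth=8)

# Predict with the trained forest; the result exposes a prediction table.
predict_algo = decision_forest_regression_prediction(fptype=getFPType(X))
y_pred = predict_algo.compute(X, train_result.model).prediction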
Example #4
def test_predict(X, X_init):
    # With maxIterations=0 and assignFlag=True, k-means only assigns each
    # sample to its nearest centroid in X_init (no centroid updates).
    algorithm = kmeans(fptype=getFPType(X),
                       nClusters=params.n_clusters,
                       maxIterations=0,
                       assignFlag=True,
                       accuracyThreshold=0.0)
    return algorithm.compute(X, X_init)
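
A hedged usage sketch for the assignment-only k-means call above: params is the benchmark's global argument object, so an argparse.Namespace stands in for it here, and the initial centroids come from daal4py's kmeans_init; the data is illustrative:

import argparse
import numpy as np
from daal4py import kmeans_init

# Stand-in for the benchmark's global `params` (assumed field names).
params = argparse.Namespace(n_clusters=8)

X = np.random.rand(5000, 16)

# Pick initial centroids with daal4py's k-means initialization.
X_init = kmeans_init(nClusters=params.n_clusters,
                     fptype=getFPType(X),
                     method='randomDense').compute(X).centroids

res = test_predict(X, X_init)
labels = res.assignments.ravel()  # one cluster index per row of X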
Example #5
    def __init__(self, X, y, beta, hess=False, fit_intercept=True):
        # Cache the data, labels and current coefficients for the objective.
        self.compute_hess = hess
        self.n = X.shape[0]
        self.fptype = getFPType(X)
        self.fit_intercept = fit_intercept
        self.X = make2d(X)
        self.y = make2d(y)

        self.last_beta = beta.copy()

        # Placeholders for the objective value, gradient and Hessian.
        self.func = None
        self.grad = None
        self.hess = None
Example #6
def test_fit(X, y, params):

    fptype = getFPType(X)
    kf = daal_kernel(params.kernel, fptype, gamma=params.gamma)

    # DAAL's two-class SVM expects labels in {-1, 1}; the multi-class path
    # expects labels in {0, ..., n_classes - 1}.
    if params.n_classes == 2:
        y[y == 0] = -1
    else:
        y[y == -1] = 0

    svm_train = svm_training(fptype=fptype,
                             C=params.C,
                             maxIterations=params.maxiter,
                             tau=params.tau,
                             cacheSize=params.cache_size_bytes,
                             accuracyThreshold=params.tol,
                             doShrinking=params.shrinking,
                             kernel=kf)

    if params.n_classes == 2:
        clf = svm_train
    else:
        # Train one two-class SVM per pair of classes (one-against-one).
        clf = multi_class_classifier_training(fptype=fptype,
                                              nClasses=params.n_classes,
                                              accuracyThreshold=params.tol,
                                              method='oneAgainstOne',
                                              maxIterations=params.maxiter,
                                              training=svm_train)

    training_result = clf.compute(X, y)

    # Recover the support-vector indices and their labels from the trained
    # model.
    support = construct_dual_coefs(training_result.model, params.n_classes, X,
                                   y)
    indices = y.take(support, axis=0)
    if params.n_classes == 2:
        n_support_ = np.array([np.sum(indices == -1),
                               np.sum(indices == 1)],
                              dtype=np.int32)
    else:
        n_support_ = np.array([
            np.sum(indices == c)
            for c in [-1] + list(range(1, params.n_classes))
        ],
                              dtype=np.int32)

    return training_result, support, indices, n_support_
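
A sketch tying this trainer to the test_predict helper from Example #1, assuming daal_kernel and construct_dual_coefs are benchmark helpers available in the same module; the params namespace and data are illustrative stand-ins:

import argparse
import numpy as np

# Stand-in for the benchmark's argument object (field names assumed).
params = argparse.Namespace(kernel='rbf', gamma=0.1, C=1.0, tol=1e-3,
                            tau=1e-12, maxiter=2000,
                            cache_size_bytes=256 * 2 ** 20,
                            shrinking=True, n_classes=2)

X = np.random.rand(500, 20)
y = np.random.randint(0, params.n_classes, (500, 1)).astype(X.dtype)

training_result, support, indices, n_support_ = test_fit(X, y, params)
y_pred = test_predict(X, training_result, params)  # Example #1's helper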
Example #7
def test_predict(X, beta, intercept=0, multi_class='ovr'):

    fptype = getFPType(X)

    scores = np.dot(X, beta.T) + intercept
    if multi_class == 'ovr':
        # use binary logistic regressions and normalize
        logistic = math_logistic(fptype=fptype, method='defaultDense')
        prob = logistic.compute(scores).value
        if prob.shape[1] == 1:
            return np.c_[1 - prob, prob]
        else:
            return prob / prob.sum(axis=1)[:, np.newaxis]
    else:
        # use softmax of exponentiated scores
        if scores.shape[1] == 1:
            scores = np.c_[-scores, scores]
        softmax = math_softmax(fptype=fptype, method='defaultDense')
        return softmax.compute(scores).value
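
A small sanity check for the probability helper above, assuming the math_logistic / math_softmax symbols it relies on are importable from daal4py; the coefficients are random and purely illustrative:

import numpy as np

X = np.random.rand(100, 5)
beta = np.random.rand(3, 5)   # one row of coefficients per class
intercept = np.zeros(3)

proba = test_predict(X, beta, intercept, multi_class='multinomial')
assert proba.shape == (100, 3)
assert np.allclose(proba.sum(axis=1), 1.0)  # each row is a probability vector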
Example #8
def pca_fit_daal(X, n_components, method):

    # A non-positive n_components means "keep all components".
    if n_components < 1:
        n_components = min(X.shape)

    fptype = getFPType(X)

    # Center (but do not scale) the data before the decomposition.
    centering_algo = normalization_zscore(fptype=fptype, doScale=False)

    pca_algorithm = pca(fptype=fptype,
                        method=method,
                        normalization=centering_algo,
                        resultsToCompute='mean|variance|eigenvalue',
                        isDeterministic=True,
                        nComponents=n_components)

    pca_result = pca_algorithm.compute(X)
    eigenvectors = pca_result.eigenvectors
    eigenvalues = pca_result.eigenvalues.ravel()
    # Singular values follow from the covariance eigenvalues:
    # s_i = sqrt((n_samples - 1) * lambda_i).
    singular_values = np.sqrt((X.shape[0] - 1) * eigenvalues)

    return pca_result, eigenvalues, eigenvectors, singular_values
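
A sketch chaining pca_fit_daal with the pca_transform_daal helper from Example #2, assuming both live in the same module; the data, component count and 'svdDense' method are illustrative choices:

import numpy as np

X = np.random.rand(2000, 50)
n_components = 10

pca_result, eigenvalues, eigenvectors, singular_values = \
    pca_fit_daal(X, n_components, method='svdDense')

# Project (and whiten) the same data with Example #2's helper.
X_t = pca_transform_daal(pca_result, X, n_components, X.shape[0],
                         eigenvalues, eigenvectors, whiten=True)
print(X_t.shape)  # (2000, 10)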
Example #9
def compute_distances(pairwise_distances, X):
    # `pairwise_distances` is a distance algorithm factory, e.g. daal4py's
    # cosine_distance or correlation_distance.
    algorithm = pairwise_distances(fptype=getFPType(X))
    return algorithm.compute(X)
Example #10
def test_dbscan(X):
    # Request core-point indices in addition to the cluster assignments.
    algorithm = dbscan(fptype=getFPType(X),
                       epsilon=params.eps,
                       minObservations=params.min_samples,
                       resultsToCompute='computeCoreIndices')
    return algorithm.compute(X)
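
An illustrative call, again faking the benchmark's global params with a namespace; assignments and coreIndices are daal4py's DBSCAN result attributes (the latter present because core indices were requested above):

import argparse
import numpy as np

params = argparse.Namespace(eps=0.5, min_samples=5)  # assumed field names

X = np.random.rand(1000, 3)
res = test_dbscan(X)

labels = res.assignments.ravel()    # cluster id per sample
core_idx = res.coreIndices.ravel()  # indices of the core samples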
Example #11
def test_predict(Xp, model):
    regr_predict = linear_regression_prediction(fptype=getFPType(Xp))
    return regr_predict.compute(Xp, model)
Example #12
def test_fit(X, y):
    regr_train = linear_regression_training(fptype=getFPType(X),
                                            method=params.method,
                                            interceptFlag=params.fit_intercept)
    return regr_train.compute(X, y)
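
A sketch chaining this trainer with the prediction helper from Example #11; params.method is set to daal4py's default (normal-equations) method and the data is synthetic:

import argparse
import numpy as np

params = argparse.Namespace(method='defaultDense', fit_intercept=True)

X = np.random.rand(1000, 8)
y = np.random.rand(1000, 1)

train_res = test_fit(X, y)                   # this example's helper
pred_res = test_predict(X, train_res.model)  # Example #11's helper
print(pred_res.prediction.shape)             # (1000, 1)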
Example #13
def test_fit(X, y):
    # DAAL expects the ridge penalty as a 2D array (a 1x1 numeric table here).
    regr_train = ridge_regression_training(fptype=getFPType(X),
                                           ridgeParameters=np.array(
                                               [[params.alpha]]),
                                           interceptFlag=params.fit_intercept)
    return regr_train.compute(X, y)