def main(readcsv=read_csv, method='defaultDense'):
    """Train and evaluate a brute-force kNN classifier on the batch data sets."""
    # Locate the input data sets
    training_path = os.path.join('data', 'batch', 'k_nearest_neighbors_train.csv')
    test_path = os.path.join('data', 'batch',
                             'k_nearest_neighbors_test.csv')

    # 5 features per observation; the label column follows them. 5 classes.
    nFeatures = 5
    nClasses = 5

    # Load training features and labels
    x_train = readcsv(training_path, range(nFeatures))
    y_train = readcsv(training_path, range(nFeatures, nFeatures + 1))

    # Train with explicit, equal observation weights; with uniform weights
    # the result must match an unweighted run
    trainer = d4p.bf_knn_classification_training(nClasses=nClasses)
    uniform_weights = np.ones((x_train.shape[0], 1))
    train_result = trainer.compute(x_train, y_train, uniform_weights)

    # Load the test set and its ground-truth labels
    x_test = readcsv(test_path, range(nFeatures))
    y_test = readcsv(test_path, range(nFeatures, nFeatures + 1))

    # Classify the test rows with the trained model
    predictor = d4p.bf_knn_classification_prediction(nClasses=nClasses)
    predict_result = predictor.compute(x_test, train_result.model)

    # We expect less than 170 mispredicted values
    assert np.count_nonzero(y_test != predict_result.prediction) < 170

    return (train_result, predict_result, y_test)
def compute(train_data, train_labels, predict_data, nClasses):
    """Fit a brute-force kNN model and classify predict_data with it."""
    # Training step: build the model from the labeled observations
    model = d4p.bf_knn_classification_training(
        nClasses=nClasses).compute(train_data, train_labels).model

    # Prediction step: classify the query rows against the fitted model
    return d4p.bf_knn_classification_prediction(
        nClasses=nClasses).compute(predict_data, model)
def main(readcsv=read_csv, method='defaultDense'):
    """Run brute-force kNN training and prediction on a GPU (SYCL) device.

    NOTE(review): this redefines the earlier ``main`` in this file; after
    import only this version is reachable. Consider renaming one of them.
    """
    # Input data set parameters
    train_file = os.path.join('..', 'data', 'batch',
                              'k_nearest_neighbors_train.csv')
    predict_file = os.path.join('..', 'data', 'batch',
                                'k_nearest_neighbors_test.csv')

    # Read data. Let's use 5 features per observation
    nFeatures = 5
    nClasses = 5
    train_data = readcsv(train_file, range(nFeatures))
    train_labels = readcsv(train_file, range(nFeatures, nFeatures + 1))
    predict_data = readcsv(predict_file, range(nFeatures))
    predict_labels = readcsv(predict_file, range(nFeatures, nFeatures + 1))

    # sycl_buffer below is fed plain numpy arrays
    train_data = to_numpy(train_data)
    train_labels = to_numpy(train_labels)
    predict_data = to_numpy(predict_data)

    # Prefer dppl's device context; fall back to daal4py's own SYCL context.
    # Catch ImportError only -- the original bare `except:` also swallowed
    # KeyboardInterrupt/SystemExit and hid unrelated failures in dppl itself.
    try:
        from dppl import device_context, device_type
        gpu_context = lambda: device_context(device_type.gpu, 0)
    except ImportError:
        from daal4py.oneapi import sycl_context
        gpu_context = lambda: sycl_context('gpu')

    # It is possible to specify to make the computations on GPU
    with gpu_context():
        sycl_train_data = sycl_buffer(train_data)
        sycl_train_labels = sycl_buffer(train_labels)
        sycl_predict_data = sycl_buffer(predict_data)

        # Create an algorithm object and call compute
        train_algo = d4p.bf_knn_classification_training(nClasses=nClasses)
        train_result = train_algo.compute(sycl_train_data, sycl_train_labels)

        # Create an algorithm object and call compute.
        # Pass nClasses explicitly, consistent with the training step and the
        # other examples in this file (the original relied on the prediction
        # algorithm's default class count).
        predict_algo = d4p.bf_knn_classification_prediction(nClasses=nClasses)
        predict_result = predict_algo.compute(sycl_predict_data,
                                              train_result.model)

    # We expect less than 170 mispredicted values
    assert np.count_nonzero(predict_labels != predict_result.prediction) < 170

    return (predict_result, predict_labels)