Example #1
0
def main(readcsv=None, method='defaultDense'):
    """Compute streaming covariance on the host CPU, then (when a GPU is
    available) inside a SYCL GPU context and a SYCL CPU context, asserting
    that all results agree with the classic computation.

    :param readcsv: optional CSV-reading callable forwarded to read_next
    :param method: computation method name (unused here, kept for API parity)
    :return: the classic (host CPU) covariance result object
    """
    infile = os.path.join('..', 'data', 'batch', 'covcormoments_dense.csv')

    # Using of the classic way (computations on CPU)
    # configure a covariance object
    algo = d4p.covariance(streaming=True)
    # get the generator (defined in stream.py)...
    rn = read_next(infile, 112, readcsv)
    # ... and iterate through chunks/stream
    for chunk in rn:
        algo.compute(chunk)
    # finalize computation
    result_classic = algo.finalize()

    # Prefer dpctx device contexts; fall back to daal4py's own sycl_context.
    try:
        from dpctx import device_context, device_type
        gpu_context = lambda: device_context(device_type.gpu, 0)
        cpu_context = lambda: device_context(device_type.cpu, 0)
    except ImportError:
        # Was a bare `except:`, which would also swallow KeyboardInterrupt
        # and mask unrelated errors; only an import failure should trigger
        # the fallback path.
        from daal4py.oneapi import sycl_context
        gpu_context = lambda: sycl_context('gpu')
        cpu_context = lambda: sycl_context('cpu')

    # It is possible to specify to make the computations on GPU
    # NOTE(review): gpu_available is assumed to be defined at module level.
    if gpu_available:
        with gpu_context():
            # configure a covariance object
            algo = d4p.covariance(streaming=True)
            # get the generator (defined in stream.py)...
            rn = read_next(infile, 112, readcsv)
            # ... and iterate through chunks/stream
            for chunk in rn:
                sycl_chunk = sycl_buffer(to_numpy(chunk))
                algo.compute(sycl_chunk)
            # finalize computation
            result_gpu = algo.finalize()
        assert np.allclose(result_classic.covariance, result_gpu.covariance)
        assert np.allclose(result_classic.mean, result_gpu.mean)
        assert np.allclose(result_classic.correlation, result_gpu.correlation)

    # It is possible to specify to make the computations on CPU
    with cpu_context():
        # configure a covariance object
        algo = d4p.covariance(streaming=True)
        # get the generator (defined in stream.py)...
        rn = read_next(infile, 112, readcsv)
        # ... and iterate through chunks/stream
        for chunk in rn:
            sycl_chunk = sycl_buffer(to_numpy(chunk))
            algo.compute(sycl_chunk)
        # finalize computation
        result_cpu = algo.finalize()

    # covariance result objects provide correlation, covariance and mean
    assert np.allclose(result_classic.covariance, result_cpu.covariance)
    assert np.allclose(result_classic.mean, result_cpu.mean)
    assert np.allclose(result_classic.correlation, result_cpu.correlation)

    return result_classic
Example #2
0
def main(readcsv=None, method='defaultDense'):
    """Compute streaming low-order moments three ways — classic host CPU,
    SYCL GPU context, and SYCL CPU context — and assert all three agree.

    :param readcsv: optional CSV-reading callable forwarded to read_next
    :param method: computation method name (unused here, kept for API parity)
    :return: the classic (host CPU) low_order_moments result object
    """
    # read data from file
    data_file = os.path.join('..', 'data', 'batch', 'covcormoments_dense.csv')

    def _stream_moments(via_sycl):
        # One full streaming pass over the input; optionally wraps each
        # chunk in a SYCL buffer so it is consumed inside a SYCL context.
        moments = d4p.low_order_moments(streaming=True)
        # the generator is defined in stream.py
        for block in read_next(data_file, 55, readcsv):
            if via_sycl:
                block = sycl_buffer(to_numpy(block))
            moments.compute(block)
        return moments.finalize()

    # Classic way: computations on the host CPU.
    result_classic = _stream_moments(False)

    # It is possible to specify to make the computations on GPU
    with sycl_context('gpu'):
        result_gpu = _stream_moments(True)

    # It is possible to specify to make the computations on CPU
    with sycl_context('cpu'):
        result_cpu = _stream_moments(True)

    # result provides minimum, maximum, sum, sumSquares, sumSquaresCentered,
    # mean, secondOrderRawMoment, variance, standardDeviation, variation
    for attr in ('minimum', 'maximum', 'sum', 'sumSquares',
                 'sumSquaresCentered', 'mean', 'secondOrderRawMoment',
                 'variance', 'standardDeviation', 'variation'):
        reference = getattr(result_classic, attr)
        assert np.allclose(reference, getattr(result_gpu, attr))
        assert np.allclose(reference, getattr(result_cpu, attr))

    return result_classic
def main(readcsv=None, method='defaultDense'):
    """Compute a covariance matrix in streaming mode over a CSV file.

    :param readcsv: optional CSV-reading callable forwarded to read_next
    :param method: computation method name (unused here, kept for API parity)
    :return: the finalized covariance result object
    """
    source = "./data/batch/covcormoments_dense.csv"

    # configure a covariance object for streaming
    covariance_algo = d4p.covariance(streaming=True)

    # feed the algorithm chunk by chunk (generator defined in stream.py)
    for block in read_next(source, 112, readcsv):
        covariance_algo.compute(block)

    # finalize and return the accumulated result
    return covariance_algo.finalize()
Example #4
0
def main(readcsv=None, method='svdDense'):
    """Compute a QR decomposition in streaming mode over a CSV file.

    :param readcsv: optional CSV-reading callable forwarded to read_next
    :param method: computation method name (unused here, kept for API parity)
    :return: the finalized QR result object (provides matrixQ and matrixR)
    """
    source = "./data/batch/qr.csv"

    # configure a QR object for streaming
    qr_algo = d4p.qr(streaming=True)

    # feed the algorithm chunk by chunk (generator defined in stream.py)
    for block in read_next(source, 112, readcsv):
        qr_algo.compute(block)

    # finalize and return the accumulated result
    return qr_algo.finalize()