Code example #1
import pycuda.autoinit
import pycuda.gpuarray as gpuarray
import numpy as np
import skcuda.linalg as linalg
from skcuda.linalg import PCA as cuPCA

# map the data to 4 dimensions
pca = cuPCA(n_components=4)

# 1000 samples of 100-dimensional data vectors
X = np.random.rand(1000,100)

# note that order="F" (Fortran / column-major) or a transpose is necessary: fit_transform expects Fortran-ordered matrices, while NumPy arrays are C-ordered (row-major) by default
X_gpu = gpuarray.GPUArray((1000,100), np.float64, order="F")

# copy data to gpu
X_gpu.set(X)

# calculate the principal components
T_gpu = pca.fit_transform(X_gpu)

# show that the resulting eigenvectors are orthogonal
dot_product = linalg.dot(T_gpu[:,0], T_gpu[:,1])
print(dot_product)
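# Hedged follow-up (not in the original snippet): mirror the full scikit-cuda demo and
# estimate how much of the total per-component spread the first two components capture.
std_vec = np.std(T_gpu.get(), axis=0)  # standard deviation along each principal component
print("First 2 components capture " + str(100 * np.sum(std_vec[:2]) / np.sum(std_vec)) + "% of the total spread")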
Code example #2
#!/usr/bin/env python

import pycuda.autoinit
import pycuda.gpuarray as gpuarray
import numpy as np
import skcuda.linalg as linalg
from skcuda.linalg import PCA as cuPCA
from matplotlib import pyplot as plt
from sklearn import datasets

iris = datasets.load_iris()
X_orig = iris.data
y = iris.target

pca = cuPCA(4)  # take all 4 principal components

demo_types = [np.float32, np.float64]  # we can use single or double precision
precisions = ['single', 'double']

print("Principal Component Analysis Demo!")
print("Compute 2 principal components of a 1000x4 IRIS data matrix")
print(
    "Lets test if the first two resulting eigenvectors (principal components) are orthogonal,"
    " by dotting them and seeing if it is about zero, then we can see the amount of the origial"
    " variance explained by just two of the original 4 dimensions. Then we will plot the reults"
    " for the double precision experiment.\n\n\n")

for i in range(len(demo_types)):

    demo_type = demo_types[i]
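    # Hedged completion: the snippet is truncated here. The lines below sketch the rest of the
    # loop, following the pattern of the other scikit-cuda PCA demos on this page; the plotting
    # step is an assumption based on the matplotlib/sklearn imports above.
    X = np.asfortranarray(X_orig.astype(demo_type))  # 150x4 IRIS data, Fortran-ordered for fit_transform
    X_gpu = gpuarray.GPUArray(X.shape, demo_type, order="F")
    X_gpu.set(X)  # copy data to gpu
    T_gpu = pca.fit_transform(X_gpu)  # calculate the principal components
    T = T_gpu.get()

    # check that the first two principal components are orthogonal
    dot_product = linalg.dot(T_gpu[:, 0], T_gpu[:, 1])
    print("The dot product of the two " + precisions[i] + " precision eigenvectors is: " + str(dot_product))

    # fraction of the total spread captured by the first two components
    std_vec = np.std(T, axis=0)
    print("We explain " + str(100 * np.sum(std_vec[:2]) / np.sum(std_vec)) +
          "% of the variance with 2 principal components in " + precisions[i] + " precision\n")

    # plot the double-precision projection, coloured by class label
    if demo_type == np.float64:
        plt.scatter(T[:, 0], T[:, 1], c=y)
        plt.title("IRIS projected onto the first two principal components")
        plt.show()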
Code example #3
#!/usr/bin/env python

# This is one of the demo routines from Givon et al.'s Scikit-Cuda
# https://github.com/lebedov/scikit-cuda

import pycuda.autoinit
import pycuda.gpuarray as gpuarray
import numpy as np
import skcuda.linalg as linalg
from skcuda.linalg import PCA as cuPCA
import skcuda.misc as cumisc
 
pca = cuPCA() # take all principal components

demo_types = [np.float32, np.float64] # we can use single or double precision
precisions = ['single', 'double']

print("Principal Component Analysis Demo!")
print("Compute all 100 principal components of a 1000x100 data matrix")
print("Lets test if the first two resulting eigenvectors (principal components) are orthogonal, by dotting them and seeing if it is about zero, then we can see the amount of the origial variance explained by just two of the original 100 dimensions.\n\n\n")

for i in range(len(demo_types)):

    demo_type = demo_types[i]
    # 1000 samples of 100-dimensional data vectors
    X = np.random.rand(1000, 100).astype(demo_type)
    # note that order="F" (Fortran / column-major) or a transpose is necessary:
    # fit_transform expects Fortran-ordered matrices, while NumPy arrays are C-ordered (row-major) by default
    X_gpu = gpuarray.GPUArray((1000, 100), demo_type, order="F")
    X_gpu.set(X)  # copy data to gpu
    T_gpu = pca.fit_transform(X_gpu)  # calculate the principal components
    # check that the first two principal components are orthogonal
    dot_product = linalg.dot(T_gpu[:, 0], T_gpu[:, 1])
    print("The dot product of the two " + str(precisions[i]) + " precision eigenvectors is: " + str(dot_product))
    # now get the variance of each eigenvector so we can see the percent explained by the first two
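    # Hedged completion: the snippet is truncated here; the lines below follow the
    # upstream scikit-cuda demo linked above.
    std_vec = np.std(T_gpu.get(), axis=0)
    print("We explain " + str(100 * np.sum(std_vec[:2]) / np.sum(std_vec)) +
          "% of the variance with 2 principal components in " + precisions[i] + " precision\n\n")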
Code example #4
File: pca.py  Project: tjohanne/PCA
# imports needed to make this excerpt runnable on its own
import pycuda.autoinit
import pycuda.gpuarray as gpuarray
import numpy as np
import pandas as pd
import skcuda.linalg as linalg
from datetime import datetime as dt
from skcuda.linalg import PCA as cuPCA

dataset_name = "mnist_784.csv"
data_dir = "../../files/"
file_name = data_dir + dataset_name
X = pd.read_csv(file_name)
if 'class' in X:
    X = X.drop('class', axis=1)
if 'target' in X:
    X = X.drop('target', axis=1)
if 'variety' in X:
    X = X.drop('variety', axis=1)
X = np.array(X.values, dtype="float64")
samples, features = X.shape
n_components = features
print(
    "SKCUDA Running PCA with {} features, {} samples, and {} n_components on dataset {}"
    .format(X.shape[1], X.shape[0], n_components, dataset_name))
time_init_pca = dt.now()
pca = cuPCA(n_components=n_components)  # keep all `features` principal components
X_gpu = gpuarray.GPUArray(
    (samples, features), np.float64, order="F"
)  # order="F" (Fortran / column-major) or a transpose is necessary: fit_transform expects Fortran-ordered matrices, while NumPy arrays are C-ordered (row-major) by default
X_gpu.set(X)  # copy data to gpu
T_gpu = pca.fit_transform(X_gpu)  # calculate the principal components
print(linalg.dot(T_gpu[:, 0], T_gpu[:, 1]))
print("get_n_components()", pca.get_n_components())

print("SKCUDA Total time {}ms".format(
    (dt.now() - time_init_pca).microseconds / 1000))
Code example #5
def show_local_descr(model_dir, im_fls, cls):
    vh = calc2.vh
    vw = calc2.vw
    im_fls = im_fls.split(',')
    assert len(im_fls) == 3
    import pycuda.autoinit
    import pycuda.gpuarray as gpuarray
    import skcuda.linalg as linalg
    from skcuda.linalg import PCA as cuPCA
    import skcuda.misc as cumisc
    import matplotlib.patches as mpatches

    N = 2
    ims = np.empty((3, vh, vw, 3), dtype=np.float32)
    for i in range(len(im_fls)):
        ims[i] = cv2.cvtColor(cv2.resize(cv2.imread(im_fls[i]),
                                         (vw, vh)), cv2.COLOR_BGR2RGB) / 255.
    with tf.Session() as sess:
        calc = utils.CALC2(model_dir, sess)
        d = calc.run(ims).reshape(
            (3, vh // 16 * vw // 16, 4 * (1 + len(calc_classes.keys()))))
        didx = 4 * (1 + calc_classes[cls[0]])
        d_cls = d[:, :, didx:didx + 4].reshape(3, -1)
        didx2 = 4 * (1 + calc_classes[cls[1]])
        d_cls2 = d[:, :, didx2:didx2 + 4].reshape(3, -1)
        d_app = d[:, :, :4].reshape(3, -1)

    pca = cuPCA(N)
    dcls_gpu = gpuarray.GPUArray(d_cls.shape, np.float32, order="F")
    dcls_gpu.set(d_cls)  # copy data to gpu
    dcc1 = pca.fit_transform(
        dcls_gpu).get()  # calculate the principal components
    dcc1 = dcc1 / np.linalg.norm(dcc1, axis=-1)[..., np.newaxis]

    dcls2_gpu = gpuarray.GPUArray(d_cls2.shape, np.float32, order="F")
    dcls2_gpu.set(d_cls2)  # copy data to gpu
    dcc2 = pca.fit_transform(
        dcls2_gpu).get()  # calculate the principal components
    dcc2 = dcc2 / np.linalg.norm(dcc2, axis=-1)[..., np.newaxis]

    dapp_gpu = gpuarray.GPUArray(d_app.shape, np.float32, order="F")
    dapp_gpu.set(d_app)  # copy data to gpu
    dac = pca.fit_transform(
        dapp_gpu).get()  # calculate the principal components
    dac = dac / np.linalg.norm(dac, axis=-1)[..., np.newaxis]

    minx = -1.1  #min(np.min(dac[:,0]), np.min(dcc1[:,0]))-.1
    maxx = 1.1  # max(np.max(dac[:,0]), np.max(dcc1[:,0]))+.1
    miny = -1.1  #min(np.min(dac[:,1]), np.min(dcc1[:,1]))-.1
    maxy = 1.1  #max(np.max(dac[:,1]), np.max(dcc1[:,1]))+.1
    '''
    minz = min(np.min(dac[:,2]), np.min(dcc[:,2]))
    maxz = max(np.max(dac[:,2]), np.max(dcc[:,2]))
    '''
    x = np.zeros_like(dac[:, 0])

    rcParams['font.sans-serif'] = 'DejaVu Sans'
    rcParams['font.size'] = 10
    rcParams['patch.linewidth'] = .5
    rcParams['figure.figsize'] = [8.0, 3.0]
    rcParams['figure.subplot.bottom'] = 0.2
    rcParams['savefig.dpi'] = 200.0
    rcParams['figure.dpi'] = 200.0

    fig = plt.figure()
    ax = fig.add_subplot(131, aspect='equal')
    ax.quiver(x,
              x,
              dcc1[:, 0],
              dcc1[:, 1],
              color=['b', 'g', 'r'],
              scale=1,
              units='xy',
              width=.02)
    ax.set_xticks([])
    ax.set_yticks([])
    ax.set_xlim([minx, maxx])
    ax.set_ylim([miny, maxy])
    plt.title(cls[0])

    ax = fig.add_subplot(132, aspect='equal')
    ax.quiver(x,
              x,
              dcc2[:, 0],
              dcc2[:, 1],
              color=['b', 'g', 'r'],
              scale=1,
              units='xy',
              width=.02)
    ax.set_xticks([])
    ax.set_yticks([])
    ax.set_xlim([minx, maxx])
    ax.set_ylim([miny, maxy])
    plt.title(cls[1])

    ax = fig.add_subplot(133, aspect='equal')
    ax.quiver(x,
              x,
              dac[:, 0],
              dac[:, 1],
              color=['b', 'g', 'r'],
              scale=1,
              units='xy',
              width=.02)
    ax.set_xticks([])
    ax.set_yticks([])
    ax.set_xlim([minx, maxx])
    ax.set_ylim([miny, maxy])
    plt.title('appearance')

    l1 = mpatches.Patch(color='b', label='database')
    l2 = mpatches.Patch(color='g', label='positive')
    l3 = mpatches.Patch(color='r', label='negative')
    h = plt.legend(handles=[l1, l2, l3])
    h.get_frame().set_alpha(0.0)  # translucent legend :D
    h.set_draggable(True)
    plt.show()
Code example #6
#!/usr/bin/env python

# This is one of the demo routines from Givon et al.'s Scikit-Cuda
# https://github.com/lebedov/scikit-cuda

import pycuda.autoinit
import pycuda.gpuarray as gpuarray
import numpy as np
import skcuda.linalg as linalg
from skcuda.linalg import PCA as cuPCA
import skcuda.misc as cumisc

pca = cuPCA()  # take all principal components

demo_types = [np.float32, np.float64]  # we can use single or double precision
precisions = ['single', 'double']

print("Principal Component Analysis Demo!")
print("Compute all 100 principal components of a 1000x100 data matrix")
print(
    "Let's test whether the first two resulting eigenvectors (principal components) are orthogonal by dotting them and checking that the result is about zero; then we can see the amount of the original variance explained by just two of the original 100 dimensions.\n\n\n"
)

for i in range(len(demo_types)):

    demo_type = demo_types[i]
    X = np.random.rand(1000, 100).astype(
        demo_type)  # 1000 samples of 100-dimensional data vectors
    X_gpu = gpuarray.GPUArray(
        (1000, 100), demo_type, order="F"
    )  # order="F" (Fortran / column-major) or a transpose is necessary: fit_transform expects Fortran-ordered matrices, while NumPy arrays are C-ordered (row-major) by default
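    # Hedged completion: the snippet is truncated here; the lines below follow the
    # upstream scikit-cuda demo linked above (and Code example #3).
    X_gpu.set(X)  # copy data to gpu
    T_gpu = pca.fit_transform(X_gpu)  # calculate the principal components
    # check that the first two principal components are orthogonal
    dot_product = linalg.dot(T_gpu[:, 0], T_gpu[:, 1])
    print("The dot product of the two " + str(precisions[i]) + " precision eigenvectors is: " + str(dot_product))
    # now get the variance of each eigenvector so we can see the percent explained by the first two
    std_vec = np.std(T_gpu.get(), axis=0)
    print("We explain " + str(100 * np.sum(std_vec[:2]) / np.sum(std_vec)) +
          "% of the variance with 2 principal components in " + precisions[i] + " precision\n\n")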