Code example #1
def run_algorithm(n_test, tree_depth=None, algorithm="buffer_kd_tree"):

    print("----------------------------------------------------------------------")
    print("\n\nRunning %s for n_test=%i ...\n" % (algorithm, n_test))
    print("----------------------------------------------------------------------")

    Xtest_local = Xtest[:n_test, :]

    # instantiate model
    nbrs = NearestNeighbors(n_neighbors=n_neighbors, \
                            algorithm=algorithm, \
                            n_jobs=n_jobs, \
                            tree_depth=opt_tree_depth, \
                            plat_dev_ids=plat_dev_ids, \
                            verbose=verbose)
                
    # train model
    start_time = time.time()
    nbrs.fit(Xtrain)
    end_time = time.time()
    train_time = (end_time - start_time)
    print("Fitting time: %f" % train_time)

    # apply model (testing phase)
    start_time = time.time()
    _, _ = nbrs.kneighbors(Xtest_local)
    end_time = time.time()
    test_time = (end_time - start_time)
    print("Testing time: %f" % test_time)
    
    return train_time, test_time
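
This benchmark function relies on module-level globals (Xtrain, Xtest, n_neighbors, n_jobs, plat_dev_ids, verbose, opt_tree_depth) that the surrounding script defines. A minimal sketch of such a setup, using random data purely for illustration; the real benchmark.py loads its own dataset and obtains opt_tree_depth from the helper in the next example:

import time
import numpy
from bufferkdtree.neighbors import NearestNeighbors

# hypothetical global setup; benchmark.py defines these elsewhere
n_neighbors = 10
n_jobs = 1
verbose = 0
plat_dev_ids = {0: [0]}   # OpenCL platform 0, device 0
opt_tree_depth = 9        # normally obtained from compute_opt_tree_depth()

Xtrain = numpy.random.uniform(low=-1, high=1, size=(100000, 10)).astype(numpy.float32)
Xtest = numpy.random.uniform(low=-1, high=1, size=(10000, 10)).astype(numpy.float32)

train_time, test_time = run_algorithm(n_test=5000, algorithm="buffer_kd_tree")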
Code example #2
def compute_opt_tree_depth(algorithm, n_test_tree=2000000):

    opt_tree_depth = None

    if algorithm in ["buffer_kd_tree", "kd_tree"]:

        # the different tree depths that shall
        # be tested for this data set
        if algorithm == "buffer_kd_tree":
            tree_depths = range(4, 12)
        elif algorithm == "kd_tree":
            tree_depths = range(8, 16)

        # search for optimal tree depth
        nbrs_tree_test = NearestNeighbors(n_neighbors=n_neighbors, \
                                     algorithm=algorithm, \
                                     n_jobs=n_jobs, \
                                     plat_dev_ids=plat_dev_ids, \
                                     verbose=verbose)
        opt_tree_depth = nbrs_tree_test.compute_optimal_tree_depth(
            Xtrain,
            Xtest[:n_test_tree],
            target="test",
            tree_depths=tree_depths)
        print("Optimal tree depth found: %i " % opt_tree_depth)
    return opt_tree_depth
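
The returned depth is meant to be stored in the global opt_tree_depth that run_algorithm above reads. A short usage sketch, assuming the globals from the previous sketch are in place (the smaller n_test_tree value here is only an illustration):

# pick a depth once per algorithm, then benchmark with it
opt_tree_depth = compute_opt_tree_depth("buffer_kd_tree", n_test_tree=100000)
train_time, test_time = run_algorithm(n_test=5000, algorithm="buffer_kd_tree")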
Code example #3
File: benchmark.py  Project: chidcha/bufferkdtree
def run_algorithm(n_test, tree_depth=None, algorithm="buffer_kd_tree"):

    print("----------------------------------------------------------------------")
    print("\n\nRunning %s for n_test=%i ...\n" % (algorithm, n_test))
    print("----------------------------------------------------------------------")

    Xtest_local = Xtest[:n_test, :]

    # instantiate model
    nbrs = NearestNeighbors(n_neighbors=n_neighbors, \
                            algorithm=algorithm, \
                            n_jobs=n_jobs, \
                            tree_depth=opt_tree_depth, \
                            plat_dev_ids=plat_dev_ids, \
                            verbose=verbose)
                
    # train model
    start_time = time.time()
    nbrs.fit(Xtrain)
    end_time = time.time()
    train_time = (end_time-start_time)
    print("Fitting time: %f" % train_time)

    # apply model (testing phase)
    start_time = time.time()
    dists, inds = nbrs.kneighbors(Xtest_local)
    end_time = time.time()
    test_time = (end_time-start_time)
    # print the distances outside the timed block so the measurement stays clean
    print("dists=", dists)
    print("Testing time: %f" % test_time)

    # store results 
    if algorithm not in results.keys():
        results[algorithm] = {}
    results[algorithm][n_test] = {'train':train_time, 'test':test_time, 'opt_tree_depth':opt_tree_depth}
Code example #4
File: bufferkd.py  Project: POSTECH-CVLab/NNBenchmark
def prepare_input(self, x, y):
    tree = NearestNeighbors(
        algorithm="buffer_kd_tree",
        plat_dev_ids=plat_dev_ids,
        tree_depth=9,
    )
    tree.fit(x)
    return tree, y
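
Here the fitted tree and the query points are returned as a pair, presumably so a separate routine can time the query step on its own. A hypothetical caller; the instance name wrapper, the random data, and plat_dev_ids are assumptions, not part of NNBenchmark:

import numpy
plat_dev_ids = {0: [0]}   # assumed module-level OpenCL platform/device mapping

x = numpy.random.rand(50000, 8).astype(numpy.float32)
y = numpy.random.rand(1000, 8).astype(numpy.float32)

tree, queries = wrapper.prepare_input(x, y)   # wrapper: hypothetical instance of the benchmark class
dists, inds = tree.kneighbors(queries, n_neighbors=5)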
Code example #5
def run_algorithm(n_test, tree_depth=None, algorithm="buffer_kd_tree"):

    print(
        "----------------------------------------------------------------------"
    )
    print("\n\nRunning %s for n_test=%i ...\n" % (algorithm, n_test))
    print(
        "----------------------------------------------------------------------"
    )

    Xtest_local = Xtest[:n_test, :]

    # instantiate model
    nbrs = NearestNeighbors(n_neighbors=n_neighbors, \
                            algorithm=algorithm, \
                            n_jobs=n_jobs, \
                            tree_depth=opt_tree_depth, \
                            plat_dev_ids=plat_dev_ids, \
                            verbose=verbose)

    # train model
    start_time = time.time()
    nbrs.fit(Xtrain)
    end_time = time.time()
    train_time = (end_time - start_time)
    print("Fitting time: %f" % train_time)

    # apply model (testing phase)
    start_time = time.time()
    dists, inds = nbrs.kneighbors(Xtest_local)
    end_time = time.time()
    test_time = (end_time - start_time)
    print("Testing time: %f" % test_time)

    # store results
    if algorithm not in results.keys():
        results[algorithm] = {}
    results[algorithm][n_test] = {
        'train': train_time,
        'test': test_time,
        'opt_tree_depth': opt_tree_depth
    }
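
In this variant the timings are accumulated in a module-level results dictionary keyed by algorithm and test-set size instead of being returned. A small sketch of how the script might initialize and later read it, assuming the globals sketched after code example #1 (the loop and the report format are assumptions):

# assumed module-level container, filled by repeated run_algorithm calls
results = {}

for n_test in [1000, 10000, 100000]:
    run_algorithm(n_test, algorithm="buffer_kd_tree")

for algorithm, runs in results.items():
    for n_test, timings in sorted(runs.items()):
        print("%s n_test=%i: train=%.3fs test=%.3fs (tree depth=%s)"
              % (algorithm, n_test, timings['train'], timings['test'],
                 timings['opt_tree_depth']))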
Code example #6
File: benchmark.py  Project: chidcha/bufferkdtree
def compute_opt_tree_depth(algorithm, n_test_tree=2000000):

    opt_tree_depth = None

    if algorithm in ["buffer_kd_tree", "kd_tree"]:

        # the different tree depths that shall
        # be tested for this data set
        if algorithm == "buffer_kd_tree":
            tree_depths = range(4,12)
        elif algorithm == "kd_tree":
            tree_depths = range(8,16)

        # search for optimal tree depth
        nbrs_tree_test = NearestNeighbors(n_neighbors=n_neighbors, \
                                     algorithm=algorithm, \
                                     n_jobs=n_jobs, \
                                     plat_dev_ids=plat_dev_ids, \
                                     verbose=verbose)
        opt_tree_depth = nbrs_tree_test.compute_optimal_tree_depth(Xtrain, Xtest[:n_test_tree], target="test", tree_depths=tree_depths)
        print("Optimal tree depth found: %i " % opt_tree_depth)
    return opt_tree_depth
Code example #7
File: skcompare.py  Project: gieseke/bufferkdtree
def run_algorithm(n_test_local, leaf_size=30, algorithm="kd_tree"):

    print("----------------------------------------------------------------------")
    print("\n\nRunning %s for n_test=%i ...\n" % (algorithm, n_test_local))
    print("----------------------------------------------------------------------")

    Xtest_local = Xtest[:n_test_local, :]

    # instantiate model
    if algorithm == "kd_tree":
        nbrs = NearestNeighbors(n_neighbors=n_neighbors, 
                                algorithm=algorithm, 
                                n_jobs=n_jobs, 
                                leaf_size=leaf_size, 
                                verbose=verbose)
    else:
        nbrs = NearestNeighborsSKLEARN(n_neighbors=n_neighbors, 
                                       algorithm="kd_tree", 
                                       n_jobs=n_jobs, 
                                       leaf_size=leaf_size,                                        
                                       )
                
    # train model
    start_time = time.time()
    nbrs.fit(Xtrain)
    end_time = time.time()
    train_time = (end_time - start_time)
    print("Fitting time: %f" % train_time)

    # apply model (testing phase)
    start_time = time.time()
    _, _ = nbrs.kneighbors(Xtest_local)
    end_time = time.time()
    test_time = (end_time - start_time)
    print("Testing time: %f" % test_time)
    
    return train_time, test_time
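
NearestNeighborsSKLEARN is presumably scikit-learn's NearestNeighbors imported under an alias so it can be timed next to the bufferkdtree implementation. A sketch of the imports and calls this comparison script likely assumes; the "sklearn" label below is only a placeholder, since any value other than "kd_tree" falls through to the scikit-learn branch:

import time
from bufferkdtree.neighbors import NearestNeighbors
# assumed alias so both estimators can coexist in one module
from sklearn.neighbors import NearestNeighbors as NearestNeighborsSKLEARN

train_bkd, test_bkd = run_algorithm(10000, algorithm="kd_tree")   # bufferkdtree k-d tree
train_skl, test_skl = run_algorithm(10000, algorithm="sklearn")   # hits the scikit-learn branch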
Code example #8
File: skcompare.py  Project: SOLab/bufferkdtree
def run_algorithm(n_test_local, leaf_size=30, algorithm="kd_tree"):

    print("----------------------------------------------------------------------")
    print(("\n\nRunning %s for n_test=%i ...\n" % (algorithm, n_test_local)))
    print("----------------------------------------------------------------------")

    Xtest_local = Xtest[:n_test_local, :]

    # instantiate model
    if algorithm == "kd_tree":
        nbrs = NearestNeighbors(n_neighbors=n_neighbors, 
                                algorithm=algorithm, 
                                n_jobs=n_jobs, 
                                leaf_size=leaf_size, 
                                verbose=verbose)
    else:
        nbrs = NearestNeighborsSKLEARN(n_neighbors=n_neighbors, 
                                       algorithm="kd_tree", 
                                       n_jobs=n_jobs, 
                                       leaf_size=leaf_size,                                        
                                       )
                
    # train model
    start_time = time.time()
    nbrs.fit(Xtrain)
    end_time = time.time()
    train_time = (end_time - start_time)
    print(("Fitting time: %f" % train_time))

    # apply model (testing phase)
    start_time = time.time()
    _, _ = nbrs.kneighbors(Xtest_local)
    end_time = time.time()
    test_time = (end_time - start_time)
    print(("Testing time: %f" % test_time))
    
    return train_time, test_time
Code example #9
def buildGraph(ip):
        """Builds the knn grap with intial params.
        params:
        ------
        ip: initial params

        return: 
        ------
        graph: graph object of Graph 
        """
        # find the nearest neighbors on the gpu
        start = time()
        nbrs = NearestNeighbors(n_neighbors=ip.k+1, algorithm="buffer_kd_tree", tree_depth=9, plat_dev_ids={0:[0]})    
        nbrs.fit(ip.signal)
        dists, inds = nbrs.kneighbors(ip.signal)  

        dists_gpu = dists
        dists_gpu = dists_gpu[0:,1:]
        dists_gpu = unroll(dists_gpu)
        dists_gpu = dists_gpu.astype('float32')

        ngbrs_gpu = inds
        ngbrs_gpu = ngbrs_gpu[0:,1:]
        ngbrs_gpu = unroll(ngbrs_gpu)
        ngbrs_gpu = ngbrs_gpu.astype('int32')

        k = ip.k
        scale = ip.sigma
        n, chnl = ip.signal.shape

        # now build the graph using those nns using gpu
        platform = cl.get_platforms()[0]
        print(platform)
        device = platform.get_devices()[0]
        print(device)
        context = cl.Context([device])
        print(context)
        program = cl.Program(context, open(mywf).read()).build()
        print(program)
        queue = cl.CommandQueue(context)
        print(queue)

        # create the buffers on the device: distances (read-only) and weights (write-only)
        mem_flags = cl.mem_flags
        dists_buf = cl.Buffer(context, mem_flags.READ_ONLY | mem_flags.COPY_HOST_PTR,hostbuf=dists_gpu)
        weight_vec = np.ndarray(shape=(n*k,), dtype=np.float32)
        weight_buf = cl.Buffer(context, mem_flags.WRITE_ONLY, weight_vec.nbytes)

        # run the kernel to compute the weights
        program.compute_weights(queue, (n*k,), None,  dists_buf, weight_buf, np.int32(k), np.float32(scale))
        queue.finish()

        # copy the weights to the host memory
        cl.enqueue_copy(queue, weight_vec, weight_buf)
        queue.finish()
        end = time() - start

        print('total time taken by the gpu python:', end)
        # save the graph
        graph = Graph(weight_vec,ngbrs_gpu,k)
        return graph
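
The helper unroll is not defined in this example; judging by how its output is copied into flat OpenCL buffers, it appears to flatten the (n, k) distance and neighbour matrices into contiguous 1-D arrays. A minimal sketch under that assumption:

import numpy as np

def unroll(a):
    # assumed behaviour: flatten an (n, k) array into a contiguous 1-D vector
    # so it can be copied into a flat OpenCL device buffer
    return np.ascontiguousarray(a).reshape(-1)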
Code example #10
File: util.py  Project: chidcha/bufferkdtree
def _conduct_tree_depths_comparison(params, Xtrain, Xtest, target="test", tree_depths=None, verbose=1):
    
    runtimes = {}
    
    model = NearestNeighbors(**params)
    
    if target == "test":            
        
        for tree_depth in tree_depths:

            #model = copy.deepcopy(model)
            model.tree_depth = tree_depth
            model.fit(Xtrain)

            start = time.time()
            model.kneighbors(Xtest)
            end = time.time()

            if model.verbose:
                print("tree_depth %i -> %f" % (tree_depth, end - start))
            runtimes[tree_depth] = end - start
                    
    elif target == "train":

        for tree_depth in tree_depths:

            #model = copy.deepcopy(model)
            model.tree_depth = tree_depth
            start = time.time()
            model.fit(Xtrain)
            end = time.time()
            
            if model.verbose:
                print("tree_depth %i -> %f" % (tree_depth, end - start))
            runtimes[tree_depth] = end - start 
                            
    elif target == "both":
        
        for tree_depth in tree_depths:
                        
            #model = copy.deepcopy(model)
            model.tree_depth = tree_depth
            start = time.time()
            model.fit(Xtrain)
            model.kneighbors(Xtest)
            end = time.time()
            
            if verbose > 0:
                print("tree_depth %i -> %f" % (tree_depth, end - start))
            runtimes[tree_depth] = end - start

    else:

        raise Exception("Unknown target: " + unicode(target))

    return min(runtimes, key=runtimes.get)
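
_conduct_tree_depths_comparison is the helper behind compute_optimal_tree_depth seen in the earlier examples: it fits and/or queries once per candidate depth and returns the depth with the smallest runtime. A hedged usage sketch; the params dictionary and the Xtrain/Xtest arrays are assumptions mirroring the other examples on this page:

# hypothetical call; params mirrors the keyword arguments used elsewhere on this page
params = {
    "n_neighbors": 10,
    "algorithm": "buffer_kd_tree",
    "n_jobs": 1,
    "plat_dev_ids": {0: [0]},
    "verbose": 1,
}
best_depth = _conduct_tree_depths_comparison(params, Xtrain, Xtest[:100000],
                                             target="test",
                                             tree_depths=range(4, 12))
print("best tree depth:", best_depth)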
Code example #11
File: graph_gpu.py  Project: Exuvi/Graph-GPU-Clone
ap.add_argument("-s",
                "--s",
                type=float,
                required=True,
                help="scale value in the graph")
args = vars(ap.parse_args())

position = numpy.load(args["position"])
position = position['position']
position = position.astype('float32')
print(position.shape)

# find the nearest neighbors on the gpu
start = time()
nbrs = NearestNeighbors(n_neighbors=args["k"] + 1,
                        algorithm="buffer_kd_tree",
                        tree_depth=9,
                        plat_dev_ids={0: [0]})  # use the arg parser here
nbrs.fit(position)
dists, inds = nbrs.kneighbors(position)

# now build the graph using those nns using gpu
platform = cl.get_platforms()[0]
print(platform)

device = platform.get_devices()[0]
print(device)

context = cl.Context([device])
print(context)

program = cl.Program(context, open("kernels.cl").read()).build()
Code example #12
File: artificial.py  Project: chidcha/bufferkdtree
# Authors: Fabian Gieseke 
# Licence: GNU GPL (v2)

import numpy
from bufferkdtree.neighbors import NearestNeighbors

n_neighbors = 10
plat_dev_ids = {0:[0]}
n_jobs = 1
verbose = 0

X = numpy.random.uniform(low=-1, high=1, size=(10000,10))

# (1) apply buffer k-d tree implementation
nbrs_buffer_kd_tree = NearestNeighbors(algorithm="buffer_kd_tree", \
                        tree_depth=9, \
                        plat_dev_ids=plat_dev_ids, \
                        verbose=verbose)    
nbrs_buffer_kd_tree.fit(X)
dists, inds = nbrs_buffer_kd_tree.kneighbors(X, n_neighbors=n_neighbors)
print("\nbuffer_kd_tree output\n" + unicode(dists[0]))

# (2) apply brute-force implementation
nbrs_brute = NearestNeighbors(algorithm="brute", \
                        plat_dev_ids=plat_dev_ids, \
                        verbose=verbose)    
nbrs_brute.fit(X)
dists, inds = nbrs_brute.kneighbors(X, n_neighbors=n_neighbors)
print("\nbrute output\n" + unicode(dists[0]))

# (3) apply k-d tree implementation
nbrs_kd_tree = NearestNeighbors(algorithm="kd_tree", \
                        n_jobs=n_jobs, \
                        verbose=verbose)
nbrs_kd_tree.fit(X)
dists, inds = nbrs_kd_tree.kneighbors(X, n_neighbors=n_neighbors)
print("\nkd_tree output\n" + str(dists[0]))
Code example #13
def _conduct_tree_depths_comparison(params,
                                    Xtrain,
                                    Xtest,
                                    target="test",
                                    tree_depths=None,
                                    verbose=1):

    runtimes = {}

    model = NearestNeighbors(**params)

    if target == "test":

        for tree_depth in tree_depths:

            #model = copy.deepcopy(model)
            model.tree_depth = tree_depth
            model.fit(Xtrain)

            start = time.time()
            model.kneighbors(Xtest)
            end = time.time()

            if model.verbose:
                print("tree_depth %i -> %f" % (tree_depth, end - start))
            runtimes[tree_depth] = end - start

    elif target == "train":

        for tree_depth in tree_depths:

            #model = copy.deepcopy(model)
            model.tree_depth = tree_depth
            start = time.time()
            model.fit(Xtrain)
            end = time.time()

            if model.verbose:
                print("tree_depth %i -> %f" % (tree_depth, end - start))
            runtimes[tree_depth] = end - start

    elif target == "both":

        for tree_depth in tree_depths:

            #model = copy.deepcopy(model)
            model.tree_depth = tree_depth
            start = time.time()
            model.fit(Xtrain)
            model.kneighbors(Xtest)
            end = time.time()

            if verbose > 0:
                print("tree_depth %i -> %f" % (tree_depth, end - start))
            runtimes[tree_depth] = end - start

    else:

        raise Exception("Unknown target: " + unicode(target))

    return min(runtimes, key=runtimes.get)
Code example #14
def buildGraph(ip, dev=0):
        """Builds the knn grap with intial params.
        params:
        ------
        ip: initial params

        return: 
        ------
        graph: graph object of Graph 
        """

        start = time()
        nbrs = NearestNeighbors(n_neighbors = ip.k + 1, algorithm="buffer_kd_tree", tree_depth=9, plat_dev_ids={0:[0]})    
        nbrs.fit(ip.position)
        dists, inds = nbrs.kneighbors(ip.position)
        print("success") if bool_1 else print()

        # now build the graph using those nns using gpu
        platform = cl.get_platforms()[0]
        print(platform)
        device = platform.get_devices()[dev]
        print(device)
        context = cl.Context([device])
        print(context)
        program = cl.Program(context, open(mywf).read()).build()
        print(program)
        queue = cl.CommandQueue(context)
        print(queue)
        
        # define the first input here, which is the ngbrs gpu array
        ngbrs_gpu = inds
        ngbrs_gpu = ngbrs_gpu[0:,1:]
        ngbrs_gpu = unroll(ngbrs_gpu)
        ngbrs_gpu = ngbrs_gpu.astype('int32')
        
         # define the second input here which is the signal levels
        signal =  ip.signal
        n, chnl = signal.shape
        signal = np.reshape(signal,(n*chnl,),order='F')
        signal = signal.astype('float32')
        print("signal",signal.shape) if bool_1 else print()
        k = ip.k
        print("n is :", n) if bool_1 else print()
        scale = ip.sigma
        
        # create the buffers on the device: neighbours, signal, and weights
        mem_flags = cl.mem_flags
        ngbrs_buf = cl.Buffer(context, mem_flags.READ_ONLY | mem_flags.COPY_HOST_PTR,hostbuf=ngbrs_gpu)
        signal_buf = cl.Buffer(context, mem_flags.READ_ONLY | mem_flags.COPY_HOST_PTR, hostbuf=signal)
        weight_vec = np.ndarray(shape=(n*k,), dtype=np.float32)
        weight_buf = cl.Buffer(context, mem_flags.WRITE_ONLY, weight_vec.nbytes)
        
         # run the kernel to compute the weights
        program.compute_weights(queue, (n,), None, signal_buf,  ngbrs_buf, weight_buf, np.int32(k), np.float32(scale), np.int32(chnl))
        
        queue.finish() #OT
        
        # copy the weights to the host memory
        cl.enqueue_copy(queue, weight_vec, weight_buf)
        end = time() - start
        
        print('total time taken by the gpu python:', end) if bool_1 else print()
        # save the graph
        graph = Graph(weight_vec,ngbrs_gpu,k)
        return graph