Example #1
import pytest
from falkon.kernels import GaussianKernel, LinearKernel, PolynomialKernel

# Parametrized pytest fixture: request.param selects which kernel to build.
# (The decorator params and the import path are assumed; only the function
# body appears in the original excerpt.)
@pytest.fixture(params=[1, 2, 3])
def kernel(request):
    if request.param == 1:
        return GaussianKernel(sigma=1)
    elif request.param == 2:
        return LinearKernel()
    elif request.param == 3:
        return PolynomialKernel(1.2, 3, 2.5)
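Used with pytest, a parametrized fixture like the one above is consumed by naming it as a test argument; pytest then runs the test once per param value. The sketch below is illustrative only: the test name, tensor shapes, and the mmv call are assumptions layered on top of the example, not part of the original code.

import torch

def test_kernel_mmv_shape(kernel):
    # Executed once per fixture param (i.e. once per kernel type).
    X = torch.randn(20, 5)
    v = torch.randn(20, 1)
    out = kernel.mmv(X, X, v)    # kernel-vector product: K(X, X) @ v
    assert out.shape == (20, 1)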
Example #2
def k1():
    # Gaussian (RBF) kernel with bandwidth sigma=1.
    return GaussianKernel(sigma=1)
Example #3
def kernel(self):
    # Method from a test class (class definition not shown); large-bandwidth Gaussian kernel.
    return GaussianKernel(100.0)
Example #4
                                    globals=_vars,
                                    number=1,
                                    repeat=exp['repetitions'])
            exp['timings'].append(min(timings))  # keep the best (minimum) of the repeated runs
            print(exp, flush=True)
            torch.cuda.empty_cache()  # release cached GPU memory between experiments
    return experiments


if __name__ == "__main__":
    aparse = argparse.ArgumentParser(description="MMV experiment runner")
    aparse.add_argument('--num-gpus', type=int, required=True)
    args = aparse.parse_args()
    num_gpus = args.num_gpus

    kernel = GaussianKernel(3.0)
    Ns = [
        1_000, 5_000, 20_000, 50_000, 100_000, 200_000, 400_000, 600_000,
        1_000_000, 2_000_000, 10_000_000, 50_000_000, 100_000_000
    ]
    KeopsDs = [10, 50, 100, 250, 500, 750, 1000, 1250]
    OurDs = [
        10, 50, 100, 250, 500, 750, 1000, 1250, 1500, 2000, 2500, 3000, 4000,
        5000, 7000, 10000
    ]
    defaultM = 20_000
    defaultN = 20_000
    defaultT = 10
    defaultD = 10

    experiments = [
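For context, the timing pattern used inside the loop of this example can be reproduced in isolation with timeit.repeat: each experiment runs the statement once per repetition (number=1) and keeps the best time. The sketch below is an assumption-laden reconstruction, not the original script: the experiment dict only mirrors the 'repetitions' and 'timings' keys visible above, and the timed statement (kernel.mmv) and data sizes are illustrative.

import timeit

import torch
from falkon.kernels import GaussianKernel

# Hypothetical single experiment; only 'repetitions' and 'timings' are taken
# from the excerpt above, everything else is illustrative.
exp = {'repetitions': 5, 'timings': []}
kernel = GaussianKernel(3.0)
X = torch.randn(20_000, 10)
v = torch.randn(20_000, 10)
_vars = {'kernel': kernel, 'X': X, 'v': v}

timings = timeit.repeat("kernel.mmv(X, X, v)",
                        globals=_vars,
                        number=1,
                        repeat=exp['repetitions'])
exp['timings'].append(min(timings))  # best of the repeated runs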
Example #5
def kernel():
    return GaussianKernel(10.0)