Example #1
def run_10(folder, icpc, ml, sl, sc, s_i):
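    # Note: `root` (the base output directory), the `os` import, and the `bm`
    # benchmark module used below are defined elsewhere in the file this
    # fragment was taken from.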
    for i in range(s_i, s_i+10):
        d = root + "/" + folder + "/" + str(i)
        if not os.path.exists(d):
            os.mkdir(d)
        os.chdir(d)

        with open('details.txt', 'w') as f:
            f.write(str(icpc) + "\t" + str(ml) + "\t" + str(sl) + "\t" + str(sc) + "\n")

        bm.main([icpc, ml, sl, sc])
Example #2
def process(function, argument):
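    # Note: `get_code`, `InputError`, and the `indicator`/`benchmark` modules are
    # defined or imported elsewhere in the file this fragment was taken from.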
    code = get_code(argument)
    if code is None:
        raise InputError
    if function == 'indicator':
        print(indicator.main(code))
    elif function == 'benchmark':
        print(benchmark.main(code))
Example #3
def test_install():
    """
    Test the example provided in the documentation. Shows that the module
    installs well. Not yet intended to show proper operation--just that it
    properly installs and imports, esp. on Python 3 where previous versions have
    not run. More an integration test than a unit test, but whatev!
    """

    import math

    class Benchmark_Sqrt(benchmark.Benchmark):

        each = 4 # scaled for fast operation

        def setUp(self):
            # Only using setUp in order to subclass later
            # Can also specify tearDown, eachSetUp, and eachTearDown
            self.size = 20

        def test_pow_operator(self):
            for i in xrange(self.size):
                z = i**.5

        def test_pow_function(self):
            for i in xrange(self.size):
                z = pow(i, .5)

        def test_sqrt_function(self):
            for i in xrange(self.size):
                z = math.sqrt(i)

    class Benchmark_Sqrt2(Benchmark_Sqrt):
        # Subclass the previous benchmark to change input using
        # self.setUp()

        label = "Benchmark Sqrt on a larger range"
        # The previous benchmark's label comes from the class name; this one
        # comes from the label attribute

        each = 9

        def setUp(self):
            self.size = 75

    benchmark.main(format="markdown", numberFormat="%.4g")
Example #4
def main():
    # Redirect stdout to os.devnull (stderr is left untouched)
    devnull = open(os.devnull, 'w')
    stdout = sys.stdout
    sys.stdout = devnull

    for renderer in RENDERERS:
        print(renderer)
        for resolution in RESOLUTIONS:
            print(" %sx%s" % resolution)
            for test in os.listdir("tests"):
                result = benchmark.main(renderer, test, TEST_LENGTH, *resolution)
                print("  %s: %s" % (test, result))


    # Restore the original stdout
    sys.stdout = stdout
    devnull.close()
Example #5
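    # Note: these test methods come from a benchmark.Benchmark subclass; the setUp
    # that presumably defines self.walk_root and the os/fnmatch/re imports are not
    # shown in this fragment.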
    def test_fnmatch(self):
        items = []
        for root, dirs, files in os.walk(self.walk_root):
            for item in fnmatch.filter(files, '*.txt'):
                items.append(os.path.join(root, item))
    
    def test_re(self):
        items = []
        rex = re.compile(r".*\.txt$")
        for root, dirs, files in os.walk(self.walk_root):
            for item in files:
                if rex.match(item):
                    items.append(os.path.join(root, item))

if __name__ == '__main__':
    benchmark.main(each=50)

#  Benchmark Report
#  ================
#  
#  Glob Tests
#  ----------
#  
#     name | rank | runs |           mean |              sd
#  --------|------|------|----------------|----------------
#       re |    1 |   50 | 0.262927365303 | 0.0152220841337
#  fnmatch |    2 |   50 | 0.265928983688 | 0.0218745928887
#     glob |    3 |   50 | 0.274979395866 | 0.0158716836404
#  
#  Each of the above 150 runs were run in random, non-consecutive order by
#  `benchmark` v0.0.1 (http://jspi.es/benchmark) with Python 2.7.1 
Example #6
import benchmark
import numpy as np
import rasterio as rio
from skimage.morphology import disk
from geoblend.coefficients import matrix_from_mask


class Benchmark_Coefficients(benchmark.Benchmark):
    
    def setUp(self):
        self.mask200 = np.pad(disk(200), 2, mode='constant')
        self.mask400 = np.pad(disk(400), 2, mode='constant')
        self.mask800 = np.pad(disk(800), 2, mode='constant')
        self.mask1500 = np.pad(disk(1500), 2, mode='constant')

    def test_cython_disk_200(self):
        mat = matrix_from_mask(self.mask200)

    def test_cython_disk_400(self):
        mat = matrix_from_mask(self.mask400)

    def test_cython_disk_800(self):
        mat = matrix_from_mask(self.mask800)

    def test_cython_disk_1500(self):
        mat = matrix_from_mask(self.mask1500)


if __name__ == '__main__':
    benchmark.main(format='markdown', each=10)
Example #7
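# Note: the first three assignments and the closing ''' below appear to be the tail
# of a triple-quoted (commented-out) block from the original file; the
# benchmark_nufft base class is defined earlier in that file.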
        self.size_3d = 100
        self.eps = 1.0e-8
        self.type = 'gpu_dft'
'''


class benchmark_gpu_nufft_100(benchmark_nufft):
    each = 50

    def setUp(self):
        self.size_1d = 1
        self.size_2d = 1000
        self.size_3d = 1
        self.eps = 1.0e-8
        self.type = 'gpu_nufft'


# class benchmark_nufft_100(benchmark_nufft):
#     each = 50
#
#     def setUp(self):
#         self.size_1d = 1
#         self.size_2d = 512
#         self.size_3d = 1
#         self.eps = 1.0e-8
#         self.type = 'nufft'

if __name__ == '__main__':
    print "Running benchmarks..."
    benchmark.main(format="markdown")
Example #8
File: bm.py Project: gctucker/splat
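    # Note: this fragment appears to be the body of the run_no_report() helper used
    # below; `bms`, `iterations`, PREFIX, and DEFAULT_ITERATIONS are defined earlier
    # in bm.py.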
    runs = 0
    results = []
    for bm_cls in bms:
        bm = bm_cls(each=iterations, prefix=PREFIX)
        bm.run()
        results.append(bm)
        runs += bm.getTotalRuns()
    return {res.__class__.__name__: res.table for res in results}


if __name__ == '__main__':
    parser = argparse.ArgumentParser("Standard Splat benchmark.")
    parser.add_argument('--iterations', type=int, default=DEFAULT_ITERATIONS,
                        help="number of iterations")
    parser.add_argument('--format', default='reST',
                        choices=['reST', 'csv', 'comma', 'markdown'],
                        help="report output format")
    parser.add_argument('--pickle',
                        help="do not generate report but pickle the results")
    args = parser.parse_args(sys.argv[1:])

    if args.pickle:
        results = run_no_report(iterations=args.iterations)
        cPickle.dump(results, open(args.pickle, 'w'))
    else:
        benchmark.main(format=args.format, numberFormat="%.4g",
                       each=args.iterations, prefix=PREFIX)

    sys.exit(0)
Example #9
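    # Note: this fragment starts inside a benchmark class for StackedDict;
    # self.stackeddict, self.dict, and self.key_range are presumably created in a
    # setUp method that is not shown here.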
        self._dict_setitem()

    def test_getitem(self):
        """Benckmark :py:meth:`StackedDict.__getitem__`."""
        for key in self.key_range:
            self.stackeddict[key]

    def test_dict_getitem(self):
        """Benckmark standard dict's __getitem__ for comparison purpose."""
        for key in self.key_range:
            self.dict[key]

    def test_iter(self):
        """Benchmark :py:meth:`StackedDict.__iter__`."""
        for break_threshold in range(0, 10):
            for key in iter(self.stackeddict):
                if key > break_threshold:
                    break

    def test_dict_iter(self):
        """Benchmark standard dict's __iter__ for comparison purpose."""
        for break_threshold in range(0, 10):
            for key in iter(self.dict):
                if key > break_threshold:
                    break


if __name__ == '__main__':
    benchmark.main(format="markdown", numberFormat="%.4g", each=100,
                   sort_by='name')
Example #10
        return sorted(L, key=lambda x: x[1])
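    # Note: this fragment starts partway through a SortDictByValue benchmark class;
    # self.d is built in a setUp method and fnouter is a key function defined
    # elsewhere in the original file; neither is shown here.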

    def test_generator(self):
        L = ((k, v) for (k, v) in self.d.iteritems())
        return sorted(L, key=lambda x: x[1])

    def test_lambda(self):
        return sorted(self.d.iteritems(), key=lambda x: x[1])

    def test_formalFnInner(self):
        def fninner(x):
            return x[1]

        return sorted(self.d.iteritems(), key=fninner)

    def test_formalFnOuter(self):
        return sorted(self.d.iteritems(), key=fnouter)


class SortLargerDictByValue(SortDictByValue):

    label = "Sort Dict with 1000 Keys by Value"
    each = 1000

    def setUp(self):
        self.d = dict(zip(range(1000), range(1000)))


if __name__ == '__main__':
    benchmark.main()  # each is a variable in the above classes
Example #11
        bm.run()
        results.append(bm)
        runs += bm.getTotalRuns()
    return {res.__class__.__name__: res.table for res in results}


if __name__ == '__main__':
    parser = argparse.ArgumentParser("Standard Splat benchmark.")
    parser.add_argument('--iterations',
                        type=int,
                        default=DEFAULT_ITERATIONS,
                        help="number of iterations")
    parser.add_argument('--format',
                        default='reST',
                        choices=['reST', 'csv', 'comma', 'markdown'],
                        help="report output format")
    parser.add_argument('--pickle',
                        help="do not generate report but pickle the results")
    args = parser.parse_args(sys.argv[1:])

    if args.pickle:
        results = run_no_report(iterations=args.iterations)
        cPickle.dump(results, open(args.pickle, 'w'))
    else:
        benchmark.main(format=args.format,
                       numberFormat="%.4g",
                       each=args.iterations,
                       prefix=PREFIX)

    sys.exit(0)
Example #12
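	# Note: this tab-indented fragment starts inside a benchmark class; self.n and
	# the part()/uberopti() helpers are defined elsewhere in the original file, and
	# the code targets Python 2, where range() returns a list that random.shuffle
	# can modify in place.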
		random.shuffle(self.array)
				
		
#	def test_quad(self):
#		quadratic(self.array)
		
	def test_part(self):
		self.array = range(-self.n, self.n)
		random.shuffle(self.array)
		part(self.array)
#		part(numpy.array(self.array))
		
#	def test_linear(self):
#		self.array = range(-self.n, self.n)
#		random.shuffle(self.array)
#		linear(self.array)
	
	def test_uber(self):
		self.array = range(-self.n, self.n)
		random.shuffle(self.array)
		uberopti(self.array)
#	def test_opti(self):
#		for i in xrange(self.times):
#			self.array = range(-self.n, self.n)
#			random.shuffle(self.array)
#			linearoptimized(numpy.array(self.array))
	

if __name__ == '__main__':
	benchmark.main()
Example #13
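    # Note: this fragment picks up partway through a Benchmark_Sqrt class (apparently
    # inside test_pow_function); the math import and the setUp that defines self.size
    # appear earlier in the original file.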
        for i in xrange(self.size):
            z = pow(i, .5)
    
    def test_sqrt_function(self):
        for i in xrange(self.size):
            z = math.sqrt(i)

class Benchmark_Sqrt2(Benchmark_Sqrt):
    # Subclass the previous benchmark to change input using self.setUp()
    
    label = "Benchmark Sqrt on a larger range"
    # The previous benchmark's label comes from the class name; this one comes
    # from the label attribute
    
    each = 10

    format="rst"
    sort_by = "mean" # could be anything allowed in order list
    label = "Benchmark Sqrt on a larger range"
    order =  ['name', 'rank', 'runs', 'mean', 'sd', 'factor', 'pvalue']
    header = ['Test', 'Rank', 'Runs', 'Mean (s)', 'SD (s)', 'Factor', 'T-Test P-Value']
    cellFormats = ['%s', '%d', '%d', "%.3e", "%.3e", "%.2g", "%.2g"]
    
    def setUp(self):
        self.size = 750000


if __name__ == '__main__':
    benchmark.main(format="markdown", numberFormat="%.5g", sort_by="mean") 
    # benchmark.main(each=10) could have been written instead if the first
    # class did not need to be run 50 times.
Example #14
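    # Note: this fragment begins partway through the dict literal assigned to
    # self.doc (presumably in setUp); the jsoncanon/json imports and self.size are
    # defined earlier in the original file.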
                    "hello": 1,
                    "zzz": []
                },
                "bob",
                "agnes"
            ],
            "1": 55.7,
            "3": {
                "xxxxx": {},
                "ggg": []
            },
            "b": None,
            "c": True,
            "d": False
        }

    def test_jsoncanon(self):
        for i in xrange(self.size):
            jsoncanon.dumps(self.doc)

    def test_jsoncanon_sorting_lists(self):
        for i in xrange(self.size):
            jsoncanon.dumps(self.doc, sort_lists=True)

    def test_json_module(self):
        for i in xrange(self.size):
            json.dumps(self.doc)

if __name__ == "__main__":
    benchmark.main(format="rst")
Example #15
import benchmark

import time

class BenchmarkPause(benchmark.Benchmark):
    
    def test_one_hundredth(self):
        time.sleep(.01)
    
    def test_one_tenth(self):
        time.sleep(.1)
    
if __name__ == '__main__':
    benchmark.main(each=10)
Example #16
        required=False,
        default=2,
        help=benchmark.HELP_MESSAGES['INFER_REQUESTS_COUNT_MESSAGE'])
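    # Note: `parser` and `args` (apparently an argparse argument group) are created
    # earlier in the original parse_args() helper; only the tail of that helper is
    # shown in this fragment.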
    args.add_argument(
        '-nthreads',
        '--number_threads',
        type=int,
        required=False,
        default=None,
        help=benchmark.HELP_MESSAGES['INFER_NUM_THREADS_MESSAGE'])
    args.add_argument('-b',
                      '--batch_size',
                      type=int,
                      required=False,
                      default=None,
                      help=benchmark.HELP_MESSAGES['BATCH_SIZE_MESSAGE'])
    args.add_argument(
        '-pin',
        '--infer_threads_pinning',
        type=str,
        required=False,
        default='YES',
        choices=['YES', 'NO'],
        help=benchmark.HELP_MESSAGES['INFER_THREADS_PINNING_MESSAGE'])
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    benchmark.main(args)
Example #17
def run():
    benchmark.main(module="datavalidation.benchmark.classes")
Example #18
#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#   See the License for the specific language governing permissions and
#   limitations under the License.

from setuptools import setup

import benchmark
import generate_readme


def readme():
    with open("README.md") as f:
        return f.read()


benchmark.main()
generate_readme.main()

setup(
    name="masked-convolution",
    version="0.3.0",
    description="A PyTorch wrapper for masked convolutions",
    long_description=readme(),
    long_description_content_type="text/markdown",
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Environment :: GPU",
        "Intended Audience :: Developers",
        "Intended Audience :: Education",
        "Intended Audience :: Information Technology",
        "Intended Audience :: Science/Research",
Example #20
import benchmark

import time
import datetime

class TimeTests(benchmark.Benchmark):
    label = 'datetime vs. time'
    def test_utcnow(self):
        return datetime.datetime.utcnow().isoformat()[:-6]+'000Z'
    
    def test_gmtime(self):
        return time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())

if __name__ == '__main__':
    benchmark.main(
        each=100, 
        format='rst', 
        order = ['rank', 'name', 'runs', 'mean'], # no sd
        header=["Rank", "Name","Runs", "Mean (s)"]
    )
Example #21
        def fninner(x):return x[1]
        return sorted(self.d.iteritems(), key=fninner)

    def test_formalFnOuter(self):
        return sorted(self.d.iteritems(), key=fnouter)

class SortLargerDictByValue(SortDictByValue):

    label = "Sort Dict with 1000 Keys by Value"
    each = 1000

    def setUp(self):
        self.d = dict(zip(range(1000),range(1000)))

if __name__ == '__main__':
    benchmark.main(format="plain") # each is a variable in the above classes

#  Benchmark Report
#  ================
#
#  Sort Dict with 100 Keys by Value
#  --------------------------------
#
#           name | rank |  runs |              mean |                sd
#  --------------|------|-------|-------------------|------------------
#         pep265 |    1 | 10000 | 5.30271053314e-05 | 1.32238298129e-05
#         lambda |    2 | 10000 | 6.49063587189e-05 | 1.62878955883e-05
#  formalFnInner |    3 | 10000 | 6.51606559753e-05 | 1.58921554292e-05
#  formalFnOuter |    4 | 10000 | 6.51744127274e-05 | 1.60679178962e-05
#  listExpansion |    5 | 10000 | 7.95884609222e-05 | 1.82228267913e-05
#      generator |    6 | 10000 | 8.46118688583e-05 |  1.9538229455e-05
Example #22
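# Note: this snippet drives what appears to be a project-specific benchmark module
# whose main() takes two positional arguments; each line of the "commits" file is
# assumed to hold two commit identifiers.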
import benchmark
with open("commits") as f:
    for line in f:
        before, after = line.split()
        benchmark.main(before, after)
Example #23
        def fninner(x):return x[1]
        return sorted(self.d.items(), key=fninner)
    
    def test_formalFnOuter(self):
        return sorted(self.d.items(), key=fnouter)

class SortLargerDictByValue(SortDictByValue):
    
    label = "Sort Dict with 1000 Keys by Value"
    each = 1000
    
    def setUp(self):
        self.d = dict(zip(range(1000),range(1000)))

if __name__ == '__main__':
    benchmark.main() # each is a variable in the above classes

#  Benchmark Report
#  ================
#  
#  Sort Dict with 100 Keys by Value
#  --------------------------------
#  
#           name | rank |  runs |              mean |                sd
#  --------------|------|-------|-------------------|------------------
#         pep265 |    1 | 10000 | 5.30271053314e-05 | 1.32238298129e-05
#         lambda |    2 | 10000 | 6.49063587189e-05 | 1.62878955883e-05
#  formalFnInner |    3 | 10000 | 6.51606559753e-05 | 1.58921554292e-05
#  formalFnOuter |    4 | 10000 | 6.51744127274e-05 | 1.60679178962e-05
#  listExpansion |    5 | 10000 | 7.95884609222e-05 | 1.82228267913e-05
#      generator |    6 | 10000 | 8.46118688583e-05 |  1.9538229455e-05
Example #24
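    # Note: this fragment starts inside a benchmark class; LLSparseMatrix, spmatrix,
    # lil_matrix, construct_random_matrices, and the INT32_T/FLOAT64_T constants are
    # imported from sparse-matrix libraries elsewhere in the original file.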

    label = "matvec with 5000 elements and size = 1,000,000"
    each = 100


    def setUp(self):

        self.nbr_elements = 5000
        self.size = 1000000

        self.A_c = LLSparseMatrix(size=self.size, size_hint=self.nbr_elements, itype=INT32_T, dtype=FLOAT64_T)
        self.A_p = spmatrix.ll_mat(self.size, self.size, self.nbr_elements)
        self.A_s = lil_matrix((self.size, self.size), dtype=np.float64)

        self.list_of_matrices = []
        self.list_of_matrices.append(self.A_c)
        self.list_of_matrices.append(self.A_p)
        self.list_of_matrices.append(self.A_s)

        construct_random_matrices(self.list_of_matrices, self.size, self.nbr_elements)

        self.CSR_c = self.A_c.to_csr()
        self.CSR_p = self.A_p.to_csr()
        self.CSR_s = self.A_s.tocsr()

        self.v = np.arange(0, self.size, dtype=np.float64)

if __name__ == '__main__':
    benchmark.main(format="markdown", numberFormat="%.4g")
Example #25
import benchmark

import time
import datetime


class TimeTests(benchmark.Benchmark):
    label = 'datetime vs. time'

    def test_utcnow(self):
        return datetime.datetime.utcnow().isoformat()[:-6] + '000Z'

    def test_gmtime(self):
        return time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())


if __name__ == '__main__':
    benchmark.main(
        each=100,
        format='rst',
        order=['rank', 'name', 'runs', 'mean'],  # no sd
        header=["Rank", "Name", "Runs", "Mean (s)"])
Example #26
    def test_sqrt_function(self):
        for i in xrange(self.size):
            z = math.sqrt(i)


class Benchmark_Sqrt2(Benchmark_Sqrt):
    # Subclass the previous benchmark to change input using self.setUp()

    label = "Benchmark Sqrt on a larger range"
    # The previous benchmark's label comes from the class name; this one comes
    # from the label attribute

    each = 10

    format = "rst"
    sort_by = "mean"  # could be anything allowed in order list
    label = "Benchmark Sqrt on a larger range"
    order = ["name", "rank", "runs", "mean", "sd", "factor", "pvalue"]
    header = ["Test", "Rank", "Runs", "Mean (s)", "SD (s)", "Factor", "T-Test P-Value"]
    cellFormats = ["%s", "%d", "%d", "%.3e", "%.3e", "%.2g", "%.2g"]

    def setUp(self):
        self.size = 750000


if __name__ == "__main__":
    benchmark.main(format="markdown", numberFormat="%.5g", sort_by="mean")
    # benchmark.main(each=10) could have been written instead if the first
    # class did not need to be run 50 times.
Example #27
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import sys
import benchmark
# We open the log file in writing mode
configuration = [[50, 1000], [100, 1000], [250, 1000], [500, 1000],
                 [750, 1000], [1000, 1000]]

with open('myLogFile', 'w') as fichieryu:
    sys.stdout = fichieryu
    for c in configuration:
        print('$' + str(c[0]) + ' ' + str(c[1]))

        for i in range(0, 2):
            benchmark.main(c[0], c[1])
            print('#')

# The with-block has already closed the file; restore stdout for any later output
sys.stdout = sys.__stdout__