Example #1
def main():
    ''' Main method
    '''
    build_dir = "../../build/"
    bin_dir = "../../bin"

    benchmark = Benchmark(EXPERIMENTS, RUNS, build_dir, bin_dir)
    benchmark.generate()
    benchmark.run()
Example #2
def main():
    ''' Main method
    '''
    build_dir = "../../build/"
    bin_dir = "../../bin"

    for exp in EXPERIMENTS:
        exp['command'] += f" -r {exp['number_of_rows']} -d {exp['number_of_columns']}"
        exp['command'] += f" -s {exp['selectivity']} -q {exp['number_of_queries']}"
        exp['command'] += f" -f {exp['data']} -w {exp['workload']}"

    benchmark = Benchmark(EXPERIMENTS, RUNS, build_dir, bin_dir)
    benchmark.generate()
    benchmark.run()
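The EXPERIMENTS and RUNS globals are not shown in these snippets. Based on the keys the loop reads, one entry might look like the following sketch (all values are hypothetical):

EXPERIMENTS = [
    {
        'command': './benchmark',        # base command the flags are appended to
        'number_of_rows': 1000000,
        'number_of_columns': 16,
        'selectivity': 0.01,
        'number_of_queries': 1000,
        'data': 'data/uniform.csv',
        'workload': 'workloads/uniform.txt',
    },
]
RUNS = 3  # repetitions per experiment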
Example #3
def main(epochs, buffer_size, batch_size, train_mode,
         distribution_strategy, num_gpus,
         workers, w_type, w_index):

    strategy = get_distribution_strategy(strategy=distribution_strategy,
                                         num_gpus=num_gpus, workers=workers,
                                         typ=w_type, index=w_index)
    print_msg('Number of devices: {}'.format(strategy.num_replicas_in_sync), 'info')

    data_obj = Dataset(batch_size=batch_size)
    train_dataset, test_dataset = data_obj.create_dataset()
    steps_per_epoch = data_obj.get_buffer_size() // batch_size
    train_obj = Benchmark(epochs, steps_per_epoch, 'resnet56')

    with strategy.scope():
        # Create and compile model within strategy scope
        train_obj.create_model('resnet56')
        train_obj.compile_model()

    print_msg('Training...', 'info')
    train_obj.run(train_dataset, test_dataset, train_mode)
    print_msg('Training Done.', 'succ')
Example #4
import os
import shutil

class RunBenchmark:
    ver = 3

    def __init__(self):
        self.mt = MeasureThread()
        self.benchmark = Benchmark()
        self.cf = CreateFiles()
        self.db = DB()

    def run(self):
        print("###### BENCHMARK ######")
        self.mt.start()
        try:
            self.benchmark.run()
        finally:
            self.mt.stop()

    def rm_data(self):
        one_path = self.cf.get_file_paths()[0]
        shutil.rmtree(os.path.dirname(one_path), ignore_errors=True)
        print("Remove tmp data")
Example #5
def main(epochs, buffer_size, batch_size, train_mode, display_every,
         distribution_strategy, num_gpus, workers, w_type, w_index,
         setup_cluster, verbose):

    if verbose: os.environ["TF_CPP_MIN_LOG_LEVEL"] = str(verbose)
    strategy = get_distribution_strategy(strategy=distribution_strategy,
                                         train_mode=train_mode,
                                         num_gpus=num_gpus,
                                         workers=workers,
                                         typ=w_type,
                                         index=w_index,
                                         setup=setup_cluster)
    if num_gpus == 1: num_gpus = strategy.num_replicas_in_sync
    print_msg('Number of devices: {}'.format(num_gpus), 'info')

    data_obj = Dataset(batch_size)
    train_dataset, test_dataset = data_obj.create_dataset()
    steps_per_epoch = data_obj.get_buffer_size() // batch_size
    train_obj = Benchmark(epochs, steps_per_epoch, batch_size, display_every,
                          num_gpus, 'resnet56', strategy)

    print_msg('Training...', 'info')
    train_obj.run(train_dataset, test_dataset, train_mode)
    print_msg('Training Done.', 'succ')
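get_distribution_strategy is not shown in either TensorFlow snippet. Below is a minimal sketch of what it might wrap, assuming the tf.distribute API; the parameter handling is illustrative, not the project's actual implementation:

import json
import os
import tensorflow as tf

def get_distribution_strategy(strategy='mirrored', num_gpus=1, workers=None,
                              typ='worker', index=0, train_mode=None,
                              setup=False):
    if strategy == 'multiworker':
        if setup and workers:
            # Multi-worker training reads the cluster topology from TF_CONFIG.
            os.environ['TF_CONFIG'] = json.dumps({
                'cluster': {'worker': workers},
                'task': {'type': typ, 'index': index},
            })
        return tf.distribute.MultiWorkerMirroredStrategy()
    # Default: replicate the model across the locally visible GPUs.
    return tf.distribute.MirroredStrategy()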
Example #6
import higra as hg

def delaunayGraph(X):
  graph, edge_weights = hg.make_graph_from_points(X, 'delaunay')
  return tuple((graph, edge_weights))

def knnGraph(X):
  graph, edge_weights = hg.make_graph_from_points(X, 'knn', n_neighbor=3)
  return tuple((graph, edge_weights))

def MST(G):
  return hg.minimum_spanning_tree(G[0], G[1])

def SLINK(G):
  return hg.binary_partition_tree_single_linkage(G[0], G[1])

def CLINK(G):
  return hg.binary_partition_tree_complete_linkage(G[0], G[1])

if __name__ == "__main__":
  import sys
  filePath = "data.csv"
  if len(sys.argv) > 1:
    filePath = sys.argv[1]

  hg1 = Benchmark("higra-dt-mst", loadPoints, delaunayGraph, MST)
  hg1.run(filePath)

  hg2 = Benchmark("higra-3nn-clink", loadPoints, knnGraph, CLINK)
  hg2.run(filePath)

  hg1.info()
  hg2.info()
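loadPoints is used by both constructions but not defined in the snippet. A minimal sketch, assuming the input CSV stores one point per row as comma-separated coordinates:

import numpy as np

def loadPoints(filePath):
  # Returns an (n, d) array of point coordinates.
  return np.loadtxt(filePath, delimiter=',')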
Example #7
from benchmark import Benchmark

modules = ['numpy','Numeric','numarray']

b = Benchmark(modules,
              title='Casting a (10,10) float array to int.',
              runs=3,reps=10000)

N = [10,10]
b['numpy'] = ('b = a.astype(int)',
              'a=numpy.zeros(shape=%s,dtype=float)' % N)
b['Numeric'] = ('b = a.astype("l")',
                'a=Numeric.zeros(shape=%s,typecode="d")' % N)
b['numarray'] = ("b = a.astype('l')",
                 "a=numarray.zeros(shape=%s,typecode='d')" % N)

b.run()
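Benchmark here takes a (statement, setup) string pair per module, in the style of timeit. The numpy entry is roughly equivalent to the plain timeit call below (the explicit numpy import in the setup string is an assumption; the Benchmark class presumably injects it):

import timeit

times = timeit.repeat(
    stmt='b = a.astype(int)',
    setup='import numpy; a = numpy.zeros(shape=(10, 10), dtype=float)',
    repeat=3,       # mirrors runs=3
    number=10000,   # mirrors reps=10000
)
print(min(times))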
Example #9
def handler(event: Dict, context: Dict) -> Dict:
    '''Lambda handler function

    Arguments accepted in event:

    :verbose: (bool) whether to run in verbose mode with log output
    :ignore_coldstart: (bool) whether to ignore results from cold starts when
        computing Lambda performance speed
    :test_count: (int) how many tests to run with each memory allocation
    :max_threads: (int) maximum number of threads to run concurrently
    :lambda_function: (str) Lambda function to invoke and benchmark
    :lambda_event: (dict) event to provide the Lambda
    :memory_sets: (list) list of memory allocations to benchmark
        AWS Lambda accepts memory from 128 to 3008 MB in increments of 128 MB
    '''
    try:
        # Log event payload for debugging and security purposes
        print_payload(payload_type='event', payload_obj=event)

        valid, error = validate_event(event=event)

        if not valid:
            response = {
                'status': 400,
                'results': [],
                'errors': [error],
            }

        else:
            benchmarking = Benchmark(**event)

            results = benchmarking.run()

            response = {
                'status': 200,
                'results': results,
                'errors': benchmarking.public_errors,
            }

    except Exception as error:
        if isinstance(error, custom_exc.CustomBenchmarkException):
            response = {
                'status': 500,
                'results': [],
                'errors': [
                    f'{type(error).__name__}: {str(error)}',
                ],
            }

        else:
            logger.error(error)
            logger.exception(error)

            response = {
                'status': 500,
                'results': [],
                'errors': [
                    'Sorry there was an internal error',
                ]
            }

    # Log response object for debugging and security purposes
    print_payload(payload_type='response', payload_obj=response)

    return response
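Going by the docstring, a minimal invocation event could look like this (the function name and values are hypothetical):

event = {
    'verbose': True,
    'ignore_coldstart': True,
    'test_count': 10,
    'max_threads': 5,
    'lambda_function': 'my-benchmark-target',  # hypothetical function name
    'lambda_event': {},
    'memory_sets': [128, 256, 512, 1024],
}
response = handler(event, context={})
print(response['status'], response['errors'])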
Example #10
import time

import numpy as np

def main(max_cams, timeout, broker, port, broker2, port2, name, interval):
    avg_lats = []
    bps_sent = []
    bps_recvd = []
    dropped_clients = []
    dropped_packets_percent = []
    cpu = []
    mem = []
    print()
    for num_cams in range(1, max_cams + interval, interval):
        print(f"----- Running Benchmark with {num_cams} Clients -----")

        test = Benchmark(f"{name}_c{num_cams}", num_cams, timeout * 60,
                         (broker, broker2), (port, port2), make_scene())
        test.run()

        lats = test.get_avg_lats()
        avg_lats += [lats if lats else [-1] * 100]
        bps_sent += [test.get_bps_sent()]
        bps_recvd += [test.get_bps_recvd()]
        dropped_clients += [test.get_dropped_clients()]
        dropped_packets_percent += [test.dropped_packets_percent()]
        cpu += [np.mean(test.get_cpu())]
        mem += [np.mean(test.get_mem())]

        print("----- Summary -----")
        if not broker2:
            print(
                f"{num_cams} Clients connecting to {broker}:{port} with {timeout} sec timeout:"
            )
        else:
            print(
                f"{num_cams} Clients connecting to {broker}:{port} and {broker2}:{port2} with {timeout} sec timeout:"
            )
        print(f"  {np.mean(avg_lats[-1])} ms response time")
        print(
            f"  {bps_sent[-1]} bytes/ms sent | {bps_recvd[-1]} bytes/ms received"
        )
        print(
            f"  {dropped_clients[-1]} clients dropped | {dropped_packets_percent[-1]*100}% packet loss"
        )
        print(f"  {cpu[-1]*100}% cpu usage | {mem[-1]*100}% mem usage")
        print()

        test.save()
        time.sleep(1)

    avg_lats = np.array(avg_lats)
    bps_sent = np.array(bps_sent)
    bps_recvd = np.array(bps_recvd)
    dropped_clients = np.array(dropped_clients)
    dropped_packets_percent = np.array(dropped_packets_percent)
    cpu = np.array(cpu)
    mem = np.array(mem)

    np.savez(f"data/benchmark_{name}_c{num_cams}_i{interval}",
             avg_lats=avg_lats,
             bps_sent=bps_sent,
             bps_recvd=bps_recvd,
             dropped_clients=dropped_clients,
             dropped_packets_percent=dropped_packets_percent,
             cpu=cpu,
             mem=mem)
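np.savez appends the .npz extension automatically, so the results can be read back with np.load; the file name below is a hypothetical instance of the naming pattern used above:

import numpy as np

data = np.load('data/benchmark_test_c10_i1.npz')
print(data['avg_lats'].shape)
print(data['cpu'], data['mem'])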
Example #11
File: runner.py Project: balarsen/benchy
if __name__ == "__main__":
    from benchmark import Benchmark, BenchmarkSuite

    setup = ""
    statement = "lst = ['c'] * 100000"
    bench = Benchmark(statement, setup, name='list with "*"')

    statement = "lst = ['c' for x in xrange(100000)]"
    bench2 = Benchmark(statement, setup, name="list with xrange")

    statement = "lst = ['c' for x in range(100000)]"
    bench3 = Benchmark(statement, setup, name="list with range")
    results = bench3.run()
    rst_text = bench3.to_rst(results)
    with open("teste.rst", "w") as f:
        f.write(rst_text)

    suite = BenchmarkSuite()
    suite.append(bench)
    suite.append(bench2)
    suite.append(bench3)

    runner = BenchmarkRunner(suite, ".", "List Creation")
    n_benchs, results = runner.run()
    # print results
    # fig = runner.plot_relative(results, horizontal=True)
    # plt.savefig('%s_r.png' % runner.name, bbox_inches='tight')
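The commented-out lines sketch how to plot the results; they run under the assumption, implied by the comments, that plot_relative returns a matplotlib figure:

import matplotlib.pyplot as plt

fig = runner.plot_relative(results, horizontal=True)
plt.savefig('%s_r.png' % runner.name, bbox_inches='tight')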
Example #12
def gabrielGraph(X):
  edges = geograph.GabrielGraph(X, True)
  geograph.saveEdgeSnap("tmp.snap", edges)
  return geograph.loadFromSnap("tmp.snap", True, True)

def cleanUp():
  import os
  os.remove("tmp.snap")

if __name__ == "__main__":
  import sys
  filePath = "data.csv"
  if len(sys.argv) > 1:
    filePath = sys.argv[1]

  gg1 = Benchmark("geograph-dt-mst", gt.loadPoints, delaunayGraph, gt.MST)
  gg1.run(filePath)

  gg2 = Benchmark("geograph-3nn-clink", gt.loadPoints, knnGraph, gt.CLINK)
  gg2.run(filePath)

  gg3 = Benchmark("geograph-3nn-filtered-cc", gt.loadPoints, filteredKnnGraph, gt.CC)
  gg3.run(filePath)

  gg4 = Benchmark("geograph-gabriel-sssp", gt.loadPoints, gabrielGraph, gt.SSSP)
  gg4.run(filePath)

  gg1.info()
  gg2.info()
  gg3.info()
  gg4.info()