Example #1
# Imports and logger setup implied by the body below; the Benchmarker import
# path and the logger name are assumptions (the excerpt does not show them).
import argparse
import logging
import os
import time

import bioblend
import yaml

from benchmarker import Benchmarker  # assumed import path

log = logging.getLogger(__name__)  # assumed logger setup


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--config",
                        type=str,
                        default="benchmark_config.yml",
                        help="Path to config file")
    args = parser.parse_args()

    log.debug("Loading Configuration from file {filename}".format(
        filename=args.config))
    with open(args.config, "r") as stream:
        try:
            config = yaml.safe_load(stream)

            log.info("Initializing Benchmarker.")
            benchmarker = Benchmarker(config)

            benchmarker.run_pre_tasks()

            log.info("Starting to run benchmarks.")
            try:
                benchmarker.run()
            except bioblend.ConnectionError:
                log.error(
                    "There was a problem with the connection. Benchmark canceled."
                )

            results_filename = "results/results_{time}".format(
                time=time.time())
            log.info("Saving results to file: '{filename}.json'.".format(
                filename=results_filename))
            os.makedirs(os.path.dirname(results_filename), exist_ok=True)
            benchmarker.save_results(results_filename)

            if benchmarker.inflx_db is not None:
                log.info("Sending results to influxDB.")
                benchmarker.send_results_to_influxdb()

            benchmarker.run_post_tasks()

        except yaml.YAMLError as exc:
            print(exc)
        except IOError as err:
            print(err)
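Example #1 reads everything it needs from the YAML file given by --config. The snippet below is a minimal, self-contained sketch of the yaml.safe_load call it relies on; the keys used here are placeholders, since the real benchmark_config.yml schema is not shown in the excerpt.

import yaml

# Placeholder config text; the actual benchmark_config.yml keys are not shown in the source.
raw = """
benchmarks:
  - name: example
    runs: 3
"""

config = yaml.safe_load(raw)
print(config["benchmarks"][0]["name"])  # -> example
print(config["benchmarks"][0]["runs"])  # -> 3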
Example #2
# The excerpt begins mid-script; the imports and parser construction below are
# assumed, not shown in the original.
import argparse

from benchmarker import Benchmarker  # assumed import path

parser = argparse.ArgumentParser()

parser.add_argument('--install', choices=['client', 'server', 'all'], default='all', help='Allows you to only install the server or client software')
parser.add_argument('--install-error-action', choices=['abort', 'continue'], default='continue', help='action to take in case of error during installation')
parser.add_argument('--test', nargs='+', help='names of tests to run')
parser.add_argument('--exclude', nargs='+', help='names of tests to exclude')
parser.add_argument('--type', choices=['all', 'json', 'db', 'query', 'fortune', 'update', 'plaintext'], default='all', help='which type of test to run')
parser.add_argument('-m', '--mode', choices=['benchmark', 'verify'], default='benchmark', help='verify mode will only start up the tests, curl the urls and shutdown')
parser.add_argument('--list-tests', action='store_true', default=False, help='lists all the known tests that can run')
parser.add_argument('--next-sort', action='store_true', default=False, help='displays the next value that can be used as a sort value')
parser.add_argument('--max-concurrency', default=256, help='the maximum concurrency that the tests will run at. The query tests will run at this concurrency', type=int)
parser.add_argument('--max-queries', default=20, help='The maximum number of queries to run during the query test', type=int)
parser.add_argument('--query-interval', default=5, type=int)
parser.add_argument('--max-threads', default=8, help='The max number of threads to run wrk at. This should be set to the number of cores for your system.', type=int)
parser.add_argument('--duration', default=60, help='Time in seconds that each test should run for.')
parser.add_argument('--starting-concurrency', default=8, type=int)
parser.add_argument('--sleep', type=int, default=60, help='the amount of time to sleep after starting each test to allow the server to start up.')
parser.add_argument('--parse', help='Parses the results of the given timestamp and merges that with the latest results')
parser.add_argument('--name', default="ec2", help='The name to give this test. Results will be placed in a folder using this name.')
args = parser.parse_args()

benchmarker = Benchmarker(vars(args))

# Run the benchmarker in the specified mode
if benchmarker.list_tests:
    benchmarker.run_list_tests()
elif benchmarker.next_sort:
    benchmarker.next_sort_value()
elif benchmarker.parse is not None:
    benchmarker.parse_timestamp()
else:
    benchmarker.run()
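A couple of the options above rely on less obvious argparse behaviour: nargs='+' collects one or more values into a list, type=int coerces the value, and dashes in option names become underscores on the parsed namespace. The standalone sketch below (a trimmed copy of two of the options, not part of the original script) illustrates this.

import argparse

# Trimmed copies of two options from the example above, for illustration only.
parser = argparse.ArgumentParser()
parser.add_argument('--test', nargs='+', help='names of tests to run')
parser.add_argument('--max-concurrency', default=256, type=int)

args = parser.parse_args(['--test', 'json', 'plaintext', '--max-concurrency', '512'])
print(vars(args))  # -> {'test': ['json', 'plaintext'], 'max_concurrency': 512}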
Example #3
"""
Objects data benchmarks.
"""

if __name__ == "__main__":
    b = Benchmarker()

    header = "| Test | FPS |\n| --- | --- |\n"
    output = header[:]

    rows = []

    b.start()
    rows.append(
        b.run(boxes=True,
              images=True,
              passes="_img",
              row="--boxes --images --passes _img"))

    b.start()
    rows.append(
        b.run(boxes=True,
              images=True,
              passes="_id",
              row="--boxes --images --passes _id"))

    b.start()
    rows.append(
        b.run(boxes=True,
              images=True,
              passes="none",
              id_grayscale=True,
              # row label assumed, following the pattern of the calls above
              row="--boxes --images --passes none --id_grayscale"))
Example #4
# The excerpt begins mid-way through the option definitions; the imports and
# parser construction below are assumed, not shown in the original.
import argparse

from benchmarker import Benchmarker  # assumed import path

parser = argparse.ArgumentParser()

parser.add_argument(
    '--sleep',
    type=int,
    default=60,
    help='the amount of time to sleep after starting each test to allow the server to start up.')
parser.add_argument(
    '--parse',
    help='Parses the results of the given timestamp and merges that with the latest results')
parser.add_argument(
    '--name',
    default="ec2",
    help='The name to give this test. Results will be placed in a folder using this name.')
args = parser.parse_args()

benchmarker = Benchmarker(vars(args))

# Run the benchmarker in the specified mode
if benchmarker.list_tests:
    benchmarker.run_list_tests()
elif benchmarker.next_sort:
    benchmarker.next_sort_value()
elif benchmarker.parse is not None:
    benchmarker.parse_timestamp()
else:
    benchmarker.run()
Example #5
from benchmarker import Benchmarker
"""
Objects data benchmarks.
"""

if __name__ == "__main__":
    b = Benchmarker()

    output = "| Test | FPS |\n| --- | --- |\n"

    rows = []

    b.start()
    rows.append(b.run(boxes=True, transforms=True, row="--boxes --transforms"))

    b.start()
    rows.append(
        b.run(boxes=True, rigidbodies=True, row="--boxes --rigidbodies"))

    b.start()
    rows.append(b.run(boxes=True, collisions=True, row="--boxes --collisions"))

    b.start()
    rows.append(b.run(boxes=True, bounds=True, row="--boxes --bounds"))

    b.start()
    rows.append(
        b.run(boxes=True,
              transforms=True,
              rigidbodies=True,
              collisions=True,
              # row label assumed, following the pattern of the calls above
              row="--boxes --transforms --rigidbodies --collisions"))
Example #6
from benchmarker import Benchmarker
from time import sleep

bench = Benchmarker()


def s1():
    sleep(1)


def s2():
    sleep(2)


def s3():
    sleep(3)


bench.addTask("sleep 1", s1)
bench.addTask("sleep 2", s2)
bench.addTask("sleep 3", s3)

bench.run()
bench.displayTimes()
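The last snippet registers plain functions, but assuming addTask simply takes a name and a zero-argument callable (as the calls above suggest), one-off tasks can also be added inline with a lambda. A minimal variation reusing only the calls shown above:

from time import sleep

from benchmarker import Benchmarker

bench = Benchmarker()

# A lambda works anywhere a plain zero-argument function does.
bench.addTask("sleep 0.5", lambda: sleep(0.5))
bench.addTask("sleep 1.5", lambda: sleep(1.5))

bench.run()
bench.displayTimes()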