Example #1
def run_computation(index, input_file, definition):
    # Standard-library imports added so this fragment is self-contained; helpers
    # such as get_workdir, load_graphs, load_resultfile, run_benchmark,
    # parse_configs, parse_timeout and the DASK_PORT/BENCHMARK_DIR constants are
    # expected to come from the surrounding benchmark module.
    import json
    import os
    import socket
    import sys

    from dask_cluster import start_cluster

    input = definition["inputs"][int(index)]
    output = definition["outputs"][int(index)]

    is_pbs = "PBS_JOBID" in os.environ

    workdir = get_workdir(
        os.environ.get("PBS_JOBID", "local-{}".format(index)), input_file,
        output)

    if not os.path.exists(workdir):
        os.makedirs(workdir)
    with open(os.path.join(workdir, os.path.basename(input_file)), "w") as dst:
        definition["index"] = index
        json.dump(definition, dst, indent=4)

    dask_cluster = None
    if definition.get("dask"):
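        # Start a Dask cluster on this node and pass its "host:port" address on
        # to run_benchmark below.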
        start_cluster(port=DASK_PORT, path=BENCHMARK_DIR)
        dask_cluster = "{}:{}".format(socket.gethostname(), DASK_PORT)

    graph_frame = load_graphs([input])
    frame = load_resultfile(output, True)

    if is_pbs:
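        # When running under PBS, capture stdout/stderr in files inside the
        # working directory.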
        with open(os.path.join(workdir, "output"), "w") as out:
            with open(os.path.join(workdir, "error"), "w") as err:
                sys.stdout = out
                sys.stderr = err
                run_benchmark(parse_configs(definition,
                                            graph_frame), frame, output, True,
                              parse_timeout(definition.get("timeout")),
                              dask_cluster)
    else:
        run_benchmark(parse_configs(definition, graph_frame),
                      frame, output, True,
                      parse_timeout(definition.get("timeout")), dask_cluster)
Example #2
# Assumed imports for this snippet (Intel nGraph with its neon frontend);
# get_fake_cifar, get_mini_resnet, fill_feed_dict, run_benchmark and
# print_benchmark_results are helpers from the surrounding benchmark module.
import ngraph as ng
from ngraph.frontends.neon import GradientDescentMomentum, ax


def run_cifar_benchmark(n_iter=10,
                        n_skip=5,
                        batch_size=4,
                        transformer_type='cpu'):
    inputs, data, train_set = get_fake_cifar(batch_size, n_iter)
    model = get_mini_resnet(inputs)
    optimizer = GradientDescentMomentum(0.01, 0.9)

    train_loss = ng.cross_entropy_multi(model(inputs['image']),
                                        ng.one_hot(inputs['label'], axis=ax.Y))

    batch_cost = ng.sequential(
        [optimizer(train_loss),
         ng.mean(train_loss, out_axes=())])
    batch_cost_computation_op = ng.computation(batch_cost, "all")
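    # Evaluating this computation performs one optimizer step and returns the
    # mean cross-entropy loss for the batch.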

    feed_dict = fill_feed_dict(train_set, inputs)
    benchmarks = dict()
    benchmarks['cifar_msra_fprop'] = run_benchmark(batch_cost_computation_op,
                                                   transformer_type, feed_dict,
                                                   n_skip, n_iter)
    print_benchmark_results(benchmarks)
Example #3
     help="precision of the computation in bits",
 )
 parser.add_argument(
     "-b",
     "--benchmark",
     type=int,
     default=1,
     dest="benchmark",
     help="number of times to benchmark this application (default 1 - "
     "normal execution)",
 )
 args = parser.parse_args()
 if args.P == 16:
     run_benchmark(
         run_black_scholes,
         args.benchmark,
         "Black Scholes",
         (args.N, np.float16),
     )
 elif args.P == 32:
     run_benchmark(
         run_black_scholes,
         args.benchmark,
         "Black Scholes",
         (args.N, np.float32),
     )
 elif args.P == 64:
     run_benchmark(
         run_black_scholes,
         args.benchmark,
         "Black Scholes",
         (args.N, np.float64),
Example #4
#!/usr/bin/python3
from benchmark import run_benchmark
from benchmark import print_results
from unit_preference_methods import *
from splitting_methods import *

# Run the benchmark with the chosen unit-preference heuristic (max_unit_choice)
# and splitting heuristics (two_clause_choice, jeroslow_wang_choice); the numeric
# arguments are benchmark parameters defined by benchmark.run_benchmark.
results = run_benchmark(100, 3, 4.4, 0.2, 5, 5, [max_unit_choice],
                        [two_clause_choice, jeroslow_wang_choice])

print_results(results)
Example #5
        dest="I",
        help="number of iterations to run",
    )
    parser.add_argument(
        "-n",
        "--num",
        type=int,
        default=100,
        dest="N",
        help="number of elements in one dimension",
    )
    parser.add_argument(
        "-t",
        "--time",
        dest="timing",
        action="store_true",
        help="perform timing",
    )
    parser.add_argument(
        "-b",
        "--benchmark",
        type=int,
        default=1,
        dest="benchmark",
        help="number of times to benchmark this application (default 1 "
        "- normal execution)",
    )
    args = parser.parse_args()
    run_benchmark(run_stencil, args.benchmark, "Stencil",
                  (args.N, args.I, args.timing))
Example #6
def main():
    parser = cli.init_parser()
    args = parser.parse_args()
    log.setLevel(logging.DEBUG if args.debug else logging.INFO)
    config.DEBUG = args.debug
    config.UPDATE = args.properties or args.templates
    config.BENCHMARK = args.benchmark
    config.SIMILARITY_METRIC = args.metric
    config.THRESHOLD = args.similarity
    config.FIGURES = args.figures

    log.debug('Started in DEBUG mode')
    log.info('Hit CTRL+D to exit')

    properties = None
    filtered_properties = None
    templates = []

    # update properties if asked or if we're trying to update templates without a properties.json file
    if args.properties or (args.templates and not has_properties_cache()):
        log.info('Updating properties')
        timer.tic()
        properties = update()
        log.info(f'Cached properties in: {timer.tocvalue()}')

    if args.templates or not has_templates_cache():
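        # Rebuild templates when explicitly requested or when no cached templates exist.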
        log.info('Regenerating templates from cached properties')
        log.info(
            f'Using {config.MIN_PROPERTY_REFERENCE_COUNT} as minimum reference requirement'
        )
        timer.tic()
        if properties is None:
            properties = load_properties_from_cache()

        total_properties = 0
        for property_type in properties:
            total_properties += len(properties[property_type])
        log.info(
            f'{len(properties.keys())} property types with {total_properties} total properties found'
        )

        filtered_properties = get_filtered_properties(properties)

        templates = generate_templates_from_properties(filtered_properties)
        log.info(f'Generated {len(templates)} question templates')
        save_to_file(templates, filename=config.TEMPLATES_FILENAME)
        log.info(f'Templates created in: {timer.tocvalue()}')
    else:
        log.debug('Loading templates from cache')
        templates = load_templates_from_cache()

    if args.benchmark and args.question:
        log.error('Cannot ask question and run benchmarks at the same time')
        sys.exit(-1)

    if args.log or args.benchmark:
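        # Logging and benchmark runs also write to a timestamped file under results/.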
        now = datetime.now()
        current_time = now.strftime("%Y:%m:%d-%H:%M:%S")
        log_file = logging.FileHandler(
            f'results/{current_time}-benchmarks.log')
        log.addHandler(log_file)

    log.info(f'Loaded {len(templates)} templates')
    log.info(
        f"Using '{config.SIMILARITY_METRIC}' as similarity metric, with threshold of {config.THRESHOLD}"
    )

    if args.benchmark:
        log.info('Running benchmarks...')

        run_benchmark(templates)

    elif args.question:
        answer = get_answer(args.question, templates)
        log.info(answer)

    elif args.ask:
        while True:
            try:
                question = input("Ask a question:\n")
                if question == '':
                    continue
                answer = get_answer(question, templates)
                log.info(answer)
                log.info('')
            except KeyboardInterrupt:
                log.info('')
                continue
            except EOFError:
                log.info('\nExiting\n')
                sys.exit(0)

    else:
        parser.print_help()
Example #7
def main():
    run_benchmark('ufo_benchmark',
                  'cu2qu.ufo',
                  'fonts_to_quadratic',
                  setup_suffix='defcon',
                  repeat=10)
Example #8
     dest="S",
     help="number of iterations between sampling the log likelihood",
 )
 parser.add_argument(
     "--benchmark",
     type=int,
     default=1,
     dest="benchmark",
     help="number of times to benchmark this application (default 1 - "
     "normal execution)",
 )
 args = parser.parse_args()
 if args.P == 16:
     run_benchmark(
         run_linear_regression,
         args.benchmark,
         "LINREG(H)",
         (args.N, args.F, np.float16, args.I, args.S, args.B),
     )
 elif args.P == 32:
     run_benchmark(
         run_linear_regression,
         args.benchmark,
         "LINREG(S)",
         (args.N, args.F, np.float32, args.I, args.S, args.B),
     )
 elif args.P == 64:
     run_benchmark(
         run_linear_regression,
         args.benchmark,
         "LINREG(D)",
         (args.N, args.F, np.float64, args.I, args.S, args.B),
Example #9
        type=int,
        default=4,
        dest="sentence",
        help="sentence length",
    )
    parser.add_argument(
        "-w", "--word", type=int, default=10, dest="word", help="word size"
    )
    parser.add_argument(
        "-t",
        "--time",
        dest="timing",
        action="store_true",
        help="perform timing",
    )
    parser.add_argument(
        "--benchmark",
        type=int,
        default=1,
        dest="benchmark",
        help="number of times to benchmark this application (default 1 - "
        "normal execution)",
    )
    args = parser.parse_args()
    run_benchmark(
        run_lstm,
        args.benchmark,
        "LSTM Forward",
        (args.batch, args.hidden, args.sentence, args.word, args.timing),
    )
Example #10
     dest="S",
     help="number of iterations between sampling the log likelihood",
 )
 parser.add_argument(
     "--benchmark",
     type=int,
     default=1,
     dest="benchmark",
     help="number of times to benchmark this application (default 1 - "
     "normal execution)",
 )
 args = parser.parse_args()
 if args.P == 16:
     run_benchmark(
         run_logistic_regression,
         args.benchmark,
         "LOGREG(H)",
         (args.N, args.F, np.float16, args.I, args.S, args.B),
     )
 elif args.P == 32:
     run_benchmark(
         run_logistic_regression,
         args.benchmark,
         "LOGREG(S)",
         (args.N, args.F, np.float32, args.I, args.S, args.B),
     )
 elif args.P == 64:
     run_benchmark(
         run_logistic_regression,
         args.benchmark,
         "LOGREG(D)",
         (args.N, args.F, np.float64, args.I, args.S, args.B),
Example #11
     "--benchmark",
     type=int,
     default=1,
     dest="benchmark",
     help="number of times to benchmark this application (default 1 - "
     "normal execution)",
 )
 args = parser.parse_args()
 if args.P == 16:
     run_benchmark(
         run_kmeans,
         args.benchmark,
         "KMEANS(H)",
         (
             args.C,
             args.D,
             np.float16,
             args.I,
             args.N * 1000,
             args.S,
             args.benchmark > 1,
         ),
     )
 elif args.P == 32:
     run_benchmark(
         run_kmeans,
         args.benchmark,
         "KMEANS(S)",
         (
             args.C,
             args.D,
             np.float32,
Example #12
        dest="verbose",
        action="store_true",
        help="print verbose output",
    )
    parser.add_argument(
        "-b",
        "--benchmark",
        type=int,
        default=1,
        dest="benchmark",
        help="number of times to benchmark this application (default 1 - "
        "normal execution)",
    )

    args = parser.parse_args()
    run_benchmark(
        run_cg,
        args.benchmark,
        "PreCG" if args.precondition else "CG",
        (
            args.N,
            args.corners,
            args.precondition,
            args.conv_iters,
            args.max_iters,
            args.check,
            args.timing,
            args.verbose,
        ),
    )
Example #13
        type=float,
        default=0.1,
        dest="weight",
        help="standard deviation of weights for initialization",
    )
    parser.add_argument(
        "-b",
        "--benchmark",
        type=int,
        default=1,
        dest="benchmark",
        help="number of times to benchmark this application (default 1 - "
        "normal execution)",
    )
    args = parser.parse_args()
    run_benchmark(
        run_lstm,
        args.benchmark,
        "LSTM Full",
        (
            args.file_name,
            args.hidden,
            args.steps,
            args.loops,
            args.rate,
            args.weight,
            args.dump,
            args.timing,
        ),
    )
Example #14
        "-t",
        "--time",
        dest="timing",
        action="store_true",
        help="perform timing",
    )
    parser.add_argument(
        "-v",
        "--verbose",
        dest="verbose",
        action="store_true",
        help="print verbose output",
    )
    parser.add_argument(
        "-b",
        "--benchmark",
        type=int,
        default=1,
        dest="benchmark",
        help="number of times to benchmark this application (default 1 - "
        "normal execution)",
    )

    args = parser.parse_args()
    run_benchmark(
        run_jacobi,
        args.benchmark,
        "Jacobi",
        (args.N, args.iters, args.check, args.timing, args.verbose),
    )
Example #15
    args = parser.parse_args()
    log.init_log()
    # set working directory to this file's path
    handler.set_working_directory()
    build_all = True
    
    logging.info("Main module: build.py.")
    if args.core or args.format or args.tools:
        build_all = False

    configurations = loader.get_configs_from_json()
    if configurations is None:
        logging.error("No valid configurations were loaded.")
        sys.exit(1)

    resolve_dependencies(configurations)

    if build_all:
        build_projects(configurations, build_core=True,
                       build_formats=True, build_tools=True)
    else:
        build_projects(configurations, args.core, args.format,
                       args.tools)
    
    if args.generate:
        for config in configurations:
            generate.generate_models(config)
    if args.benchmark:
        for config in configurations:
            benchmark.run_benchmark(config)
Example #16
    parser.add_argument(
        "-p",
        "--precision",
        type=int,
        default=32,
        dest="P",
        help="number of bits of precision to use for the gemm computation "
        "(16,32,64)",
    )
    parser.add_argument(
        "-b",
        "--benchmark",
        type=int,
        default=1,
        dest="benchmark",
        help="number of times to benchmark this application (default 1 - "
        "normal execution)",
    )
    args = parser.parse_args()
    if args.P == 16:
        run_benchmark(run_gemm, args.benchmark, "HGEMM",
                      (args.N, args.I, np.float16))
    elif args.P == 32:
        run_benchmark(run_gemm, args.benchmark, "SGEMM",
                      (args.N, args.I, np.float32))
    elif args.P == 64:
        run_benchmark(run_gemm, args.benchmark, "DGEMM",
                      (args.N, args.I, np.float64))
    else:
        raise TypeError("Precision must be one of 16, 32, or 64")
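Most of the argparse-driven examples above (Black Scholes, Stencil, LSTM, Jacobi, linear/logistic regression, K-means, CG, GEMM) call run_benchmark with the same shape: run_benchmark(fn, benchmark_count, name, args). The helper below is only a minimal sketch of a compatible harness; the run_benchmark those examples actually import (likely the Legate/cuNumeric examples' benchmark helper) is more elaborate, and the timing scheme here is an assumption rather than that library's implementation.

import time


def run_benchmark(fn, benchmark_count, name, args):
    # Sketch only: run fn(*args) benchmark_count times and report the average
    # wall-clock time. A count of 1 means "normal execution" (no reporting),
    # matching the --benchmark help text in the examples above.
    if benchmark_count <= 1:
        fn(*args)
        return
    timings = []
    for _ in range(benchmark_count):
        start = time.perf_counter()
        fn(*args)
        timings.append(time.perf_counter() - start)
    average_ms = 1000.0 * sum(timings) / len(timings)
    print("{}: {} runs, average {:.3f} ms".format(name, len(timings), average_ms))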