Ejemplo n.º 1
0
def main():
    """Command-line entry point: parse CLI flags, run the benchmark suite,
    and optionally dump the results as JSON to stdout."""
    from ai_benchmark import AIBenchmark

    args, _ = parser.parse_known_args()

    runner = AIBenchmark(
        use_cpu=args.use_cpu,
        verbose_level=args.verbose,
        seed=args.seed,
    )
    test_info, results = runner.run(
        precision=args.precision,
        test_ids=args.test_ids,
        training=args.run_training,
        inference=args.run_inference,
        micro=args.run_micro,
        cpu_cores=args.cpu_cores,
        inter_threads=args.inter_threads,
        intra_threads=args.intra_threads,
    )

    if args.json:
        # Flatten the result objects into plain dicts so json.dumps accepts them.
        payload = vars(results)
        payload['test_results'] = {
            name: vars(entry) for name, entry in payload['test_results'].items()
        }
        payload['test_info'] = vars(test_info)
        # Drop the embedded results back-reference to avoid redundant output.
        payload['test_info'].pop('results', None)
        print(json.dumps(payload, indent=4))
Ejemplo n.º 2
0
# Run the full suite on CPU with maximum logging at high precision.
from ai_benchmark import AIBenchmark

bench = AIBenchmark(use_CPU=True, verbose_level=3)
results = bench.run(precision="high")
# Report the TensorFlow build the benchmark will execute against.
import tensorflow as tf
print(tf.__version__)

# Then run the full suite with all-default settings.
from ai_benchmark import AIBenchmark

bench = AIBenchmark()
results = bench.run()
Ejemplo n.º 4
0
# CPU-only run of the full suite, most verbose logging, default precision.
from ai_benchmark import AIBenchmark

bench = AIBenchmark(use_CPU=True, verbose_level=3)
results = bench.run()
    else:
        # Non-default policy: set KMP_AFFINITY in this process's environment
        # and also export it via a shell command. NOTE(review): os.system
        # spawns a short-lived subshell, so that export only affects the
        # subshell, not subsequent processes — presumably the os.environ
        # assignment is the one that matters; verify against callers.
        os.environ["KMP_AFFINITY"] = "verbose,{policy}".format(
            policy=thread_mapping_policy)
        os.system("export KMP_AFFINITY=verbose,{policy}".format(
            policy=thread_mapping_policy))


if __name__ == "__main__":
    """ python3 run_experiments.py machine_name start_index n_rounds
    """
    # Directory where the design-of-experiments (DoE) CSVs are written and read.
    dir_path = "/home/users/mwcamargo/td_mapping/src/resultados/"
    machine_name = sys.argv[1]
    start_index = int(sys.argv[2])
    n_rounds = int(sys.argv[3])

    # Single CPU-only benchmark instance reused across all rounds.
    benchmark = AIBenchmark(use_CPU=True)

    # Generate the DoE CSV for each round, then read it back to pick up the
    # thread mappings belonging to that round.
    for i in range(start_index, start_index + n_rounds):
        generateDoeCSV(dir_path + "doe_" + machine_name +
                       "_{number}.csv".format(number=i))

        with open(
                dir_path + "doe_" + machine_name +
                "_{number}.csv".format(number=i), "r") as doe:
            # NOTE(review): `i` is an int, so "------" + i raises TypeError at
            # runtime — should be str(i) or an f-string. Left unchanged in this
            # documentation-only pass.
            print("------" + i + " round------", flush=True)
            experiment_rounds = doe.readlines()

            # Each CSV line is one experiment; the first comma-separated field
            # is the thread mapping. (Loop body continues past this excerpt.)
            for experiment_round in experiment_rounds:
                mappings = experiment_round.split(",")
                thread_mapping = mappings[0]
Ejemplo n.º 6
0
# Inference-only benchmark on GPU (use_CPU=False) with minimal logging.
from ai_benchmark import AIBenchmark

bench = AIBenchmark(use_CPU=False, verbose_level=1)
results = bench.run_inference(precision="high")
Ejemplo n.º 7
0
# CPU-only high-precision run; the return value is intentionally discarded.
from ai_benchmark import AIBenchmark

runner = AIBenchmark(use_CPU=True)
runner.run(precision="high")