def main(unused_args):
    """Benchmark entry point: build binaries and run models on adb devices.

    Reads all configuration from FLAGS, runs the benchmark binary on each
    selected device/ABI combination, collects one result file per run, and
    aggregates them via process_result().

    Args:
        unused_args: leftover positional args from the flags parser; ignored.
    """
    aibench_check(FLAGS.benchmark_option in base_pb2.BenchmarkOption.keys(),
                  "Wrong benchmark option %s" % FLAGS.benchmark_option)
    benchmark_option = base_pb2.BenchmarkOption.Value(FLAGS.benchmark_option)

    # Resolve target devices, optionally filtered by SoC and capped in count.
    target_socs = None
    if FLAGS.target_socs != "all":
        target_socs = set(FLAGS.target_socs.split(','))
    target_devices = sh_commands.get_target_socs_serialnos(target_socs)
    if not target_devices:
        print("No available target!")
        # FIX: previously fell through and still did all the prep work
        # (mkdir, model prep, dataset download) with zero devices to run on.
        return
    if FLAGS.num_targets != 0 and FLAGS.num_targets < len(target_devices):
        # Shuffle so the capped subset is not biased toward adb's ordering.
        random.shuffle(target_devices)
        target_devices = target_devices[:FLAGS.num_targets]

    if not os.path.exists(FLAGS.output_dir):
        os.mkdir(FLAGS.output_dir)

    target_abis = FLAGS.target_abis.split(',')
    target = FLAGS.target
    host_bin_path, bin_name = sh_commands.bazel_target_to_bin(target)
    executors, device_types, push_list, benchmark_list = \
        sh_commands.prepare_all_models(
            FLAGS.executors, FLAGS.model_names,
            FLAGS.device_types, FLAGS.output_dir)
    configs = get_configs()
    input_dir = sh_commands.prepare_datasets(configs, FLAGS.output_dir,
                                             FLAGS.input_dir)
    if base_pb2.TFLITE in executors:
        sh_commands.get_tflite(configs, FLAGS.output_dir)

    result_files = []
    for target_abi in target_abis:
        print("Prepare to run models on %s" % target_abi)
        if target_abi not in ABI_TYPES:
            print("Not supported abi: %s" % target_abi)
            continue
        if target_abi == "host":
            print("Unable to run on host yet!")
            continue
        for serialno in target_devices:
            # FIX: check ABI support BEFORE paying for the bazel build;
            # the original built first and only then skipped the device.
            if target_abi not in set(sh_commands.adb_supported_abis(serialno)):
                print("Skip device %s which does not support ABI %s" %
                      (serialno, target_abi))
                continue
            sh_commands.bazel_build(serialno, target, target_abi,
                                    executors, device_types)
            result_file = sh_commands.adb_run(
                target_abi, serialno, host_bin_path, bin_name,
                benchmark_option, input_dir, FLAGS.run_interval,
                FLAGS.num_threads, FLAGS.max_time_per_lock, push_list,
                benchmark_list, executors, DEVICE_PATH, FLAGS.output_dir)
            result_files.append(result_file)

    process_result(result_files)
def get_model_file(file_path, checksum, output_dir, push_list):
    """Ensure a model file is available locally and queue it for pushing.

    Args:
        file_path: http(s) URL (downloaded into output_dir when missing or
            stale) or an existing local path.
        checksum: expected md5 digest of the file.
        output_dir: directory used as the download cache for URL inputs.
        push_list: list mutated in place; the verified local path is appended.

    Raises:
        whatever aibench_check raises when the md5 checksum does not match.
    """
    filename = file_path.split('/')[-1]
    if file_path.startswith("http"):
        local_file_path = output_dir + '/' + filename
        # Re-download only when the cached copy is absent or its checksum
        # no longer matches the expected digest.
        if not os.path.exists(local_file_path) \
                or file_checksum(local_file_path) != checksum:
            print("downloading %s..." % filename)
            urllib.urlretrieve(file_path, local_file_path)
    else:
        local_file_path = file_path
    # FIX: checksum verification was duplicated verbatim in both branches;
    # perform it once on whichever path was selected.
    aibench_check(file_checksum(local_file_path) == checksum,
                  "file %s md5 checksum not match" % filename)
    push_list.append(local_file_path)
def process(self, lines, product_info):
    """Collect precision records from benchmark output lines.

    Scans *lines* for entries prefixed by the Precision marker; each such
    entry encodes, comma-separated: executor, model_name, device_type,
    quantize flag, evaluator, precision. Parsed rows — model, quantization
    label, the caller-supplied product_info columns, device type, executor,
    precision — are appended to self.precisions.
    """
    prefix = str(base_pb2.Precision) + ":"
    prefix_len = len(prefix)
    for raw in lines:
        # Guard clause: ignore anything that is not a precision line.
        if not raw.startswith(prefix):
            continue
        # executor,model_name,device_type,quantize,evaluator,precision
        fields = raw[prefix_len:].split(',')
        executor = int(fields[0])
        model_name = base_pb2.ModelName.Name(int(fields[1]))
        device_type = base_pb2.DeviceType.Name(int(fields[2]))
        quantize = "Quantized" if int(fields[3]) else "Float"
        evaluator = int(fields[4])
        precision = str(float(fields[5]))
        aibench_check(
            evaluator == base_pb2.MetricEvaluator.ImageClassification,
            "Only support ImageClassification now")
        row = [model_name, quantize] + product_info \
            + [device_type, executor]
        self.precisions.append(row + [precision])
def main(unused_args):
    """Benchmark entry point: prepare models/datasets and run them on devices.

    Devices are exercised sequentially, or concurrently (one thread per
    device) when FLAGS.all_devices_at_once is set. Each run_on_device call
    appends its result file to result_files, which is aggregated by
    process_result() at the end.

    Args:
        unused_args: leftover positional args from the flags parser; ignored.
    """
    aibench_check(FLAGS.benchmark_option in base_pb2.BenchmarkOption.keys(),
                  "Wrong benchmark option %s" % FLAGS.benchmark_option)
    benchmark_option = base_pb2.BenchmarkOption.Value(FLAGS.benchmark_option)

    # Resolve target devices, optionally filtered by SoC and capped in count.
    target_socs = None
    if FLAGS.target_socs != "all":
        target_socs = set(FLAGS.target_socs.split(','))
    target_devices = bench_engine.get_target_devices_by_socs(target_socs)
    if not target_devices:
        print("No available target!")
        # FIX: previously fell through and still did all the prep work
        # (mkdir, model prep, dataset download) with zero devices to run on.
        return
    if FLAGS.num_targets != 0 and FLAGS.num_targets < len(target_devices):
        # Shuffle so the capped subset is not biased toward discovery order.
        random.shuffle(target_devices)
        target_devices = target_devices[:FLAGS.num_targets]

    if not os.path.exists(FLAGS.output_dir):
        os.mkdir(FLAGS.output_dir)

    target_abis = FLAGS.target_abis.split(',')
    target = FLAGS.target
    host_bin_path, bin_name = bench_engine.bazel_target_to_bin(target)
    executors, device_types, push_list, benchmark_list = \
        bench_engine.prepare_all_models(
            FLAGS.executors, FLAGS.model_names,
            FLAGS.device_types, FLAGS.output_dir)
    configs = get_configs()
    input_dir = bench_engine.prepare_datasets(configs, FLAGS.output_dir,
                                              FLAGS.input_dir)
    if base_pb2.TFLITE in executors:
        bench_engine.get_tflite(configs, FLAGS.output_dir)

    result_files = []
    for target_abi in target_abis:
        print("Prepare to run models on %s" % target_abi)
        if target_abi not in ABI_TYPES:
            print("Not supported abi: %s" % target_abi)
            continue
        if target_abi == "host":
            print("Unable to run on host yet!")
            continue
        threads = []
        for target_device in target_devices:
            if target_abi not in target_device.target_abis:
                print("Skip device %s which does not support ABI %s" %
                      (target_device.address, target_abi))
                continue
            avail_executors = \
                target_device.get_available_executors(executors, target_abi)
            if not avail_executors:
                print("Skip device %s which doesn't support current "
                      "executors" % target_device.address)
                continue
            # FIX: the 12-element argument tuple was duplicated between the
            # threaded and sequential branches; build it once.
            run_args = (target_device, target_abi, push_list,
                        avail_executors, target, device_types,
                        host_bin_path, bin_name, input_dir,
                        benchmark_option, benchmark_list, result_files)
            if FLAGS.all_devices_at_once:
                t = threading.Thread(target=run_on_device, args=run_args)
                t.start()
                threads.append(t)
            else:
                run_on_device(*run_args)
        # Wait for this ABI's device threads before moving to the next ABI.
        if FLAGS.all_devices_at_once:
            for t in threads:
                t.join()

    if result_files:
        process_result(result_files)