def _get_devices(num_devices):
    """Returns list of device names in usb:N or pci:N format.

    This function prefers returning PCI Edge TPU first.

    Args:
      num_devices: int, number of devices expected

    Returns:
      list of devices in pci:N and/or usb:N format

    Raises:
      RuntimeError: if not enough devices are available
    """
    available = list_edge_tpus()
    if len(available) < num_devices:
        raise RuntimeError(
            'Not enough Edge TPUs detected, expected %d, detected %d.' %
            (num_devices, len(available)))

    # PCI devices are preferred, so fill pci:N slots first and cover the
    # remainder with usb:N slots.
    pci_count = sum(device['type'] == 'pci' for device in available)
    pci_slots = min(num_devices, pci_count)
    usb_slots = max(0, num_devices - pci_count)

    names = ['pci:%d' % i for i in range(pci_slots)]
    names.extend('usb:%d' % i for i in range(usb_slots))
    return names
def configure(self, modelDir):
    """Loads model.tflite from modelDir, using an Edge TPU when one is attached.

    Falls back to the plain tflite_runtime CPU interpreter when no Edge TPU
    is detected. Also reads the label file and caches the model input size.
    """
    model_path = modelDir + "/model.tflite"
    if edgetpu.list_edge_tpus():
        print("EdgeTPU found. Connecting to it via PyCoral...")
        from pycoral.utils.edgetpu import make_interpreter
        self.interpreter = make_interpreter(model_path)
    else:
        # Workaround, if no edgetpu is available
        print("No EdgeTPUs found. Using the CPU only...")
        from tflite_runtime.interpreter import Interpreter
        self.interpreter = Interpreter(model_path)
    self.interpreter.allocate_tensors()
    self.modelDir = modelDir
    self._inputSize = common.input_size(self.interpreter)
    self._labels = dataset.read_label_file(modelDir + "/labels.txt")
def main():
    """Benchmarks several models across 1..N Edge TPUs and logs the speedup."""
    num_inferences = 30000
    input_filename = 'cat.bmp'

    tpus = list_edge_tpus()
    num_devices = len(tpus)
    # Prefer PCI devices: fill pci:N slots first, then usb:N for the rest.
    pci_count = sum(1 for tpu in tpus if tpu['type'] == 'pci')
    devices = ['pci:%d' % i for i in range(min(num_devices, pci_count))]
    devices += ['usb:%d' % i for i in range(max(0, num_devices - pci_count))]

    model_names = [
        'mobilenet_v1_1.0_224_quant_edgetpu.tflite',
        'mobilenet_v2_1.0_224_quant_edgetpu.tflite',
        'ssd_mobilenet_v1_coco_quant_postprocess_edgetpu.tflite',
        'ssd_mobilenet_v2_coco_quant_postprocess_edgetpu.tflite',
        'inception_v1_224_quant_edgetpu.tflite',
        'inception_v2_224_quant_edgetpu.tflite',
        'inception_v3_299_quant_edgetpu.tflite',
        'inception_v4_299_quant_edgetpu.tflite',
    ]

    def show_speedup(inference_costs):
        # Entry [k-1] holds the wall time measured with k worker threads.
        logging.info('Single Edge TPU base time: %f seconds',
                     inference_costs[0])
        for i in range(1, len(inference_costs)):
            logging.info('# TPUs: %d, speedup: %f', i + 1,
                         inference_costs[0] / inference_costs[i])

    inference_costs_map = {}
    for model_name in model_names:
        task_type = 'detection' if 'ssd' in model_name else 'classification'
        inference_costs_map[model_name] = [0.0] * num_devices
        # Run with the most threads first so failures surface early.
        for num_threads in range(num_devices, 0, -1):
            cost = run_inference_job(model_name, input_filename,
                                     num_inferences, num_threads, task_type,
                                     devices)
            inference_costs_map[model_name][num_threads - 1] = cost
            logging.info('model: %s, # threads: %d, cost: %f seconds',
                         model_name, num_threads, cost)
        show_speedup(inference_costs_map[model_name])

    logging.info('============Summary==========')
    for model_name in model_names:
        logging.info('---------------------------')
        logging.info('Model: %s', model_name)
        show_speedup(inference_costs_map[model_name])
def main():
    """Compares running two models on one shared Edge TPU vs. two Edge TPUs.

    Parses CLI arguments, requires at least two detected Edge TPUs, then
    times both configurations and prints the total cost of each.

    Raises:
      RuntimeError: if fewer than two Edge TPUs are detected.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--classification_model',
                        help='Path of classification model.',
                        required=True)
    parser.add_argument('--detection_model',
                        help='Path of detection model.',
                        required=True)
    parser.add_argument('--image', help='Path of the image.', required=True)
    parser.add_argument('--num_inferences',
                        help='Number of inferences to run.',
                        type=int,
                        default=2000)
    parser.add_argument(
        '--batch_size',
        help='Runs one model batch_size times before switching to the other.',
        type=int,
        default=10)
    args = parser.parse_args()

    if len(list_edge_tpus()) <= 1:
        # Fixed grammar of the user-facing message ("two Edge TPU" -> "two
        # Edge TPUs").
        raise RuntimeError(
            'This demo requires at least two Edge TPUs available.')

    print(
        'Running %s and %s with one Edge TPU, # inferences %d, batch_size %d.'
        % (args.classification_model, args.detection_model,
           args.num_inferences, args.batch_size))
    cost_one_tpu = run_two_models_one_tpu(args.classification_model,
                                          args.detection_model, args.image,
                                          args.num_inferences,
                                          args.batch_size)

    print(
        'Running %s and %s with two Edge TPUs, # inferences %d.' %
        (args.classification_model, args.detection_model, args.num_inferences))
    cost_two_tpus = run_two_models_two_tpus(args.classification_model,
                                            args.detection_model, args.image,
                                            args.num_inferences)

    print('Inference with one Edge TPU costs %.2f seconds.' % cost_one_tpu)
    print('Inference with two Edge TPUs costs %.2f seconds.' % cost_two_tpus)
def edge_tpus():
    """Yields all available unassigned Edge TPU devices.

    Set CORAL_VISIBLE_DEVICES environmental variable to a comma-separated
    list of device paths to make only those devices visible to the
    application.
    """
    try:
        from pycoral.utils.edgetpu import list_edge_tpus
        from watsor.detection.edge_tpu import CoralObjectDetector

        env_cvd = os.environ.get("CORAL_VISIBLE_DEVICES")
        allowed = [] if env_cvd is None else [
            entry.strip() for entry in env_cvd.split(",")
        ]
        # NOTE(review): the numeric part of the name is the device's position
        # in the full device list, not a per-type counter — confirm this is
        # the intended addressing for mixed pci/usb setups.
        for position, device in enumerate(list_edge_tpus()):
            name = '{}:{}'.format(device['type'], position)
            if allowed and name not in allowed:
                continue
            yield name, CoralObjectDetector
    except (RuntimeError, ImportError):
        # No pycoral / detector support installed: expose no devices.
        return
def test_list_edge_tpu_paths(self):
    """Sanity check: at least one Edge TPU device must be detected."""
    detected = edgetpu.list_edge_tpus()
    self.assertGreater(len(detected), 0)