def render_gen(args):
    fps_counter = utils.avg_fps_counter(30)

    engines, titles = utils.make_engines(args.model, DetectionEngine)
    assert utils.same_input_image_sizes(engines)
    engines = itertools.cycle(engines)
    engine = next(engines)

    labels = utils.load_labels(args.labels) if args.labels else None
    filtered_labels = set(
        l.strip() for l in args.filter.split(',')) if args.filter else None
    get_color = make_get_color(args.color, labels)

    draw_overlay = True

    yield utils.input_image_size(engine)

    output = None
    while True:
        tensor, layout, command = (yield output)

        inference_rate = next(fps_counter)
        if draw_overlay:
            start = time.monotonic()
            # Changed to detect_with_input_tensor; the result is the same.
            # See https://coral.googlesource.com/edgetpuvision/+/refs/heads/4.14.98%5E%21/#F0
            objs = engine.detect_with_input_tensor(tensor,
                                                   threshold=args.threshold,
                                                   top_k=args.top_k)
            inference_time = time.monotonic() - start

            objs = [convert(obj, labels) for obj in objs]

            if labels and filtered_labels:
                objs = [obj for obj in objs if obj.label in filtered_labels]

            objs = [obj for obj in objs
                    if args.min_area <= obj.bbox.area() <= args.max_area]

            if args.print:
                print_results(inference_rate, objs)

            autoturret_render_artifacts = controller.run(objs)

            title = titles[engine]
            output = overlay(title, objs, get_color, inference_time,
                             inference_rate, layout, autoturret_render_artifacts)
        else:
            output = None

        if command == 'o':
            draw_overlay = not draw_overlay
        elif command == 'n':
            engine = next(engines)
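# A minimal sketch of how a pipeline might drive this coroutine-style generator:
# prime it once to learn the model's input size, then send one
# (tensor, layout, command) tuple per frame and collect the overlay it yields.
# frame_source and show_overlay are hypothetical stand-ins, not part of
# edgetpuvision; the real pipeline feeds the generator from its streaming callback.
def drive(args, frame_source, show_overlay):
    gen = render_gen(args)
    inference_size = next(gen)  # First yield reports the (width, height) the model expects.
    for tensor, layout in frame_source(inference_size):
        command = None  # 'o' toggles the overlay, 'n' cycles to the next engine.
        output = gen.send((tensor, layout, command))  # Rendered overlay, or None.
        if output is not None:
            show_overlay(output)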
def render_gen(self, args1):
    fps_counter = utils.avg_fps_counter(30)
    # Note: the args1 parameter is unused; arguments are re-parsed from self.parser.
    args = self.parser.parse_args()

    engines, titles = utils.make_engines(args.model, DetectionEngine)
    assert utils.same_input_image_sizes(engines)
    engines = itertools.cycle(engines)
    engine = next(engines)

    labels = utils.load_labels(args.labels) if args.labels else None
    filtered_labels = set(
        l.strip() for l in args.filter.split(',')) if args.filter else None
    get_color = make_get_color(args.color, labels)

    draw_overlay = True

    yield utils.input_image_size(engine)

    output = None
    while True:
        tensor, layout, command = (yield output)

        inference_rate = next(fps_counter)
        if draw_overlay:
            start = time.monotonic()
            objs = engine.detect_with_input_tensor(tensor,
                                                   threshold=args.threshold,
                                                   top_k=args.top_k)
            inference_time = time.monotonic() - start

            objs = [convert(obj, labels) for obj in objs]

            if labels and filtered_labels:
                objs = [obj for obj in objs if obj.label in filtered_labels]

            objs = [obj for obj in objs
                    if args.min_area <= obj.bbox.area() <= args.max_area]

            if args.print:
                print_results(inference_rate, objs)

            title = titles[engine]
            output = overlay(title, objs, get_color, inference_time,
                             inference_rate, layout)
        else:
            output = None

        if command == 'o':
            draw_overlay = not draw_overlay
        elif command == 'n':
            engine = next(engines)
def render_gen(args):
    fps_counter = utils.avg_fps_counter(30)

    draw_overlay = True

    yield utils.input_image_size()

    while True:
        #tensor, layout, command = (yield output)
        inference_rate = next(fps_counter)
        if draw_overlay:
            start = time.monotonic()
        else:
            output = None
def render_gen(args):
    acc = accumulator(size=args.window, top_k=args.top_k)
    acc.send(None)  # Initialize.

    fps_counter = utils.avg_fps_counter(30)

    engines, titles = utils.make_engines(args.model, ClassificationEngine)
    assert utils.same_input_image_sizes(engines)
    engines = itertools.cycle(engines)
    engine = next(engines)

    labels = utils.load_labels(args.labels)
    draw_overlay = True

    yield utils.input_image_size(engine)

    output = None
    while True:
        tensor, layout, command = (yield output)

        inference_rate = next(fps_counter)
        if draw_overlay:
            start = time.monotonic()
            results = engine.classify_with_input_tensor(tensor,
                                                        threshold=args.threshold,
                                                        top_k=args.top_k)
            inference_time = time.monotonic() - start

            results = [(labels[i], score) for i, score in results]
            results = acc.send(results)
            if args.print:
                print_results(inference_rate, results)

            title = titles[engine]
            output = overlay(title, results, inference_time, inference_rate, layout)
        else:
            output = None

        if command == 'o':
            draw_overlay = not draw_overlay
        elif command == 'n':
            engine = next(engines)
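# The accumulator() used above is defined elsewhere in classify.py. As a rough
# illustration of the contract it satisfies -- primed with send(None), then fed a
# [(label, score), ...] list per frame and returning a smoothed top-k list -- here
# is a sketch. The sliding-window averaging below is an assumption, not the actual
# edgetpuvision implementation.
import collections

def accumulator(size, top_k):
    window = collections.deque(maxlen=size)  # Last `size` frames of classification results.
    results = []
    while True:
        incoming = yield results  # Receives [(label, score), ...] for the newest frame.
        window.append(dict(incoming))
        totals = collections.defaultdict(float)
        for frame in window:
            for label, score in frame.items():
                totals[label] += score
        averaged = [(label, total / len(window)) for label, total in totals.items()]
        averaged.sort(key=lambda pair: pair[1], reverse=True)
        results = averaged[:top_k]  # Smoothed scores for the top_k labels.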