def predict(self, image):
    boxes = get_all_boxes(image)
    images = time.measure(lambda: get_input(image, boxes), 'image preprocessing')
    result = time.measure(lambda: self.model.predict(images), 'localization')
    result = non_max_suppression(result)
    return result
def predict(self, image):
    boxes = get_all_boxes(image)
    images = time.measure(lambda: get_input(image, boxes), 'image preprocessing')
    cls, reg = time.measure(lambda: self.model.predict(images), 'localization')
    # Keep the positive-class confidence from the classification head and append
    # the box regression output, one [confidence, box...] row per candidate box.
    result = np.concatenate((cls[..., 1:], reg), axis=-1)
    result = non_max_suppression(result)
    return result
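# All of the listings here call `time.measure(...)`, which comes from the
# project's own `utils` package (see `from utils import time` in the entry
# script below), not from the standard library. A minimal sketch of what such a
# helper could look like, assuming it only runs a zero-argument callable,
# prints the elapsed time under the given label, and returns the result:

# utils/time.py (hypothetical sketch)
import time as _time


def measure(action, label):
    start = _time.perf_counter()
    result = action()
    print('{}: {:.3f} s'.format(label, _time.perf_counter() - start))
    return result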
def predict_multiple(self, images):
    boxes = [get_all_boxes(image) for image in images]
    inputs = time.measure(lambda: get_inputs(images, boxes), 'image preprocessing')
    cls, reg = time.measure(lambda: self.model.predict(inputs), 'localization')
    results = np.reshape(np.concatenate((cls[..., 1:], reg), axis=-1),
                         (len(boxes), len(boxes[0]), 5))
    results = [non_max_suppression(result) for result in results]
    return results
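# The predict methods rely on non_max_suppression, whose implementation is not
# shown in these listings. Below is a sketch of a generic greedy NMS, assuming
# each prediction row is [confidence, x, y, w, h] in the 256x256 network space
# (the project's actual box encoding and thresholds may differ): boxes under
# score_threshold are dropped, then the highest-scoring box is kept and any
# remaining box overlapping it by more than iou_threshold is suppressed.
def non_max_suppression(predictions, score_threshold=0.5, iou_threshold=0.45):
    boxes = [p for p in predictions if p[0] >= score_threshold]
    boxes.sort(key=lambda p: p[0], reverse=True)
    kept = []
    while boxes:
        best = boxes.pop(0)
        kept.append(best)
        boxes = [b for b in boxes if _iou(best[1:], b[1:]) <= iou_threshold]
    return kept


def _iou(a, b):
    # a, b: [x, y, w, h] with (x, y) the top-left corner.
    ax1, ay1, ax2, ay2 = a[0], a[1], a[0] + a[2], a[1] + a[3]
    bx1, by1, bx2, by2 = b[0], b[1], b[0] + b[2], b[1] + b[3]
    iw = max(0.0, min(ax2, bx2) - max(ax1, bx1))
    ih = max(0.0, min(ay2, by2) - max(ay1, by1))
    inter = iw * ih
    union = a[2] * a[3] + b[2] * b[3] - inter
    return inter / union if union > 0 else 0.0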
def detect(self, image):
    objects = time.measure(lambda: self.detector.predict(image), 'detection')
    # Grow each detected box by 15% so the classifier crop keeps some context.
    extend_bounding_boxes(objects, 0.15)
    images = time.measure(
        lambda: prepare_for_classification(objects, image), 'image preprocessing')
    labels = time.measure(lambda: self.classifier.predict(images), 'classification')
    print(objects, labels)
    return objects, labels
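# extend_bounding_boxes is not shown in these listings. A sketch of what the
# call above suggests (an assumption, not the actual helper), with boxes as
# [confidence, x, y, w, h] rows modified in place: each box is widened by
# `ratio` of its size on every side. Clamping to the image borders is omitted.
def extend_bounding_boxes(objects, ratio):
    for obj in objects:
        w, h = obj[3], obj[4]
        obj[1] -= w * ratio
        obj[2] -= h * ratio
        obj[3] = w * (1 + 2 * ratio)
        obj[4] = h * (1 + 2 * ratio)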
def predict_multiple(self, images):
    boxes = get_all_boxes()
    inputs, preprocessed_images = time.measure(
        lambda: get_all_inputs(images, boxes), 'preprocess')
    results = None
    # Run each of the four scale-specific models and collect their detections.
    for i in range(0, 4):
        cls, reg = time.measure(lambda: self.models[i].predict(inputs[i]), f'detection {i}')
        cls, reg = np.asarray(cls), np.asarray(reg)
        result = np.reshape(
            np.concatenate((cls[..., 1:], reg), axis=-1),
            (len(images), len(boxes[i]), 5)
        )
        if results is None:
            results = result
        else:
            results = np.concatenate((results, result), axis=1)
    results = [non_max_suppression(result) for result in results]
    # Scale factors that map boxes from the 256x256 network space back to the
    # original image size (width, height, width, height).
    box_scales = np.asarray([[len(image[0]) / 256, len(image) / 256,
                              len(image[0]) / 256, len(image) / 256]
                             for image in images])
    return results, preprocessed_images, box_scales
def detect(request): body = json.parse(request) path = body["path"] print(path) image = load_image(path) print(len(image), len(image[0])) objects, labels = time.measure( lambda: detector.detect_multiple(np.asarray([image])), 'the whole process') return HttpResponse(json.convert(objects[0], labels[0]), content_type="application/json")
def detect_multiple(self, images):
    objects, preprocessed_images, box_scales = time.measure(
        lambda: self.detector.predict_multiple(images), 'detection')
    if len(objects[0]) == 0:
        return [[]], [[]]
    preprocessed = time.measure(
        lambda: resize_for_classification(objects, preprocessed_images, images),
        'preprocessing')
    labels = time.measure(lambda: self.classifier.predict(preprocessed), 'classification')
    # The classifier returns one flat list of labels; regroup them per image.
    results = []
    j = 0
    for i in range(0, len(images)):
        results.append([labels[k] for k in range(j, j + len(objects[i]))])
        j += len(objects[i])
    # Scale the detected boxes back to the original image coordinates.
    r_objects = []
    for i in range(0, len(objects)):
        r_objects.append([])
        objs = objects[i]
        r_objects[i] = [[obj[0], *obj[1:] * box_scales[i]] for obj in objs]
    print(r_objects, results)
    return r_objects, results
import sys
from pathlib import Path

import numpy as np

from server.server import run

mode = sys.argv[1]
assert mode == 'runserver' or mode == 'local'

if mode == 'runserver':
    run()
else:
    from utils import load_image, time
    from trafficsigndetector.traffic_sign_detector import TrafficSignDetector

    image = load_image('{root}/assets/images/{image}.png'.format(
        root=Path(__file__).parent, image='testimage'))
    detector = TrafficSignDetector()
    time.measure(
        lambda: detector.detect_multiple(
            np.asarray([image for _ in range(0, 1)])),
        'whole process')
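# Usage of the entry script above (the script name is assumed; only the two
# accepted modes appear in the listing):
#
#   python main.py runserver   # start the HTTP server
#   python main.py local       # run one detection on assets/images/testimage.png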