    async def run(self, input_data, draw_boxes, predict_batch):
        image_path = '/main/' + str(input_data.filename)
        with open(image_path, 'wb') as image_file:
            image_file.write(input_data.file.read())
        try:
            post_process = await self.processing(image_path, predict_batch)
        except ApplicationError as e:
            os.remove(image_path)
            raise e
        except Exception:
            os.remove(image_path)
            raise InvalidInputData()
        if not draw_boxes:
            os.remove(image_path)
            return post_process
        else:
            try:
                self.draw_bounding_boxes(input_data, post_process['bounding-boxes'])
            except ApplicationError as e:
                raise e
            except Exception as e:
                raise e
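For reference, every variant below builds its result as a dictionary keyed by 'bounding-boxes' (plus 'ImageName' when predict_batch is set). A representative shape, with placeholder values only:

    {
        "bounding-boxes": [
            {
                "ObjectClassId": 0,
                "ObjectClassName": "person",
                "confidence": 91.4,
                "coordinates": {"left": 34, "top": 58, "right": 210, "bottom": 402}
            }
        ],
        "ImageName": "example.jpg"
    }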
    async def infer(self, input_data, draw, predict_batch):
        await asyncio.sleep(0.00001)
        try:
            pillow_image = Image.open(input_data.file).convert('RGB')
            np_image = np.array(pillow_image)
        except Exception as e:
            raise InvalidInputData('corrupted image')
        try:
            with open(self.model_path + '/config.json') as f:
                data = json.load(f)
        except Exception as e:
            raise InvalidModelConfiguration(
                'config.json not found or corrupted')

        detection_threshold = data['detection_threshold']
        non_max_suppression_threshold = data['nms_threshold']
        hier_threshold = data['hier_threshold']
        height, width, depth = np_image.shape
        num = c_int(0)
        pnum = pointer(num)
        im = self.array_to_image(np_image)
        self.predict_image(self.net, im)
        dets = self.get_network_boxes(self.net, width, height,
                                      detection_threshold, hier_threshold,
                                      self.pmap, self.relative, pnum,
                                      self.letter)
        num = pnum[0]
        self.do_nms_obj(dets, num, self.meta.classes,
                        non_max_suppression_threshold)
        res = []
        for j in range(num):
            for i in range(self.meta.classes):
                if dets[j].prob[i] > detection_threshold:
                    b = dets[j].bbox
                    left = max(b.x - b.w / 2, 0)
                    right = min(b.x + b.w / 2, im.w - 1)
                    top = max(b.y - b.h / 2, 0)
                    bottom = min(b.y + b.h / 2, im.h - 1)
                    res.append({
                        'ObjectClassId': i,
                        'ObjectClassName': self.meta.names[i].decode('utf-8'),
                        'confidence': dets[j].prob[i] * 100,
                        'coordinates': {
                            'left': left,
                            'top': top,
                            'right': right,
                            'bottom': bottom
                        }
                    })
        self.free_detections(dets, num)
        if predict_batch:
            response = dict([('bounding-boxes', res),
                             ('ImageName', input_data.filename)])
        else:
            response = dict([('bounding-boxes', res)])
        if not draw:
            return response
        else:
            try:
                self.draw_image(pillow_image, response)
            except ApplicationError as e:
                raise e
            except Exception as e:
                raise e
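The snippet above relies on self.array_to_image to wrap the NumPy array in a darknet IMAGE struct, but that helper is not shown. A minimal sketch of the usual darknet-style conversion, assuming the IMAGE ctypes Structure from darknet's Python wrapper (not necessarily what this class actually does):

import numpy as np
from ctypes import POINTER, Structure, c_float, c_int

class IMAGE(Structure):
    _fields_ = [('w', c_int), ('h', c_int), ('c', c_int), ('data', POINTER(c_float))]

def array_to_image(arr):
    # HWC uint8 -> CHW float32 in [0, 1], then hand the raw buffer to darknet
    arr = arr.transpose(2, 0, 1)
    c, h, w = arr.shape
    arr = np.ascontiguousarray(arr.flat, dtype=np.float32) / 255.0
    data = arr.ctypes.data_as(POINTER(c_float))
    # keep a reference to `arr` alive for as long as darknet uses the IMAGE
    return IMAGE(w, h, c, data), arr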
Example #3
    async def processing(self, image_path, predict_batch):
        """
        Preprocesses the image and forms a prediction layout.
        :param predict_batch: Boolean
        :param image_path: Image path
        :return: Image prediction
        """
        await asyncio.sleep(0.00001)
        try:
            with open(self.model_path + '/config.json') as f:
                data = json.load(f)
        except Exception as e:
            raise InvalidModelConfiguration(
                'config.json not found or corrupted')
        conf_threshold = data['confidence']
        nms_threshold = data['nms_threshold']
        # load image
        try:
            image = cv2.imread(image_path)
            # In case of monochrome 1 channel image
            if len(image.shape) == 2:
                # Grayscale to RGB
                image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
            # For PNG images, only take the first 3 channels
            if image.shape[2] > 3:
                # RGBA to RGB
                image = cv2.cvtColor(image, cv2.COLOR_RGBA2RGB)
        except Exception as e:
            raise InvalidInputData()
        width = image.shape[1]
        height = image.shape[0]
        # create input blob
        blob = cv2.dnn.blobFromImage(image, self.scale,
                                     (self.image_width, self.image_height),
                                     (self.R_mean, self.G_mean, self.B_mean),
                                     self.swapRB, self.crop)
        # feed the blob to the network
        self.net.setInput(blob)
        # get the output layers
        output_layers = self.net.forward(self.__get_output_layers__())
        # for each detection from each output layer
        # get the confidence, class id, bounding box params
        # and ignore detections below threshold
        boxes = []
        class_ids = []
        confidences = []
        for layer in output_layers:
            for detection in layer:
                scores = detection[5:]
                class_id = np.argmax(scores)
                confidence = scores[class_id]
                if confidence * 100 > conf_threshold:
                    center_x = int(detection[0] * width)
                    center_y = int(detection[1] * height)
                    w = int(detection[2] * width)
                    h = int(detection[3] * height)
                    x = center_x - w / 2
                    y = center_y - h / 2
                    class_ids.append(int(class_id))
                    confidences.append(float(confidence * 100))
                    boxes.append([x, y, w, h])

        # apply non-max suppression to remove duplicate bounding boxes for same object
        remaining_indices = cv2.dnn.NMSBoxes(boxes, confidences,
                                             conf_threshold, nms_threshold)

        # release resources
        cv2.destroyAllWindows()

        # return the remaining boxes; NMSBoxes returns Nx1 indices on older OpenCV
        # builds and a flat array on newer ones, so flatten before indexing
        output_bboxes = []
        for i in np.array(remaining_indices).reshape(-1):
            i = int(i)
            box = boxes[i]
            output_bboxes.append({
                'ObjectClassName': self.labels[class_ids[i]],
                'ObjectClassId': class_ids[i],
                'confidence': confidences[i],
                'coordinates': {
                    'left': int(box[0]),
                    'right': int(box[0]) + int(box[2]),
                    'top': int(box[1]),
                    'bottom': int(box[1]) + int(box[3])
                }
            })
        if predict_batch:
            predictions_dict = dict([('bounding-boxes', output_bboxes),
                                     ('ImageName', image_path.split('/')[2])])
        else:
            predictions_dict = dict([('bounding-boxes', output_bboxes)])
        return predictions_dict
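Both OpenCV-DNN variants read their thresholds from a config.json stored next to the model. A plausible file (values are placeholders, not taken from the source); note that 'confidence' is compared against confidence * 100, so it is expressed as a percentage:

    {
        "confidence": 30,
        "nms_threshold": 0.45
    }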
Example #4
    async def infer(self, input_data, draw, predict_batch):
        await asyncio.sleep(0.00001)
        try:
            pillow_image = Image.open(input_data.file).convert('RGB')
            np_image = np.array(pillow_image)
        except Exception as e:
            raise InvalidInputData('corrupted image')
        try:
            with open(self.model_path + '/config.json') as f:
                data = json.load(f)
        except Exception as e:
            raise InvalidModelConfiguration(
                'config.json not found or corrupted')
        conf_threshold = data['confidence']
        nms_threshold = data['nms_threshold']
        height, width, depth = np_image.shape
        # create input blob
        blob = cv2.dnn.blobFromImage(np_image, self.scale,
                                     (self.image_width, self.image_height),
                                     (self.R_mean, self.G_mean, self.B_mean),
                                     self.swapRB, self.crop)
        # feed the blob to the network
        self.net.setInput(blob)
        # get the output layers
        output_layers = self.net.forward(self.__get_output_layers__())
        # for each detection from each output layer
        # get the confidence, class id, bounding box params
        # and ignore detections below threshold
        boxes = []
        class_ids = []
        confidences = []
        for layer in output_layers:
            for detection in layer:
                scores = detection[5:]
                class_id = np.argmax(scores)
                confidence = scores[class_id]
                if confidence * 100 > conf_threshold:
                    center_x = int(detection[0] * width)
                    center_y = int(detection[1] * height)
                    w = int(detection[2] * width)
                    h = int(detection[3] * height)
                    x = center_x - w / 2
                    y = center_y - h / 2
                    class_ids.append(int(class_id))
                    confidences.append(float(confidence * 100))
                    boxes.append([x, y, w, h])

        # apply non-max suppression to remove duplicate bounding boxes for same object
        remaining_indices = cv2.dnn.NMSBoxes(boxes, confidences,
                                             conf_threshold, nms_threshold)

        # release resources
        cv2.destroyAllWindows()

        # return the remaining boxes; NMSBoxes returns Nx1 indices on older OpenCV
        # builds and a flat array on newer ones, so flatten before indexing
        output_bboxes = []
        for i in np.array(remaining_indices).reshape(-1):
            i = int(i)
            box = boxes[i]
            output_bboxes.append({
                'ObjectClassName': self.labels[class_ids[i]],
                'ObjectClassId': class_ids[i],
                'confidence': confidences[i],
                'coordinates': {
                    'left': int(box[0]),
                    'right': int(box[0]) + int(box[2]),
                    'top': int(box[1]),
                    'bottom': int(box[1]) + int(box[3])
                }
            })
        if predict_batch:
            response = dict([('bounding-boxes', output_bboxes),
                             ('ImageName', input_data.filename)])
        else:
            response = dict([('bounding-boxes', output_bboxes)])
        if not draw:
            return response
        else:
            try:
                self.draw_image(pillow_image, response)
            except ApplicationError as e:
                raise e
            except Exception as e:
                raise e
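Both OpenCV-DNN examples also call self.__get_output_layers__() to find the detector's output layers, but the helper is not included above. A common implementation, offered here as an assumption rather than the author's code:

import numpy as np

def get_output_layers(net):
    # getUnconnectedOutLayers() returns 1-based indices; older OpenCV builds return
    # an Nx1 array and newer ones a flat array, so ravel() covers both
    layer_names = net.getLayerNames()
    return [layer_names[i - 1] for i in np.ravel(net.getUnconnectedOutLayers())]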
Example #5
    async def infer(self, input_data, draw, predict_batch):
        await asyncio.sleep(0.00001)
        try:
            pillow_image = Image.open(input_data.file).convert('RGB')
            np_image = np.array(pillow_image)
        except Exception as e:
            raise InvalidInputData('corrupted image')
        try:
            with open(self.model_path + '/config.json') as f:
                data = json.load(f)
        except Exception as e:
            raise InvalidModelConfiguration(
                'config.json not found or corrupted')
        json_confidence = data['confidence']
        json_predictions = data['predictions']
        with self.detection_graph.as_default():
            # Expand dimension since the model expects image to have shape [1, None, None, 3].
            img_expanded = np.expand_dims(np_image, axis=0)
            (boxes, scores, classes, num) = self.sess.run(
                [self.d_boxes, self.d_scores, self.d_classes, self.num_d],
                feed_dict={self.image_tensor: img_expanded})
        classes_names = ([self.category_index.get(i) for i in classes[0]])
        names_start = []
        for name in classes_names:
            if name is not None:
                names_start.append(name['name'])
        height, width, depth = np_image.shape
        names = []
        confidence = []
        ids = []
        bounding_boxes = []
        for i in range(json_predictions):
            if scores[0][i] * 100 >= json_confidence:
                ymin = max(int(round(boxes[0][i][0] * height)), 0)
                xmin = max(int(round(boxes[0][i][1] * width)), 0)
                ymax = max(int(round(boxes[0][i][2] * height)), 0)
                xmax = max(int(round(boxes[0][i][3] * width)), 0)
                tmp = dict([('left', xmin), ('top', ymin), ('right', xmax),
                            ('bottom', ymax)])
                bounding_boxes.append(tmp)
                confidence.append(float(scores[0][i] * 100))
                ids.append(int(classes[0][i]))
                names.append(names_start[i])

        responses_list = zip(names, confidence, bounding_boxes, ids)

        output = []
        for response in responses_list:
            tmp = dict([('ObjectClassName', response[0]),
                        ('confidence', response[1]),
                        ('coordinates', response[2]),
                        ('ObjectClassId', response[3])])
            output.append(tmp)
        if predict_batch:
            response = dict([('bounding-boxes', output),
                             ('ImageName', input_data.filename)])
        else:
            response = dict([('bounding-boxes', output)])

        if not draw:
            return response
        else:
            try:
                self.draw_image(pillow_image, response)
            except ApplicationError as e:
                raise e
            except Exception as e:
                raise e
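self.category_index in the TensorFlow examples maps class ids to {'id', 'name'} dictionaries, which is why each entry is read as name['name']. It is typically built from a label map with the TF Object Detection API utilities; the path below is a placeholder:

from object_detection.utils import label_map_util

# maps class id -> {'id': ..., 'name': ...}
category_index = label_map_util.create_category_index_from_labelmap(
    'models/my_model/object-label.pbtxt', use_display_name=True)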
Example #6
	async def infer(self, input_data, draw):
		await asyncio.sleep(0.00001)
		try:
			pillow_image = Image.open(input_data.file).convert('RGB')
			np_image = np.array(pillow_image)
		except Exception as e:
			raise InvalidInputData('corrupted image')
		try:
			with open(self.model_path + '/config.json') as f:
				data = json.load(f)
		except Exception as e:
			raise InvalidModelConfiguration('config.json not found or corrupted')
		json_confidence = data['confidence']
		json_predictions = data['predictions']
		
		n, c, h, w = self.net.input_info[self.input_name].input_data.shape
		images = np.zeros((n, c, h, w))
		# Read image to array
		img = np_image
		# Preprocess image
		ih, iw = img.shape[:-1]
		if (ih, iw) != (h, w):
			img = cv2.resize(img, (w, h))
		# Change data layout from HWC to CHW
		img = img.transpose((2, 0, 1))
		images[0] = img

		input_dict = {self.input_name: images}
		if self.input_info_name is not None:
			input_dict[self.input_info_name] = np.array([[w, h, c]])
		result = self.exec_net.infer(inputs=input_dict)

		output_key = "detection_output"
		output = result[output_key]
		output = np.squeeze(output, (0, 1))

		output_bboxes = []
		for batch_id, class_id, confidence, x1, y1, x2, y2 in output:
			bbox = list(np.array([x1*iw,
						y1*ih,
						x2*iw,
						y2*ih]).astype(float))
			if batch_id == -1:
				break
			if confidence * 100 >= json_confidence:
				output_bboxes.append( {
					"ObjectClassName": self.classes[int(class_id-1)],
					"ObjectClassId": int(class_id),
					"confidence": float(confidence * 100),
					"coordinates": {
							'left': int(bbox[0]),
							'right': int(bbox[2]),
							'top': int(bbox[1]),
							'bottom': int(bbox[3])
						}
					}
				)
		response = dict([('bounding-boxes', output_bboxes)])

		if not draw:
			return response
		else:
			try:
				self.draw_image(pillow_image, response)
			except ApplicationError as e:
				raise e
			except Exception as e:
				raise e
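The OpenVINO example assumes self.net, self.exec_net and self.input_name were prepared elsewhere. A minimal sketch using the legacy Inference Engine API that the snippet targets (model paths and device name are placeholders):

from openvino.inference_engine import IECore

ie = IECore()
net = ie.read_network(model='model.xml', weights='model.bin')
input_name = next(iter(net.input_info))
exec_net = ie.load_network(network=net, device_name='CPU')
# exec_net.infer(inputs={input_name: images}) returns the 'detection_output' blob
# that the infer() method above post-processes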
    async def processing(self,
                         image_path,
                         predict_batch,
                         resize_w=0,
                         resize_h=0,
                         pmap=None,
                         relative=0,
                         letter=0):
        """
        Preprocesses image and form a prediction layout.
        :param relative: relative size
        :param pmap: mean average precision
        :param resize_h: image height scale
        :param resize_w: image width scale
        :param letter: image aspect ratio
        :param predict_batch: Boolean
        :param image_path: Image path
        :return: Image prediction
        """
        await asyncio.sleep(0.00001)
        try:
            with open(self.model_path + '/config.json') as f:
                data = json.load(f)
        except Exception as e:
            raise InvalidModelConfiguration(
                'config.json not found or corrupted')

        detection_threshold = data['detection_threshold']
        non_max_suppression_threshold = data['nms_threshold']
        hier_threshold = data['hier_threshold']

        # load image
        try:
            im = self.load_image(self.__ensure__ctype__string(image_path),
                                 resize_w, resize_h)
        except Exception as e:
            raise InvalidInputData()
        num = c_int(0)
        pnum = pointer(num)
        self.predict_image(self.net, im)
        dets = self.get_network_boxes(self.net, im.w, im.h,
                                      detection_threshold, hier_threshold,
                                      pmap, relative, pnum, letter)
        num = pnum[0]
        self.do_nms_obj(dets, num, self.meta.classes,
                        non_max_suppression_threshold)
        res = []
        for j in range(num):
            for i in range(self.meta.classes):
                if dets[j].prob[i] > detection_threshold:
                    b = dets[j].bbox
                    res.append({
                        'ObjectClassId': i,
                        'ObjectClassName': self.meta.names[i].decode('utf-8'),
                        'confidence': dets[j].prob[i] * 100,
                        'coordinates': {
                            'left': int(b.x) - int(b.w / 2),
                            'top': int(b.y) - int(b.h / 2),
                            'right': int(b.x) + int(b.w / 2),
                            'bottom': int(b.y) + int(b.h / 2)
                        }
                    })
        self.free_image(im)
        self.free_detections(dets, num)
        if predict_batch:
            predictions_dict = dict([('bounding-boxes', res),
                                     ('ImageName', image_path.split('/')[2])])
        else:
            predictions_dict = dict([('bounding-boxes', res)])
        return predictions_dict
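The darknet-based methods expect three threshold keys in config.json. A plausible example, with placeholder values:

    {
        "detection_threshold": 0.25,
        "nms_threshold": 0.45,
        "hier_threshold": 0.5
    }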
Example #8
    async def infer(self, input_data, draw, predict_batch):

        await asyncio.sleep(0.00001)
        try:
            pillow_image = Image.open(input_data.file).convert('RGB')
            np_image = np.array(pillow_image)
        except Exception as e:
            raise InvalidInputData('corrupted image')
        try:
            with open(self.model_path + '/config.json') as f:
                data = json.load(f)
        except Exception as e:
            raise InvalidModelConfiguration(
                'config.json not found or corrupted')
        json_confidence = data['confidence']
        json_predictions = data['predictions']

        input_tensor = tf.convert_to_tensor(np_image)
        input_tensor = input_tensor[tf.newaxis, ...]
        detections = self.detect_fn(input_tensor)

        height, width, depth = np_image.shape

        names = []
        confidence = []
        ids = []
        bounding_boxes = []
        names_start = []
        scores = detections["detection_scores"][0].numpy()
        boxes = detections["detection_boxes"][0].numpy()
        classes = (detections['detection_classes'][0].numpy()).astype(int)
        classes_names = ([self.category_index.get(i) for i in classes])
        for name in classes_names:
            if name is not None:
                names_start.append(name['name'])

        for i in range(json_predictions):
            if scores[i] * 100 >= json_confidence:
                ymin = max(int(round(boxes[i][0] * height)), 0)
                xmin = max(int(round(boxes[i][1] * width)), 0)
                ymax = max(int(round(boxes[i][2] * height)), 0)
                xmax = max(int(round(boxes[i][3] * width)), 0)
                tmp = dict([('left', xmin), ('top', ymin), ('right', xmax),
                            ('bottom', ymax)])
                bounding_boxes.append(tmp)
                confidence.append(float(scores[i] * 100))
                ids.append(int(classes[i]))
                names.append(names_start[i])

        responses_list = zip(names, confidence, bounding_boxes, ids)

        output = []
        for response in responses_list:
            tmp = dict([('ObjectClassName', response[0]),
                        ('confidence', response[1]),
                        ('coordinates', response[2]),
                        ('ObjectClassId', response[3])])
            output.append(tmp)

        if predict_batch:
            response = dict([('bounding-boxes', output),
                             ('ImageName', input_data.filename)])
        else:
            response = dict([('bounding-boxes', output)])
        if not draw:
            return response
        else:
            try:
                self.draw_image(pillow_image, response)
            except ApplicationError as e:
                raise e
            except Exception as e:
                raise e
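In the final example, self.detect_fn is the callable obtained from a TF2 exported SavedModel. A typical way to create it (the directory path is a placeholder):

import tensorflow as tf

# detect_fn(input_tensor) returns a dict with 'detection_boxes', 'detection_scores'
# and 'detection_classes', exactly as consumed above
detect_fn = tf.saved_model.load('models/my_model/saved_model')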