def handle(self, db_data, frame, points):
    """Run DEXTR interactive segmentation for one frame.

    Args:
        db_data: task data object accepted by ``FrameProvider``.
        frame: frame number to segment.
        points: iterable of dicts with integer-convertible ``"x"``/``"y"``
            keys — the user-clicked extreme points.

    Returns:
        str: polygon as space-separated ``"x,y"`` pairs.

    Raises:
        Exception: if fewer than three contour points are detected, so no
            polygon can be built.
    """
    # Lazy initialization: build the plugin/network once, on first call.
    if not self._plugin:
        self._plugin = make_plugin_or_core()
        self._network = make_network(
            os.path.join(_DEXTR_MODEL_DIR, 'dextr.xml'),
            os.path.join(_DEXTR_MODEL_DIR, 'dextr.bin'))
        self._input_blob = next(iter(self._network.inputs))
        self._output_blob = next(iter(self._network.outputs))
        # Newer OpenVINO cores expose load_network(); older plugin API
        # exposes load(). Use hasattr (idiomatic existence test) instead
        # of getattr(..., False).
        if hasattr(self._plugin, 'load_network'):
            self._exec_network = self._plugin.load_network(
                self._network, 'CPU')
        else:
            self._exec_network = self._plugin.load(network=self._network)

    frame_provider = FrameProvider(db_data)
    image = frame_provider.get_frame(frame, frame_provider.Quality.ORIGINAL)
    image = PIL.Image.open(image[0])
    numpy_image = np.array(image)
    points = np.asarray(
        [[int(p["x"]), int(p["y"])] for p in points], dtype=int)
    # (left, top, right, bottom), padded and clamped to the image bounds.
    bounding_box = (
        max(min(points[:, 0]) - _DEXTR_PADDING, 0),
        max(min(points[:, 1]) - _DEXTR_PADDING, 0),
        min(max(points[:, 0]) + _DEXTR_PADDING, numpy_image.shape[1] - 1),
        min(max(points[:, 1]) + _DEXTR_PADDING, numpy_image.shape[0] - 1))

    # Prepare an image: crop to the padded box and resize to the model size.
    numpy_cropped = np.array(image.crop(bounding_box))
    resized = cv2.resize(
        numpy_cropped, (_DEXTR_SIZE, _DEXTR_SIZE),
        interpolation=cv2.INTER_CUBIC).astype(np.float32)

    # Make a heatmap: translate points into crop coordinates, scale to the
    # model size, then take the pixel-wise max of per-point Gaussians.
    points = points - [min(points[:, 0]), min(points[:, 1])] \
        + [_DEXTR_PADDING, _DEXTR_PADDING]
    points = (points * [
        _DEXTR_SIZE / numpy_cropped.shape[1],
        _DEXTR_SIZE / numpy_cropped.shape[0],
    ]).astype(int)
    heatmap = np.zeros(shape=resized.shape[:2], dtype=np.float64)
    for point in points:
        gaussian_x_axis = np.arange(0, _DEXTR_SIZE, 1, float) - point[0]
        gaussian_y_axis = np.arange(
            0, _DEXTR_SIZE, 1, float)[:, np.newaxis] - point[1]
        gaussian = np.exp(
            -4 * np.log(2) *
            ((gaussian_x_axis ** 2 + gaussian_y_axis ** 2) / 100)
        ).astype(np.float64)
        heatmap = np.maximum(heatmap, gaussian)
    cv2.normalize(heatmap, heatmap, 0, 255, cv2.NORM_MINMAX)

    # Concat an image and a heatmap into a 4-channel CHW input tensor.
    input_dextr = np.concatenate(
        (resized, heatmap[:, :, np.newaxis].astype(resized.dtype)), axis=2)
    input_dextr = input_dextr.transpose((2, 0, 1))

    pred = self._exec_network.infer(
        inputs={self._input_blob: input_dextr[np.newaxis, ...]})[
            self._output_blob][0, 0, :, :]
    # cv2.resize takes (width, height), hence the reversed shape.
    pred = cv2.resize(pred, tuple(reversed(numpy_cropped.shape[:2])),
                      interpolation=cv2.INTER_CUBIC)
    # Paste the thresholded crop prediction back into a full-frame mask.
    result = np.zeros(numpy_image.shape[:2])
    result[bounding_box[1]:bounding_box[1] + pred.shape[0],
           bounding_box[0]:bounding_box[0] + pred.shape[1]] = \
        pred > _DEXTR_TRESHOLD

    # Convert a mask to a polygon. findContours returns
    # (contours, hierarchy) in OpenCV 4.x but (image, contours, hierarchy)
    # in 3.x, so the index depends on the major version.
    result = np.array(result, dtype=np.uint8)
    cv2.normalize(result, result, 0, 255, cv2.NORM_MINMAX)
    if int(cv2.__version__.split('.')[0]) > 3:
        contours = cv2.findContours(
            result, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_TC89_KCOS)[0]
    else:
        contours = cv2.findContours(
            result, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_TC89_KCOS)[1]

    # Keep only the largest contour and drop singleton dimensions.
    contours = max(contours, key=lambda arr: arr.size)
    if contours.shape.count(1):
        contours = np.squeeze(contours)
    if contours.size < 3 * 2:
        # Fixed grammar of the user-facing error message.
        raise Exception(
            'Less than three points have been detected. '
            'Cannot build a polygon.')

    # Build the "x,y x,y ..." polygon string with join instead of the
    # original quadratic += loop followed by trailing-space trimming.
    return ' '.join(
        '{},{}'.format(int(point[0]), int(point[1])) for point in contours)
def data(self, request, pk):
    """Serve or create the data of a task.

    POST: validate the uploaded data, attach it to the task and start
    asynchronous task creation; responds 202 Accepted.

    GET: stream a chunk, a single frame or the preview image, selected by
    the ``type``, ``number`` and ``quality`` query parameters.

    Args:
        request: the incoming HTTP request.
        pk: primary key of the task (used for per-task logging).

    Returns:
        An HTTP response: file/image payload on success, or a 4xx response
        with a textual message on validation or retrieval errors.
    """
    if request.method == 'POST':
        db_task = self.get_object() # call check_object_permissions as well
        serializer = DataSerializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        db_data = serializer.save()
        db_task.data = db_data
        db_task.save()
        # dict() instead of the manual {k: v for ...} copy.
        data = dict(serializer.data)
        data['use_zip_chunks'] = serializer.validated_data['use_zip_chunks']
        # if the value of stop_frame is 0, then inside the function we
        # cannot know the value specified by the user or it's default value
        # from the database
        if 'stop_frame' not in serializer.validated_data:
            data['stop_frame'] = None
        task.create(db_task.id, data)
        return Response(serializer.data, status=status.HTTP_202_ACCEPTED)
    else:
        data_type = request.query_params.get('type', None)
        data_id = request.query_params.get('number', None)
        data_quality = request.query_params.get('quality', 'compressed')

        possible_data_type_values = ('chunk', 'frame', 'preview')
        possible_quality_values = ('compressed', 'original')

        # Validate the query parameters before touching the database.
        if not data_type or data_type not in possible_data_type_values:
            return Response(data='data type not specified or has wrong value',
                status=status.HTTP_400_BAD_REQUEST)
        elif data_type in ('chunk', 'frame'):
            if not data_id:
                return Response(data='number not specified',
                    status=status.HTTP_400_BAD_REQUEST)
            elif data_quality not in possible_quality_values:
                return Response(data='wrong quality value',
                    status=status.HTTP_400_BAD_REQUEST)

        try:
            db_task = self.get_object()
            frame_provider = FrameProvider(db_task.data)

            if data_type == 'chunk':
                data_id = int(data_id)
                data_quality = FrameProvider.Quality.COMPRESSED \
                    if data_quality == 'compressed' \
                    else FrameProvider.Quality.ORIGINAL
                # Follow symbol links if the chunk is a link on a real
                # image otherwise mimetype detection inside sendfile will
                # work incorrectly.
                path = os.path.realpath(
                    frame_provider.get_chunk(data_id, data_quality))
                return sendfile(request, path)
            elif data_type == 'frame':
                data_id = int(data_id)
                data_quality = FrameProvider.Quality.COMPRESSED \
                    if data_quality == 'compressed' \
                    else FrameProvider.Quality.ORIGINAL
                buf, mime = frame_provider.get_frame(data_id, data_quality)
                return HttpResponse(buf.getvalue(), content_type=mime)
            elif data_type == 'preview':
                return sendfile(request, frame_provider.get_preview())
            else:
                return Response(
                    data='unknown data type {}.'.format(data_type),
                    status=status.HTTP_400_BAD_REQUEST)
        except APIException as e:
            return Response(data=e.default_detail, status=e.status_code)
        except Exception as e:
            # Best-effort catch-all: log with the task logger and report
            # the requested parameters back to the client.
            msg = 'cannot get requested data type: {}, number: {}, ' \
                'quality: {}'.format(data_type, data_id, data_quality)
            slogger.task[pk].error(msg, exc_info=True)
            return Response(data=msg + '\n' + str(e),
                status=status.HTTP_400_BAD_REQUEST)