def __init__(self, model_path, mirror=False): """Creates a PoseEngine with given model. Args: model_path: String, path to TF-Lite Flatbuffer file. mirror: Flip keypoints horizontally Raises: ValueError: An error occurred when model output is invalid. """ BasicEngine.__init__(self, model_path) self._mirror = mirror self._input_tensor_shape = self.get_input_tensor_shape() if (self._input_tensor_shape.size != 4 or self._input_tensor_shape[3] != 3 or self._input_tensor_shape[0] != 1): raise ValueError( ('Image model should have input shape [1, height, width, 3]!' ' This model has {}.'.format(self._input_tensor_shape))) _, self.image_height, self.image_width, self.image_depth = \ self.get_input_tensor_shape() # The API returns all the output tensors flattened and concatenated. We # have to figure out the boundaries from the tensor shapes & sizes. offset = 0 self._output_offsets = [0] for size in self.get_all_output_tensors_sizes(): offset += size self._output_offsets.append(int(offset))
def __init__(self, model_path, mirror=False):
  BasicEngine.__init__(self, model_path)
  self._mirror = mirror

  self._input_tensor_shape = self.get_input_tensor_shape()
  _, self.image_height, self.image_width, self.image_depth = \
      self.get_input_tensor_shape()

  offset = 0
  self._output_offsets = [0]
  for size in self.get_all_output_tensors_sizes():
    offset += size
    self._output_offsets.append(offset)

def __init__(self, model_path): """Creates a EmbeddingEngine with given model and labels. Args: model_path: String, path to TF-Lite Flatbuffer file. Raises: ValueError: An error occurred when model output is invalid. """ BasicEngine.__init__(self, model_path) output_tensors_sizes = self.get_all_output_tensors_sizes() if output_tensors_sizes.size != 1: raise ValueError( ('Dectection model should have only 1 output tensor!' 'This model has {}.'.format(output_tensors_sizes.size)))
def __init__(self, model_path): """Creates a BasicEngine with given model. Args: model_path: String, path to TF-Lite Flatbuffer file. Raises: ValueError: An error occurred when the output format of model is invalid. """ BasicEngine.__init__(self, model_path) output_tensors_sizes = self.get_all_output_tensors_sizes() if output_tensors_sizes.size != 1: raise ValueError( ('Classification model should have 1 output tensor only!' 'This model has {}.'.format(output_tensors_sizes.size)))
def __init__(self, model_path, mirror=False): """Creates a PoseEngine with given model. Args: model_path: String, path to TF-Lite Flatbuffer file. mirror: Flip keypoints horizontally Raises: ValueError: An error occurred when model output is invalid. """ BasicEngine.__init__(self, model_path) self._mirror = mirror self._input_tensor_shape = self.get_input_tensor_shape() if (self._input_tensor_shape.size != 4 or self._input_tensor_shape[3] != 3 or self._input_tensor_shape[0] != 1): raise ValueError( ('Image model should have input shape [1, height, width, 3]!' ' This model has {}.'.format(self._input_tensor_shape))) _, self.image_height, self.image_width, self.image_depth = self.get_input_tensor_shape( ) # The API returns all the output tensors flattened and concatenated. We # have to figure out the boundaries from the tensor shapes & sizes. offset = 0 self._output_offsets = [0] for size in self.get_all_output_tensors_sizes(): offset += int(size) self._output_offsets.append(offset) # Auto-detect stride size def calcStride(h, w, L): return int( (2 * h * w) / (math.sqrt(h**2 + 4 * h * L * w - 2 * h * w + w**2) - h - w)) heatmap_size = self.get_output_tensor_size(4) print("Heatmap size: ", heatmap_size) self.stride = calcStride(self.image_height, self.image_width, heatmap_size) self.heatmap_size = (self.image_width // self.stride + 1, self.image_height // self.stride + 1) print("Stride Guess: ", self.stride, self.heatmap_size)
def __init__(self, model_path): """Creates a DetectionEngine with given model. Args: model_path: String, path to TF-Lite Flatbuffer file. Raises: ValueError: An error occurred when model output is invalid. """ BasicEngine.__init__(self, model_path) output_tensors_sizes = self.get_all_output_tensors_sizes() if output_tensors_sizes.size != 4: raise ValueError( ('Dectection model should have 4 output tensors!' 'This model has {}.'.format(output_tensors_sizes.size))) self._tensor_start_index = [0] offset = 0 for i in range(3): offset = offset + output_tensors_sizes[i] self._tensor_start_index.append(offset)