def apply_batch(self, inputs: Sequence[Tuple], spec: Optional[TensorSpec] = None):
    r"""Apply the transform to a batch of input data.

    ``inputs`` is either a plain array/tensor (treated as one batch) or an
    ``OrderedDict`` mapping each ``TensorSpec`` to its data.
    """
    if not isinstance(inputs, OrderedDict):
        if spec is None and self.is_spatial:
            self._shape_info = None
            spec = TensorSpec(shape=tensor_to_shape(inputs,
                                                    need_exclude_batch_axis=True,
                                                    is_singleton=True),
                              object_type=ObjectType.rgb)
        return self.apply(inputs, spec)
    else:
        results = OrderedDict()
        sampledata = list(inputs.values())[0]
        sample_spec = inputs.key_list[0]
        if (isinstance(sampledata, Iterable) and not isinstance(sampledata, np.ndarray)) or (
                is_tensor_like(sampledata) and sample_spec.ndim == sampledata.ndim):
            # Per-sample path: reset the cached shape info for each sample so
            # paired entries (e.g. image + mask) share the same random state.
            for i in range(len(sampledata)):
                self._shape_info = None
                for spec, data in inputs.items():
                    if spec not in results:
                        results[spec] = []
                    results[spec].append(self.apply(data[i], spec))
        else:
            # Whole-batch path: one shared shape info for all entries.
            self._shape_info = None
            for spec, data in inputs.items():
                results[spec] = self.apply(data, spec)
        return results
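# --- usage sketch (illustrative, not part of this module) -------------------
# A minimal, hypothetical example of driving apply_batch with an OrderedDict
# that pairs each TensorSpec with its data. `RandomFlip`, the shapes, and the
# use of ObjectType.gray for the mask are assumptions for illustration only.
#
#   imgs = np.random.uniform(0, 255, (2, 224, 224, 3)).astype(np.float32)
#   msks = np.random.randint(0, 2, (2, 224, 224)).astype(np.float32)
#   batch = OrderedDict()
#   batch[TensorSpec(shape=TensorShape([None, 224, 224, 3]),
#                    object_type=ObjectType.rgb)] = imgs
#   batch[TensorSpec(shape=TensorShape([None, 224, 224]),
#                    object_type=ObjectType.gray)] = msks
#   out = RandomFlip().apply_batch(batch)   # image and mask flip identically,
#                                           # thanks to the shared _shape_info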
def data_preprocess(self, img_data):
    if self._model is not None and self._model.input_spec is not None:
        self._model.input_spec.object_type = ObjectType.rgb
    if not hasattr(self, '_preprocess_flow') or self._preprocess_flow is None:
        self._preprocess_flow = []
    # A 4-D input is a batch (N, H, W, C): recurse one image at a time.
    if img_data.ndim == 4:
        return to_tensor(to_numpy([self.data_preprocess(im) for im in img_data]))
    if len(self._preprocess_flow) == 0:
        return image_backend_adaption(img_data)
    if isinstance(img_data, np.ndarray):
        for fc in self._preprocess_flow:
            if (self._model is not None and self.signature is not None
                    and len(self.signature) > 1 and self._model.input_spec is not None):
                img_data = fc(img_data, spec=self._model.input_spec)
            else:
                img_data = fc(img_data)
        img_data = image_backend_adaption(img_data)
        # Lazily derive the model's input spec from the first preprocessed image.
        if self._model is not None and self._model.input_spec is None:
            self._model.input_spec = TensorSpec(
                shape=tensor_to_shape(to_tensor(img_data),
                                      need_exclude_batch_axis=True,
                                      is_singleton=True),
                object_type=ObjectType.rgb,
                name='input')
            self.input_shape = self._model.input_spec.shape[1:]
        return img_data
    else:
        return img_data
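# --- usage sketch (illustrative, not part of this module) -------------------
# A hypothetical preprocess flow: `Resize` and `Normalize` stand in for any
# callables that accept (img, spec=...) like the `fc` entries above; the file
# name is also an assumption.
#
#   self._preprocess_flow = [Resize((224, 224)), Normalize(127.5, 127.5)]
#   single = self.data_preprocess(image2array('sample.jpg'))   # one image
#   batch  = self.data_preprocess(np.stack([arr1, arr2]))      # ndim == 4 recurses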
def apply_batch(self, inputs: Sequence[Tuple], spec: Optional[TensorSpec] = None):
    if spec is None and self.is_spatial:
        spec = TensorSpec(shape=tensor_to_shape(inputs[0]),
                          object_type=object_type_inference(inputs[0]))
    # Every item in the batch reuses the spec inferred from the first item.
    return tuple(self.apply(item, spec) for item in inputs)
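# --- usage sketch (illustrative, not part of this module) -------------------
# Hypothetical: `transform` is any spatial Transform instance; the spec is
# inferred once from frames[0] and shared across the tuple.
#
#   frames = tuple(np.random.uniform(0, 255, (64, 64, 3)).astype(np.float32)
#                  for _ in range(4))
#   out = transform.apply_batch(frames)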
def apply_batch(self, inputs: Sequence[Tuple], spec: Optional[TensorSpec] = None):
    r"""Apply the transform to a batch of text input data."""
    if not isinstance(inputs, OrderedDict):
        self._text_info = None
        if spec is None and self.is_spatial:
            spec = TensorSpec(shape=tensor_to_shape(inputs,
                                                    need_exclude_batch_axis=True,
                                                    is_singleton=True),
                              object_type=ObjectType.corpus)
        self._precalculate(inputs)
        return self.apply(inputs, spec)
    else:
        results = OrderedDict()
        self._text_info = None
        is_precalculate = False
        for k, v in inputs.items():
            if k.object_type is None:
                k.object_type = object_type_inference(v)
            # Precalculate on every corpus entry before applying the transform.
            if isinstance(k, TensorSpec) and k.object_type == ObjectType.corpus:
                self._precalculate(v)
                is_precalculate = True
        # Fall back to the first value when no corpus entry was found.
        if not is_precalculate:
            self._precalculate(inputs.value_list[0])
        for spec, data in inputs.items():
            results[spec] = self.apply(data, spec)
        return results
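# --- usage sketch (illustrative, not part of this module) -------------------
# A hypothetical text batch: the corpus entry drives _precalculate before the
# transform runs on every entry. `ToLowerCase` and the sample sentences are
# assumptions for illustration only.
#
#   batch = OrderedDict()
#   batch[TensorSpec(shape=TensorShape([None]),
#                    object_type=ObjectType.corpus)] = [
#       'The quick brown fox.', 'Jumps over the lazy dog.']
#   out = ToLowerCase().apply_batch(batch)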
def add_training_item(self, training_item, name=None, start_epoch=0):
    n = len(self.training_items)
    if name is not None and len(name) > 0:
        self.training_names[n] = name
        training_item.name = name
    elif training_item.name is not None and len(training_item.name) > 0:
        self.training_names[n] = training_item.name
    else:
        training_item.name = 'model {0}'.format(n)
        self.training_names[n] = 'model {0}'.format(n)
    self.training_items[n] = training_item
    self.training_items[n].start_epoch = start_epoch

    # Backward compatibility: promote bare integer shape tuples to TensorSpec.
    for k, v in training_item.inputs.items():
        if isinstance(v, tuple) and all([isinstance(item, numbers.Integral) for item in v]):
            training_item.inputs[k] = TensorSpec(shape=to_tensor(v), name=training_item.name)
            training_item.signature.inputs[k] = TensorSpec(shape=to_tensor(v),
                                                           name=training_item.name)
        elif isinstance(v, TensorSpec):
            training_item.signature.inputs[k] = v
    for k, v in training_item.outputs.items():
        if isinstance(v, tuple) and all([isinstance(item, numbers.Integral) for item in v]):
            training_item.outputs[k] = TensorSpec(shape=to_tensor(v), name=training_item.name)
            training_item.signature.outputs[k] = TensorSpec(shape=to_tensor(v),
                                                            name=training_item.name)
        elif isinstance(v, TensorSpec):
            training_item.signature.outputs[k] = v

    if isinstance(training_item.model, Layer) and training_item.signature != training_item.model.signature:
        # Reset the cached model signature so it is regenerated, then adopt it.
        training_item.model.signature = None
        training_item.signature = training_item.model.signature
    return self
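# --- usage sketch (illustrative, not part of this module) -------------------
# Hypothetical: `plan` is whatever container exposes add_training_item, and
# `item_a`/`item_b` bundle a model plus its signature. Because the method
# returns self, registrations can be chained.
#
#   plan.add_training_item(item_a, name='detector_a', start_epoch=0) \
#       .add_training_item(item_b, name='detector_b')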
def apply_batch(self, inputs: Sequence[Tuple], spec: Optional[TensorSpec] = None):
    r"""Apply the transform to a batch of input data."""
    if not isinstance(inputs, OrderedDict):
        if spec is None and self.is_spatial:
            self._shape_info = None
            spec = TensorSpec(shape=tensor_to_shape(inputs,
                                                    need_exclude_batch_axis=True,
                                                    is_singleton=True),
                              object_type=ObjectType.rgb)
        return self.apply(inputs, spec)
    else:
        # One shared shape info for every entry so paired data stay aligned.
        results = OrderedDict()
        self._shape_info = None
        for spec, data in inputs.items():
            results[spec] = self.apply(data, spec)
        return results
def __call__(self, img, spec: TensorSpec = None, **kwargs):
    if isinstance(img, np.ndarray):
        start_time = time.time()
        if spec is None:
            spec = TensorSpec(shape=to_tensor(img.shape),
                              object_type=object_type_inference(img))
        if spec.object_type in (ObjectType.rgb, ObjectType.gray):
            # Convert channel-first (C, H, W) layouts to channel-last (H, W, C).
            if img.ndim == 3 and img.shape[0] in [1, 3, 4]:
                img = img.transpose(1, 2, 0)
        for i in range(len(self.policies)):
            try:
                item = self.policies[i]
                img = item(img, spec=spec)
            except Exception as e:
                # A failing policy is logged and skipped; the image passes through.
                print(e)
        img = image_backend_adaption(img)
        self.pass_cnt += 1
        self.pass_time_spend += float(time.time() - start_time)
    return img
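# --- usage sketch (illustrative, not part of this module) -------------------
# A hypothetical policy pipeline: `self.policies` holds transforms applied in
# order, and a CHW input is auto-transposed first. `RandomAdjustContrast` and
# `RandomBlur` are assumed transform names for illustration only.
#
#   pipeline.policies = [RandomAdjustContrast(), RandomBlur()]
#   out = pipeline(np.random.uniform(0, 255, (3, 224, 224)).astype(np.float32))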
def infer_single_image(self, img, scale=1, verbose=False):
    time_time = None
    if verbose:
        time_time = time.time()
        print("==----- starting infer {0} -----=======".format(img))
    if not self._model.built:
        raise ValueError('the model is not built yet.')
    try:
        self._model.to(self.device)
        self._model.eval()
        if self._model.input_spec is None:
            self._model.input_spec = TensorSpec(shape=TensorShape([None, 3, 608, 608]),
                                                object_type=ObjectType.rgb)
        if self._model.input_spec.object_type is None:
            self._model.input_spec.object_type = ObjectType.rgb

        img = image2array(img)
        if img.shape[-1] == 4:
            # Drop the alpha channel.
            img = img[:, :, :3]
        img_orig = img.copy()
        rescale_scale = 1
        for func in self.preprocess_flow:
            if (inspect.isfunction(func) or isinstance(func, Transform)) and func is not image_backend_adaption:
                img = func(img, spec=self._model.input_spec)
                # Remember the resize factor so boxes can be mapped back later.
                if (inspect.isfunction(func) and func.__qualname__ == 'resize.<locals>.img_op') or (
                        isinstance(func, Transform) and func.name == 'resize'):
                    rescale_scale = func.scale
        img = image_backend_adaption(img)
        inp = to_tensor(np.expand_dims(img, 0)).to(
            torch.device("cuda" if self._model.weights[0].data.is_cuda else "cpu")).to(
            self._model.weights[0].data.dtype)
        if verbose:
            print("======== data preprocess time:{0:.5f}".format(time.time() - time_time))
            time_time = time.time()

        boxes = self._model(inp)[0]
        if verbose:
            print("======== infer time:{0:.5f}".format(time.time() - time_time))
            time_time = time.time()

        # Filter out low-confidence detections.
        mask = boxes[:, 4] > self.detection_threshold
        boxes = boxes[mask]
        if verbose:
            print('    detection threshold:{0}'.format(self.detection_threshold))
            print('    {0} bboxes keep!'.format(len(boxes)))
        if boxes is not None and len(boxes) > 0:
            boxes = concate([xywh2xyxy(boxes[:, :4]), boxes[:, 4:]], axis=-1)
            boxes = to_numpy(boxes)
            if len(boxes) > 1:
                box_probs, keep = self.hard_nms(boxes[:, :5],
                                                nms_threshold=self.nms_threshold,
                                                top_k=-1)
                boxes = boxes[keep]
                if verbose:
                    print('    iou threshold:{0}'.format(self.nms_threshold))
                    print('    {0} bboxes keep!'.format(len(boxes)))
            # Map the boxes back to the original image coordinates.
            # boxes = boxes * (1 / scale[0])
            boxes[:, :4] /= scale
            boxes[:, :4] = np.round(boxes[:, :4], 0)
            if verbose:
                print("======== bbox postprocess time:{0:.5f}".format(time.time() - time_time))
                time_time = time.time()
            locations = boxes[:, :4]
            probs = boxes[:, 4]
            labels = np.argmax(boxes[:, 5:], -1).astype(np.int32)
            if verbose and locations is not None:
                for i in range(len(locations)):
                    print('    box{0}: {1} prob:{2:.2%} class:{3}'.format(
                        i, [np.round(num, 4) for num in locations[i].tolist()], probs[i],
                        labels[i] if self.class_names is None or int(labels[i]) >= len(self.class_names)
                        else self.class_names[int(labels[i])]))
            return img_orig, locations, labels, probs
        else:
            return img_orig, None, None, None
    except Exception:
        PrintException()
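# --- usage sketch (illustrative, not part of this module) -------------------
# Hypothetical: `detector` is an instance of this detection wrapper; the file
# name is an assumption. The return order follows the function above.
#
#   raw_img, locations, labels, probs = detector.infer_single_image(
#       'street.jpg', verbose=True)
#   if locations is not None:
#       for box, label, prob in zip(locations, labels, probs):
#           print(box, label, prob)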