def densepose_chart_predictions_to_dict(instances):
    """
    Convert DensePose chart predictions into a list of serializable dicts.

    For each detected instance, builds a dict with:
      - "densepose": the quantized chart result (its ``labels_uv_uint8``
        tensor moved to CPU so the dict can be serialized),
      - "segmentation": the instance mask in COCO RLE form, with the
        ``counts`` field decoded to a UTF-8 string for JSON compatibility.

    Args:
        instances: predicted results; expected to carry ``pred_densepose``,
            ``pred_boxes`` and ``image_size`` fields (see callers).

    Returns:
        list[dict]: one dict per detected instance.
    """
    # Rasterize all predicted DensePose results into per-instance masks once,
    # up front, rather than per iteration.
    instance_masks = ToMaskConverter.convert(
        instances.pred_densepose, instances.pred_boxes, instances.image_size
    )
    outputs = []
    for idx in range(len(instances)):
        chart_result = ToChartResultConverter.convert(
            instances.pred_densepose[idx], instances.pred_boxes[idx]
        )
        quantized = quantize_densepose_chart_result(chart_result)
        # Move to CPU so downstream serialization does not hold GPU memory.
        quantized.labels_uv_uint8 = quantized.labels_uv_uint8.cpu()
        # COCO RLE encoding requires a Fortran-contiguous uint8 array.
        mask_array = np.require(
            instance_masks.tensor[idx].numpy(), dtype=np.uint8, requirements=["F"]
        )
        rle = mask_utils.encode(mask_array)
        # RLE "counts" comes back as bytes; decode for JSON-friendliness.
        rle["counts"] = rle["counts"].decode("utf-8")
        outputs.append({"densepose": quantized, "segmentation": rle})
    return outputs
def __call__(self, instances: Instances) -> BitMasks:
    """
    Convert predicted data from `instances` into GT mask data.

    Args:
        instances (Instances): predicted results, expected to have a
            `pred_densepose` field (plus `pred_boxes` and `image_size`)

    Returns:
        Boolean tensor of the size of the input image that has non-zero
        values at pixels that are estimated to belong to the detected object
    """
    masks = ToMaskConverter.convert(
        instances.pred_densepose, instances.pred_boxes, instances.image_size
    )
    return masks