def _set_item(self, name, title, image_url, ann: Union[Annotation, dict] = None):
    setattr(self, f"_{name}_title", title)
    setattr(self, f"_{name}_image_url", image_url)
    res_ann = Annotation((1, 1))
    if ann is not None:
        if type(ann) is dict:
            res_ann = Annotation.from_json(ann, self._project_meta)
        else:
            res_ann = ann.clone()
    setattr(self, f"_{name}_ann", res_ann)
def _do_single_img_inference(self, img, in_msg):
    in_project_meta = self._in_project_meta_from_msg(in_msg)
    ann_json = in_msg.get('annotation')
    if ann_json is not None:
        if in_project_meta is None:
            raise ValueError('In order to perform inference with annotation you must specify the appropriate'
                             ' project meta.')
        ann = Annotation.from_json(ann_json, in_project_meta)
    else:
        in_project_meta = in_project_meta or ProjectMeta()
        ann = Annotation(img.shape[:2])

    inference_mode = self._make_inference_mode(in_msg.get(MODE, {}), in_project_meta)
    inference_result = inference_mode.infer_annotate(img, ann)
    return inference_result.to_json()
def run_evaluation(self):
    progress = Progress('metric evaluation', self._project_gt.total_items)
    for ds_name in self._project_gt.datasets.keys():
        ds_gt = self._project_gt.datasets.get(ds_name)
        ds_pred = self._project_pred.datasets.get(ds_name)

        for sample_name in ds_gt:
            try:
                ann_gt = Annotation.load_json_file(ds_gt.get_ann_path(sample_name), self._project_gt.meta)
                ann_pred = Annotation.load_json_file(ds_pred.get_ann_path(sample_name),
                                                     self._project_pred.meta)
                self._metric.add_pair(ann_gt, ann_pred)
            except ValueError as e:
                logger.warning('An error has occurred ({}). Sample "{}" in dataset "{}" will be skipped'
                               .format(str(e), sample_name, ds_gt.name))
            progress.iter_done_report()
def filter_objects_by_area(ann: Annotation, classes: List[str], comparator=operator.lt,
                           thresh_percent: float = None) -> Annotation:
    # @TODO: add size mode
    """
    Deletes labels whose area is less (or greater) than the specified percentage of the image area.

    Args:
        ann: Input annotation.
        classes: List of classes to filter.
        comparator: Comparison function.
        thresh_percent: Threshold percent value of image area.
    Returns:
        Annotation containing filtered labels.
    """
    imsize = ann.img_size
    img_area = float(imsize[0] * imsize[1])

    def _del_filter_percent(label: Label):
        if label.obj_class.name in classes:
            fig_area = label.area
            area_percent = 100.0 * fig_area / img_area
            if comparator(area_percent, thresh_percent):  # condition is satisfied
                return []  # action: 'delete'
        return [label]

    return ann.transform_labels(_del_filter_percent, imsize)
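
# Usage sketch for filter_objects_by_area; `ann` and the 'person' class are
# illustrative assumptions. Drops 'person' labels covering less than 5% of the
# image area; swapping in operator.gt would drop the large labels instead.
import operator

filtered_ann = filter_objects_by_area(ann, classes=['person'],
                                      comparator=operator.lt, thresh_percent=5.0)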
def _do_infer_annotate(self, img: np.ndarray, ann: Annotation) -> Annotation:
    result_ann = ann.clone()
    roi = _make_cropped_rectangle(ann.img_size, self._config[BOUNDS])
    roi_ann = _get_annotation_for_bbox(img, roi, self._model)
    result_ann = result_ann.add_labels(
        _replace_or_drop_labels_classes(roi_ann.labels, self._model_class_mapper, self._model_tag_meta_mapper))
    img_level_tags = make_renamed_tags(roi_ann.img_tags, self._model_tag_meta_mapper, skip_missing=True)
    result_ann = result_ann.add_labels(
        _maybe_make_bbox_label(roi, self._intermediate_bbox_class, tags=img_level_tags))
    result_ann = result_ann.add_tags(img_level_tags)
    if self._config.get(SAVE_PROBABILITIES, False) is True:
        result_problabels = _replace_or_drop_labels_classes(
            roi_ann.pixelwise_scores_labels, self._model_class_mapper, self._model_tag_meta_mapper)
        result_ann = result_ann.add_pixelwise_score_labels(result_problabels)
    return result_ann
def rotate(img: np.ndarray, ann: Annotation, degrees: float,
           mode: str = RotationModes.KEEP) -> (np.ndarray, Annotation):
    # @TODO: add "preserve_size" mode
    """
    Rotates the image by the given angle.

    Args:
        img: Input image array.
        ann: Input annotation.
        degrees: Rotation angle, counter-clockwise.
        mode: One of:
            "keep" - keep the original image data; new regions are filled with black;
            "crop" - crop the rotated result to exclude black regions.
    Returns:
        A tuple containing rotated image array and annotation.
    """
    _validate_image_annotation_shape(img, ann)
    rotator = ImageRotator(img.shape[:2], degrees)

    if mode == RotationModes.KEEP:
        rect_to_crop = None
    elif mode == RotationModes.CROP:
        rect_to_crop = rotator.inner_crop
    else:
        raise NotImplementedError('Wrong black_regions mode.')

    res_img = rotator.rotate_img(img, use_inter_nearest=False)
    res_ann = ann.rotate(rotator)
    if rect_to_crop is not None:
        res_img = sly_image.crop(res_img, rect_to_crop)
        res_ann = res_ann.relative_crop(rect_to_crop)
    return res_img, res_ann
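
# Usage sketch for rotate; `img` and `ann` are an illustrative image/annotation
# pair. Rotates 15 degrees counter-clockwise and crops away the black corner
# regions that the rotation introduces.
res_img, res_ann = rotate(img, ann, degrees=15, mode=RotationModes.CROP)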
def bitwise_mask(ann: Annotation, class_mask: str, classes_to_correct: List[str],
                 bitwise_op: Callable[[np.ndarray, np.ndarray], np.ndarray] = np.logical_and) -> Annotation:
    """
    Performs a bitwise operation between two masks. Uses one target mask to correct all others.

    Args:
        ann: Input annotation.
        class_mask: Class name of the target mask.
        classes_to_correct: List of classes which will be corrected using the target mask.
        bitwise_op: Bitwise numpy function to process masks. For example: "np.logical_or",
            "np.logical_and", "np.logical_xor".
    Returns:
        Annotation containing corrected Bitmaps.
    """
    imsize = ann.img_size

    def find_mask_class(labels, class_mask_name):
        for label in labels:
            if label.obj_class.name == class_mask_name:
                if not isinstance(label.geometry, Bitmap):
                    raise RuntimeError('Class <{}> must be a Bitmap.'.format(class_mask_name))
                return label
        return None

    mask_label = find_mask_class(ann.labels, class_mask)
    if mask_label is not None:
        target_origin, target_mask = mask_label.geometry.origin, mask_label.geometry.data
        full_target_mask = np.full(imsize, False, bool)
        full_target_mask[target_origin.row:target_origin.row + target_mask.shape[0],
                         target_origin.col:target_origin.col + target_mask.shape[1]] = target_mask

        def perform_op(label):
            if label.obj_class.name not in classes_to_correct or label.obj_class.name == class_mask:
                return [label]
            if not isinstance(label.geometry, Bitmap):
                raise RuntimeError('Input class must be a Bitmap.')
            new_geom = label.geometry.bitwise_mask(full_target_mask, bitwise_op)
            return [label.clone(geometry=new_geom)] if new_geom is not None else []

        res_ann = ann.transform_labels(perform_op)
    else:
        res_ann = ann.clone()
    return res_ann
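
# Usage sketch for bitwise_mask; the class names are illustrative assumptions.
# Keeps only the parts of 'road' and 'lane' bitmaps that overlap the
# 'drivable_area' bitmap; np.logical_xor would compute the symmetric
# difference instead.
import numpy as np

corrected_ann = bitwise_mask(ann, class_mask='drivable_area',
                             classes_to_correct=['road', 'lane'],
                             bitwise_op=np.logical_and)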
def apply(augs, meta: ProjectMeta, img, ann: Annotation, segmentation_type='semantic'):
    # @TODO: save object tags
    # works for rectangles
    det_meta, det_mapping = meta.to_detection_task(convert_classes=False)
    det_ann = ann.to_detection_task(det_mapping)
    ia_boxes = det_ann.bboxes_to_imgaug()

    # works for polygons and bitmaps
    seg_meta, seg_mapping = meta.to_segmentation_task()
    seg_ann = ann.to_nonoverlapping_masks(seg_mapping)

    if segmentation_type == 'semantic':
        seg_ann = seg_ann.to_segmentation_task()
        class_to_index = {obj_class.name: idx
                          for idx, obj_class in enumerate(seg_meta.obj_classes, start=1)}
        index_to_class = {v: k for k, v in class_to_index.items()}
    elif segmentation_type == 'instance':
        class_to_index = None
        index_to_class = {idx: label.obj_class.name
                          for idx, label in enumerate(seg_ann.labels, start=1)}
    elif segmentation_type == 'panoptic':
        raise NotImplementedError

    ia_masks = seg_ann.masks_to_imgaug(class_to_index)

    res_meta = det_meta.merge(seg_meta)  # TagMetas should be preserved

    res_img, res_ia_boxes, res_ia_masks = _apply(augs, img, ia_boxes, ia_masks)
    res_ann = Annotation.from_imgaug(res_img,
                                     ia_boxes=res_ia_boxes, ia_masks=res_ia_masks,
                                     index_to_class=index_to_class, meta=res_meta)
    # add image tags
    res_ann = res_ann.clone(img_tags=ann.img_tags)
    return res_meta, res_img, res_ann
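
# Usage sketch for apply; the imgaug pipeline below is an illustrative
# assumption, and `meta`, `img`, `ann` come from an already-loaded project.
import imgaug.augmenters as iaa

augs = iaa.Sequential([iaa.Fliplr(0.5), iaa.Affine(rotate=(-10, 10))])
res_meta, res_img, res_ann = apply(augs, meta, img, ann, segmentation_type='semantic')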
def _make_final_ann(self, result_ann):
    frontend_compatible_labels = _remove_backend_only_labels(result_ann.labels)
    return Annotation(img_size=result_ann.img_size,
                      labels=frontend_compatible_labels,
                      img_tags=result_ann.img_tags,
                      img_description=result_ann.img_description,
                      pixelwise_scores_labels=result_ann.pixelwise_scores_labels)
def set_annotation(self, ann: Union[Annotation, dict] = None):
    if ann is not None:
        if type(ann) is dict:
            res_ann = Annotation.from_json(ann, self._project_meta)
        else:
            res_ann = ann.clone()
    else:
        res_ann = None
    self._ann = res_ann
def _get_annotation_for_bbox(img: np.ndarray, roi: Rectangle, model) -> Annotation:
    """Runs inference within the given roi; moves resulting figures to the global reference frame."""
    img_cropped = roi.get_cropped_numpy_slice(img)
    # TODO pass through image and parent figure tags via roi_ann.
    roi_ann = Annotation(img_size=(roi.height, roi.width))
    raw_result_ann = model.inference(img_cropped, roi_ann)
    return Annotation(img_size=img.shape[:2],
                      labels=[label.translate(drow=roi.top, dcol=roi.left)
                              for label in raw_result_ann.labels],
                      img_tags=raw_result_ann.img_tags,
                      img_description=raw_result_ann.img_description,
                      pixelwise_scores_labels=[label.translate(drow=roi.top, dcol=roi.left)
                                               for label in raw_result_ann.pixelwise_scores_labels])
def save_project_as_pascal_voc_detection(save_path, project: Project):
    import pascal_voc_writer

    # Create root pascal 'datasets' folders
    for dataset in project.datasets:
        pascal_dataset_path = os.path.join(save_path, dataset.name)
        images_dir = os.path.join(pascal_dataset_path, 'JPEGImages')
        anns_dir = os.path.join(pascal_dataset_path, 'Annotations')
        lists_dir = os.path.join(pascal_dataset_path, 'ImageSets/Layout')

        fs_utils.mkdir(pascal_dataset_path)
        for subdir in ['ImageSets',  # Train list, Val list, etc.
                       'ImageSets/Layout',
                       'Annotations',
                       'JPEGImages']:
            fs_utils.mkdir(os.path.join(pascal_dataset_path, subdir))

        samples_by_tags = defaultdict(list)  # TRAIN: [img_1, img_2, ...]

        for item_name in dataset:
            img_path, ann_path = dataset.get_item_paths(item_name)
            no_ext_name = fs_utils.get_file_name(item_name)
            pascal_img_path = os.path.join(images_dir, no_ext_name + OUT_IMG_EXT)
            pascal_ann_path = os.path.join(anns_dir, no_ext_name + XML_EXT)

            if item_name.endswith(OUT_IMG_EXT):
                fs_utils.copy_file(img_path, pascal_img_path)
            else:
                img = image_utils.read(img_path)
                image_utils.write(pascal_img_path, img)

            ann = Annotation.load_json_file(ann_path, project_meta=project.meta)

            # Read tags for images lists generation
            for tag in ann.img_tags:
                samples_by_tags[tag.name].append((no_ext_name, len(ann.labels)))

            writer = pascal_voc_writer.Writer(path=pascal_img_path,
                                              width=ann.img_size[1],
                                              height=ann.img_size[0])

            for label in ann.labels:
                obj_class = label.obj_class
                rect: Rectangle = label.geometry.to_bbox()
                writer.addObject(name=obj_class.name,
                                 xmin=rect.left,
                                 ymin=rect.top,
                                 xmax=rect.right,
                                 ymax=rect.bottom)

            writer.save(pascal_ann_path)

        save_images_lists(lists_dir, samples_by_tags)
def add_item(self, title, image_url, ann: Union[Annotation, dict] = None, col_index=None,
             custom_info: dict = None, zoom_to_figure=None, title_url=None):
    if col_index is not None:
        if col_index <= 0 or col_index > self.col_number:
            raise ValueError("Column number is not correct, check your input data")

    res_ann = Annotation((1, 1))
    if ann is not None:
        if type(ann) is dict:
            res_ann = Annotation.from_json(ann, self._project_meta)
        else:
            res_ann = ann.clone()

    self._data[title] = {
        "image_url": image_url,
        "ann": res_ann,
        "col_index": col_index,
    }

    if zoom_to_figure is not None:
        self._data[title]["zoom_to_figure"] = zoom_to_figure
        self._need_zoom = True

    if title_url is not None:
        self.preview_info = True
        self._with_title_url = True
        self._data[title]["labelingUrl"] = title_url

    if self.preview_info:
        if custom_info is not None:
            self._data[title]["info"] = custom_info
        else:
            self._data[title]["info"] = None
def add_background(ann: Annotation, bg_class: ObjClass) -> Annotation:
    """
    Adds a background rectangle (size equals the image size) to the annotation.

    Args:
        ann: Input annotation.
        bg_class: ObjClass instance for the background class label.
    Returns:
        Annotation with added background rectangle.
    """
    img_size = ann.img_size
    rect = Rectangle(0, 0, img_size[0] - 1, img_size[1] - 1)
    new_label = Label(rect, bg_class)
    return ann.add_label(new_label)
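
# Usage sketch for add_background; the 'bg' class name is an illustrative
# assumption. The background ObjClass must use the Rectangle geometry and
# should also be present in the project meta. Useful before converting an
# annotation to a segmentation mask so unlabeled pixels get an explicit class.
bg_class = ObjClass('bg', Rectangle)
ann_with_bg = add_background(ann, bg_class)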
def flipud(img: np.ndarray, ann: Annotation) -> (np.ndarray, Annotation):
    """
    Flips an image array and annotation around the horizontal axis.

    Args:
        img: Input image array.
        ann: Input annotation.
    Returns:
        A tuple containing flipped image and annotation.
    """
    _validate_image_annotation_shape(img, ann)
    res_img = sly_image.flipud(img)
    res_ann = ann.flipud()
    return res_img, res_ann
def drop_object_by_class(ann: Annotation, classes: List[str]) -> Annotation:
    """
    Removes labels of specified classes from annotation.

    Args:
        ann: Input annotation.
        classes: List of classes to remove.
    Returns:
        Annotation with removed labels of specified classes.
    """
    def _filter(label: Label):
        if label.obj_class.name in classes:
            return []
        return [label]

    return ann.transform_labels(_filter)
def find_contours(ann: Annotation, classes_mapping: dict) -> Annotation:
    # @TODO: approximation dropped
    """
    Converts Bitmap labels of the selected classes to contour Polygons.

    Args:
        ann: Input annotation.
        classes_mapping: Dict matching source class names to new ObjClasses.
    Returns:
        Annotation with Bitmaps converted to contour Polygons.
    """
    def to_contours(label: Label):
        new_obj_cls = classes_mapping.get(label.obj_class.name)
        if new_obj_cls is None:
            return [label]
        if not isinstance(label.geometry, Bitmap):
            raise RuntimeError('Input class must be a Bitmap.')
        return [Label(geometry=geom, obj_class=new_obj_cls)
                for geom in label.geometry.to_contours()]

    return ann.transform_labels(to_contours)
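
# Usage sketch for find_contours; the class names are illustrative
# assumptions. The destination ObjClass must use the Polygon geometry.
contour_class = ObjClass('building_contour', Polygon)
contour_ann = find_contours(ann, classes_mapping={'building': contour_class})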
def instance_crop(img: np.ndarray, ann: Annotation, class_title: str, save_other_classes_in_crop: bool = True,
                  padding_config: dict = None) -> list:
    """
    Crops objects of the specified class from the image with configurable padding.

    Args:
        img: Input image array.
        ann: Input annotation.
        class_title: Name of the class to crop.
        save_other_classes_in_crop: Whether to keep non-target classes in each cropped annotation.
        padding_config: Dict with padding sizes for each side of the crop.
    Returns:
        List of cropped (image, annotation) pairs, one per cropped object.
    """
    padding_config = take_with_default(padding_config, {})
    _validate_image_annotation_shape(img, ann)
    results = []
    img_rect = Rectangle.from_size(img.shape[:2])

    if save_other_classes_in_crop:
        non_target_labels = [label for label in ann.labels if label.obj_class.name != class_title]
    else:
        non_target_labels = []

    ann_with_non_target_labels = ann.clone(labels=non_target_labels)

    for label in ann.labels:
        if label.obj_class.name == class_title:
            src_fig_rect = label.geometry.to_bbox()
            new_img_rect = _rect_from_bounds(padding_config, img_w=src_fig_rect.width, img_h=src_fig_rect.height)
            rect_to_crop = new_img_rect.translate(src_fig_rect.top, src_fig_rect.left)
            crops = rect_to_crop.crop(img_rect)
            if len(crops) == 0:
                continue
            rect_to_crop = crops[0]
            image_crop = sly_image.crop(img, rect_to_crop)

            cropped_ann = ann_with_non_target_labels.relative_crop(rect_to_crop)
            label_crops = label.relative_crop(rect_to_crop)
            for label_crop in label_crops:
                results.append((image_crop, cropped_ann.add_label(label_crop)))
    return results
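
# Usage sketch for instance_crop; the 'car' class is illustrative, and the
# padding keys below are an assumption about what _rect_from_bounds (not shown
# here) accepts. Produces one (image, annotation) pair per target object.
crops = instance_crop(img, ann, class_title='car',
                      save_other_classes_in_crop=False,
                      padding_config={'top': '10px', 'left': '10px',
                                      'bottom': '10px', 'right': '10px'})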
def scale(img: np.ndarray, ann: Annotation, frow: float = None, fcol: float = None,
          f: float = None) -> (np.ndarray, Annotation):
    """
    Scales the input image array and annotation by the given factor(s).

    Args:
        img: Input image array.
        ann: Input annotation.
        frow: Desired height scale factor.
        fcol: Desired width scale factor.
        f: Single scale factor applied to both height and width.
    Returns:
        A tuple containing resized image array and annotation.
    """
    _validate_image_annotation_shape(img, ann)
    new_size = sly_image.restore_proportional_size(in_size=ann.img_size, frow=frow, fcol=fcol, f=f)
    res_img = sly_image.resize(img, new_size)
    res_ann = ann.resize(new_size)
    return res_img, res_ann
def samples_by_tags(required_tags, project):
    """
    Split samples from the project by tags.

    :param required_tags: list of tag names
    :param project: supervisely `Project` class object
    :return: dict mapping tag name to a list of (image path, annotation path) pairs
    """
    img_annotations_groups = defaultdict(list)
    for dataset in project:
        for item_name in dataset:
            item_paths = dataset.get_item_paths(item_name)
            ann = Annotation.load_json_file(path=item_paths.ann_path, project_meta=project.meta)
            img_tags = ann.img_tags
            for required_tag in required_tags:
                if img_tags.has_key(required_tag):
                    # TODO migrate to ItemPath objects for img_annotations_groups
                    img_annotations_groups[required_tag].append((item_paths.img_path, item_paths.ann_path))
    return img_annotations_groups
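
# Usage sketch for samples_by_tags; assumes the project images carry
# 'train'/'val' tags.
groups = samples_by_tags(['train', 'val'], project)
train_samples = groups['train']  # list of (image path, annotation path) pairs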
def skeletonize_bitmap(ann: Annotation, classes: List[str], method_id: SkeletonizeMethod) -> Annotation:
    """
    Extracts skeletons from bitmap figures.

    Args:
        ann: Input annotation.
        classes: List of classes to skeletonize.
        method_id: Algorithm of processing. See the supervisely.geometry.bitmap.SkeletonizeMethod enum.
    Returns:
        Annotation with skeletonized labels.
    """
    def _skel(label: Label):
        if label.obj_class.name not in classes:
            return [label]
        if not isinstance(label.geometry, Bitmap):
            raise RuntimeError('Input class must be a Bitmap.')
        return [label.clone(geometry=label.geometry.skeletonize(method_id))]

    return ann.transform_labels(_skel)
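
# Usage sketch for skeletonize_bitmap; the 'wire' class and the chosen enum
# member are illustrative assumptions. Thins the bitmaps down to
# one-pixel-wide skeletons.
skeleton_ann = skeletonize_bitmap(ann, classes=['wire'],
                                  method_id=SkeletonizeMethod.SKELETONIZE)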
def ensure_samples_nonempty(samples, tag_name, project_meta):
    """
    Args:
        samples: list of pairs (image path, annotation path).
        tag_name: tag name for messages.
        project_meta: input project meta object.
    Returns:
        None
    """
    if len(samples) < 1:
        raise RuntimeError('There are no annotations with tag "{}"'.format(tag_name))

    for _, ann_path in samples:
        ann = Annotation.load_json_file(ann_path, project_meta)
        if len(ann.labels) > 0:
            return

    raise RuntimeError('There are no objects in annotations with tag "{}"'.format(tag_name))
def resize(img: np.ndarray, ann: Annotation, size: tuple) -> (np.ndarray, Annotation):
    """
    Resizes the input image array and annotation to the given size.

    Args:
        img: Input image array.
        ann: Input annotation.
        size: Desired size (height, width) in pixels. If one of the values is -1, it is computed
            automatically from the other to keep the aspect ratio.
    Returns:
        A tuple containing resized image array and annotation.
    """
    _validate_image_annotation_shape(img, ann)
    height = take_with_default(size[0], -1)  # For backward compatibility
    width = take_with_default(size[1], -1)
    size = (height, width)
    new_size = sly_image.restore_proportional_size(in_size=ann.img_size, out_size=size)
    res_img = sly_image.resize(img, new_size)
    res_ann = ann.resize(new_size)
    return res_img, res_ann
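
# Usage sketch for resize: fix the height at 600 pixels and pass -1 for the
# width so it is computed automatically to preserve the aspect ratio.
res_img, res_ann = resize(img, ann, size=(600, -1))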
def approximate_vector(ann: Annotation, classes: List[str], epsilon: float) -> Annotation:
    """
    Approximates vector figures: lines and polygons.

    Args:
        ann: Input annotation.
        classes: List of classes to apply the transformation to.
        epsilon: Approximation accuracy (maximum distance between the original curve and its approximation).
    Returns:
        Annotation with approximated vector figures of the selected classes.
    """
    def _approx(label: Label):
        if label.obj_class.name not in classes:
            return [label]
        if not isinstance(label.geometry, (Polygon, Polyline)):
            raise RuntimeError('Input class must be a Polygon or a Line.')
        return [label.clone(geometry=label.geometry.approx_dp(epsilon))]

    return ann.transform_labels(_approx)
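
# Usage sketch for approximate_vector; the 'road' class is an illustrative
# assumption. Simplifies 'road' polygons so the approximated curve deviates
# from the original by at most 3 pixels.
approx_ann = approximate_vector(ann, classes=['road'], epsilon=3.0)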
def _do_infer_annotate(self, img: np.ndarray, ann: Annotation) -> Annotation:
    result_ann = ann.clone()
    model_labels = []
    roi_bbox_labels = []
    for roi in self._sliding_windows.get(ann.img_size):
        raw_roi_ann = _get_annotation_for_bbox(img, roi, self._model)
        # Accumulate all the labels across the sliding windows to potentially run non-max suppression
        # over them. Only retain the classes that will be eventually saved to avoid running NMS on
        # objects we will throw away anyway.
        model_labels.extend([label for label in raw_roi_ann.labels
                             if isinstance(label.geometry, Rectangle) and
                             self._model_class_mapper.map(label.obj_class) is not None])
        model_img_level_tags = make_renamed_tags(raw_roi_ann.img_tags, self._model_tag_meta_mapper,
                                                 skip_missing=True)
        roi_bbox_labels.extend(
            _maybe_make_bbox_label(roi, self._intermediate_bbox_class, tags=model_img_level_tags))

    nms_conf = self._config.get(NMS_AFTER, {ENABLE: False})
    if nms_conf[ENABLE]:
        confidence_tag_name = nms_conf.get(CONFIDENCE_TAG_NAME, CONFIDENCE)
        model_labels = self._general_nms(labels=model_labels, iou_thresh=nms_conf[IOU_THRESHOLD],
                                         confidence_tag_name=confidence_tag_name)

    model_labels_renamed = _replace_or_drop_labels_classes(
        model_labels, self._model_class_mapper, self._model_tag_meta_mapper)

    result_ann = result_ann.add_labels(roi_bbox_labels + model_labels_renamed)
    return result_ann
def crop(img: np.ndarray, ann: Annotation, top_pad: int = 0, left_pad: int = 0, bottom_pad: int = 0,
         right_pad: int = 0) -> (np.ndarray, Annotation):
    """
    Crops the given image array and annotation from all sides by the given values.

    Args:
        img: Input image array.
        ann: Input annotation.
        top_pad: Number of pixels to cut from the top side.
        left_pad: Number of pixels to cut from the left side.
        bottom_pad: Number of pixels to cut from the bottom side.
        right_pad: Number of pixels to cut from the right side.
    Returns:
        A tuple containing cropped image array and annotation.
    """
    _validate_image_annotation_shape(img, ann)
    height, width = img.shape[:2]
    crop_rect = Rectangle(top_pad, left_pad, height - bottom_pad - 1, width - right_pad - 1)

    res_img = sly_image.crop(img, crop_rect)
    res_ann = ann.relative_crop(crop_rect)
    return res_img, res_ann
def _do_infer_annotate_generic(self, inference_fn, img, ann: Annotation):
    result_ann = ann.clone()
    inference_ann = inference_fn(img, ann)
    result_labels = _replace_or_drop_labels_classes(
        inference_ann.labels, self._model_class_mapper, self._model_tag_meta_mapper)
    result_ann = result_ann.add_labels(result_labels)
    renamed_tags = make_renamed_tags(inference_ann.img_tags, self._model_tag_meta_mapper, skip_missing=True)
    result_ann = result_ann.add_tags(renamed_tags)
    if self._config.get(SAVE_PROBABILITIES, False) is True:
        result_problabels = _replace_or_drop_labels_classes(
            inference_ann.pixelwise_scores_labels, self._model_class_mapper, self._model_tag_meta_mapper)
        result_ann = result_ann.add_pixelwise_score_labels(result_problabels)
    return result_ann
def _do_infer_annotate(self, img: np.ndarray, ann: Annotation) -> Annotation:
    result_labels = []
    result_problabels = []
    for src_label, roi in self._all_filtered_bbox_rois(ann, self._config[FROM_CLASSES], self._config[PADDING]):
        if roi is None:
            result_labels.append(src_label)
        else:
            roi_ann = _get_annotation_for_bbox(img, roi, self._model)
            result_labels.extend(_replace_or_drop_labels_classes(
                roi_ann.labels, self._model_class_mapper, self._model_tag_meta_mapper))

            if self._config.get(SAVE_PROBABILITIES, False) is True:
                result_problabels.extend(_replace_or_drop_labels_classes(
                    roi_ann.pixelwise_scores_labels, self._model_class_mapper, self._model_tag_meta_mapper))

            model_img_level_tags = make_renamed_tags(roi_ann.img_tags, self._model_tag_meta_mapper,
                                                     skip_missing=True)
            if self._config[SAVE]:
                result_labels.append(Label(geometry=roi,
                                           obj_class=self._intermediate_class_mapper.map(src_label.obj_class),
                                           tags=model_img_level_tags))
            # Regardless of whether we need to save intermediate bounding boxes, also put the inference
            # result tags onto the original source object from which we created a bounding box.
            # This is necessary for e.g. classification models to work, so that they put the
            # classification results onto the original object.
            result_labels.append(src_label.add_tags(model_img_level_tags))
    return ann.clone(labels=result_labels, pixelwise_scores_labels=result_problabels)
def test_with_matches(self):
    ann = Annotation(img_size=[100, 100],
                     labels=[self._gt_obj_1, self._gt_obj_2, self._gt_obj_3,
                             self._pred_obj_1, self._pred_obj_2, self._pred_obj_3] + self._pred_objs_fp)
    self._metric_calculator.add_pair(ann, ann)

    # Sorted matches by confidence:
    # 0.75 - recall 0    precision 0
    # 0.7  + recall 1/3  precision 1/2
    # 0.65 - recall 1/3  precision 1/3
    # 0.6  - recall 1/3  precision 1/4
    # 0.55 - recall 1/3  precision 1/5
    # 0.45 - recall 1/3  precision 1/6
    # 0.35 - recall 1/3  precision 1/7
    # 0.25 - recall 1/3  precision 1/8
    # 0.15 - recall 2/3  precision 1/9
    # 0.1  + recall 2/3  precision 2/10
    # Recalls 0.7, 0.8, 0.9, 1.0 -> max precision 0.
    # Recalls 0.6, 0.5, 0.4 -> max precision 2/10.
    # Recalls 0.3, 0.2, 0.1, 0.0 -> max precision 1/2.
    expected_map = (4 * 0.0 + 3 * (2 / 10) + 4 * 1 / 2) / 11
    self.assertEqual(self._metric_calculator.get_total_metrics()[AP], expected_map)
def test_empty_predictions(self):
    ann = Annotation(img_size=[100, 100], labels=[self._gt_obj_1, self._gt_obj_2, self._gt_obj_3])
    self._metric_calculator.add_pair(ann, ann)
    self.assertEqual(self._metric_calculator.get_total_metrics()[AP], 0)