Example #1
    def _get_update_mask(self, allow_empty_mask=False):
        mask_paths = []
        for field_path in self.top_level_paths:
            if field_path not in self.transform_paths:
                mask_paths.append(field_path.to_api_repr())
            else:
                prefix = FieldPath(*field_path.parts[:-1])
                if prefix.parts:
                    mask_paths.append(prefix.to_api_repr())

        return common_pb2.DocumentMask(field_paths=mask_paths)
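A minimal, self-contained sketch of the masking rule this method applies (plain dotted strings stand in for FieldPath, and the DocumentMask proto is skipped; both simplifications are assumptions for illustration): a non-transform path goes into the mask as-is, while a transform path only contributes its parent prefix, if it has one.

def sketch_update_mask(top_level_paths, transform_paths):
    # Non-transform paths are masked verbatim; for transform paths, fall back
    # to the parent path so the mask still covers the enclosing map.
    mask_paths = []
    for path in top_level_paths:
        if path not in transform_paths:
            mask_paths.append(path)
        else:
            parent = ".".join(path.split(".")[:-1])
            if parent:
                mask_paths.append(parent)
    return mask_paths

# A nested transform ("a.b") masks its parent "a"; a top-level transform ("c")
# has no parent and drops out of the mask entirely.
print(sketch_update_mask(["a.b", "x"], {"a.b"}))  # ['a', 'x']
print(sketch_update_mask(["c", "x"], {"c"}))      # ['x']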
Example #2
    def __init__(self, document_data):
        self.document_data = document_data
        self.field_paths = []
        self.deleted_fields = []
        self.server_timestamps = []
        self.array_removes = {}
        self.array_unions = {}
        self.set_fields = {}
        self.empty_document = False

        prefix_path = FieldPath()
        iterator = self._get_document_iterator(prefix_path)

        for field_path, value in iterator:

            if field_path == prefix_path and value is _EmptyDict:
                self.empty_document = True

            elif value is transforms.DELETE_FIELD:
                self.deleted_fields.append(field_path)

            elif value is transforms.SERVER_TIMESTAMP:
                self.server_timestamps.append(field_path)

            elif isinstance(value, transforms.ArrayRemove):
                self.array_removes[field_path] = value.values

            elif isinstance(value, transforms.ArrayUnion):
                self.array_unions[field_path] = value.values

            else:
                self.field_paths.append(field_path)
                set_field_value(self.set_fields, field_path, value)
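A hedged usage sketch of how these buckets get filled (the client, collection, and field names below are made-up illustration values, not part of the original): the public sentinel values are what the loop above classifies when a write is extracted.

from google.cloud import firestore

client = firestore.Client()
doc_ref = client.collection("cities").document("SF")

doc_ref.update(
    {
        "population": 900000,                         # plain value -> set_fields
        "updated_at": firestore.SERVER_TIMESTAMP,     # -> server_timestamps
        "tags": firestore.ArrayUnion(["coastal"]),    # -> array_unions
        "old_tags": firestore.ArrayRemove(["beta"]),  # -> array_removes
        "deprecated": firestore.DELETE_FIELD,         # -> deleted_fields
    }
)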
Example #3
def extract_fields(document_data, prefix_path, expand_dots=False):
    """Do depth-first walk of tree, yielding field_path, value"""
    if not document_data:
        yield prefix_path, _EmptyDict
    else:
        for key, value in sorted(six.iteritems(document_data)):

            if expand_dots:
                sub_key = FieldPath.from_string(key)
            else:
                sub_key = FieldPath(key)

            field_path = FieldPath(*(prefix_path.parts + sub_key.parts))

            if isinstance(value, dict):
                for s_path, s_value in extract_fields(value, field_path):
                    yield s_path, s_value
            else:
                yield field_path, value
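A self-contained illustration of the same depth-first walk, assuming tuples of parts as a stand-in for FieldPath and omitting the expand_dots handling of dotted top-level keys (both simplifications are mine, not the library's):

_EMPTY = object()

def walk(data, prefix=()):
    # An empty dict is reported as a single (prefix, sentinel) pair; otherwise
    # recurse into nested dicts in sorted key order and yield leaf values.
    if not data:
        yield prefix, _EMPTY
    else:
        for key, value in sorted(data.items()):
            path = prefix + (key,)
            if isinstance(value, dict):
                yield from walk(value, path)
            else:
                yield path, value

print(list(walk({"b": {"c": 2, "a": 1}, "x": 0})))
# [(('b', 'a'), 1), (('b', 'c'), 2), (('x',), 0)]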
Example #4
    def has_updates(self):
        # for whatever reason, the conformance tests want to see the parent
        # of nested transform paths in the update mask
        # (see set-st-merge-nonleaf-alone.textproto)
        update_paths = set(self.data_merge)

        for transform_path in self.transform_paths:
            if len(transform_path.parts) > 1:
                parent_fp = FieldPath(*transform_path.parts[:-1])
                update_paths.add(parent_fp)

        return bool(update_paths)
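A worked sketch of the parent-path rule (tuples stand in for FieldPath parts; the values are made up): a nested transform such as a SERVER_TIMESTAMP at "a.b" contributes its parent "a" to the update paths, so the write counts as having updates even with no plain data to merge.

data_merge = set()                      # no plain data being merged
transform_paths = [("a", "b")]          # e.g. a nested SERVER_TIMESTAMP at "a.b"

update_paths = set(data_merge)
for transform_path in transform_paths:
    if len(transform_path) > 1:
        update_paths.add(transform_path[:-1])   # parent of the nested transform

print(bool(update_paths))   # True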
Example #5
    def __init__(self, document_data):
        super(DocumentExtractorForUpdate, self).__init__(document_data)
        self.top_level_paths = sorted(
            [FieldPath.from_string(key) for key in document_data])
        tops = set(self.top_level_paths)
        for top_level_path in self.top_level_paths:
            for ancestor in top_level_path.lineage():
                if ancestor in tops:
                    raise ValueError("Conflicting field path: {}, {}".format(
                        top_level_path, ancestor))

        for field_path in self.deleted_fields:
            if field_path not in tops:
                raise ValueError(
                    "Cannot update with nest delete: {}".format(field_path))
Example #6
    def _recursive_delete(
        self,
        reference: Union[CollectionReference, DocumentReference],
        *,
        bulk_writer: Optional["BulkWriter"] = None,
        chunk_size: Optional[int] = 5000,
        depth: Optional[int] = 0,
    ) -> int:
        """Recursion helper for `recursive_delete."""
        from google.cloud.firestore_v1.bulk_writer import BulkWriter

        bulk_writer = bulk_writer or BulkWriter()

        num_deleted: int = 0

        if isinstance(reference, CollectionReference):
            chunk: List[DocumentSnapshot]
            for chunk in (reference.recursive().select(
                [FieldPath.document_id()])._chunkify(chunk_size)):
                doc_snap: DocumentSnapshot
                for doc_snap in chunk:
                    num_deleted += 1
                    bulk_writer.delete(doc_snap.reference)

        elif isinstance(reference, DocumentReference):
            col_ref: CollectionReference
            for col_ref in reference.collections():
                num_deleted += self._recursive_delete(
                    col_ref,
                    bulk_writer=bulk_writer,
                    chunk_size=chunk_size,
                    depth=depth + 1,
                )
            num_deleted += 1
            bulk_writer.delete(reference)

        else:
            raise TypeError(
                f"Unexpected type for reference: {reference.__class__.__name__}"
            )

        if depth == 0:
            bulk_writer.close()

        return num_deleted
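A hedged usage sketch (collection name is made up; assumes the public recursive_delete wrapper available in recent library versions, which forwards to this helper with depth=0):

from google.cloud import firestore

client = firestore.Client()

# Deletes every document under the collection, including nested subcollections,
# and returns the number of deleted documents.
num_deleted = client.recursive_delete(client.collection("cities"), chunk_size=500)
print(f"Deleted {num_deleted} documents")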
Example #7
    async def _recursive_delete(
        self,
        reference: Union[AsyncCollectionReference, AsyncDocumentReference],
        bulk_writer: "BulkWriter",
        *,
        chunk_size: Optional[int] = 5000,
        depth: Optional[int] = 0,
    ) -> int:
        """Recursion helper for `recursive_delete."""

        num_deleted: int = 0

        if isinstance(reference, AsyncCollectionReference):
            chunk: List[DocumentSnapshot]
            async for chunk in reference.recursive().select(
                [FieldPath.document_id()])._chunkify(chunk_size):
                doc_snap: DocumentSnapshot
                for doc_snap in chunk:
                    num_deleted += 1
                    bulk_writer.delete(doc_snap.reference)

        elif isinstance(reference, AsyncDocumentReference):
            col_ref: AsyncCollectionReference
            async for col_ref in reference.collections():
                num_deleted += await self._recursive_delete(
                    col_ref,
                    bulk_writer=bulk_writer,
                    depth=depth + 1,
                    chunk_size=chunk_size,
                )
            num_deleted += 1
            bulk_writer.delete(reference)

        else:
            raise TypeError(
                f"Unexpected type for reference: {reference.__class__.__name__}"
            )

        if depth == 0:
            bulk_writer.close()

        return num_deleted
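A hedged usage sketch for the async variant (collection name is made up; assumes the public AsyncClient.recursive_delete wrapper in recent library versions):

import asyncio

from google.cloud import firestore

async def purge() -> None:
    client = firestore.AsyncClient()
    # Walks the collection tree via the helper above and returns the count.
    count = await client.recursive_delete(client.collection("cities"))
    print(f"Deleted {count} documents")

asyncio.run(purge())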
Example #8
    def __init__(self, document_data):
        super(DocumentExtractorForUpdate, self).__init__(document_data)
        self.top_level_paths = sorted(
            [FieldPath.from_string(key) for key in document_data]
        )
        tops = set(self.top_level_paths)
        for top_level_path in self.top_level_paths:
            for ancestor in top_level_path.lineage():
                if ancestor in tops:
                    raise ValueError(
                        "Conflicting field path: {}, {}".format(
                            top_level_path, ancestor
                        )
                    )

        for field_path in self.deleted_fields:
            if field_path not in tops:
                raise ValueError(
                    "Cannot update with nest delete: {}".format(field_path)
                )
Example #9
    def _construct_merge_paths(self, merge):
        for merge_field in merge:
            if isinstance(merge_field, FieldPath):
                yield merge_field
            else:
                yield FieldPath(*parse_field_path(merge_field))
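A hedged usage sketch (collection and field names are made up): merge entries may be dotted strings or FieldPath instances, and this generator normalizes both forms before the merge is applied.

from google.cloud import firestore
from google.cloud.firestore_v1.field_path import FieldPath

client = firestore.Client()
doc_ref = client.collection("cities").document("SF")

# Only the named fields are merged into the existing document.
doc_ref.set(
    {"stats": {"population": 900000}, "name": "San Francisco"},
    merge=["stats.population", FieldPath("name")],
)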
Example #10
    def _construct_merge_paths(self, merge) -> Generator[Any, Any, None]:
        for merge_field in merge:
            if isinstance(merge_field, FieldPath):
                yield merge_field
            else:
                yield FieldPath(*parse_field_path(merge_field))