def __init__(self, target, pipeline, full_document, resume_after,
             max_await_time_ms, batch_size, collation,
             start_at_operation_time, session):
    """Validate the watch options, remember them, and open the cursor.

    ``target`` may be a client, database, or collection; a deep copy of
    ``pipeline`` and ``resume_after`` is stored so later caller mutation
    cannot affect the stream.
    """
    # Normalize/validate the aggregation pipeline first (same order as
    # the other option checks below so the first bad argument raises).
    if pipeline is None:
        stages = []
    else:
        if not isinstance(pipeline, list):
            raise TypeError("pipeline must be a list")
        stages = pipeline
    common.validate_string_or_none('full_document', full_document)
    validate_collation_or_none(collation)
    common.validate_non_negative_integer_or_none("batchSize", batch_size)

    opts = target.codec_options
    self._orig_codec_options = opts
    # When custom type decoders are registered, fetch raw BSON documents
    # instead and decode lazily; the original type registry is kept on the
    # new codec options so custom types in the pipeline still encode.
    self._decode_custom = bool(opts.type_registry._decoder_map)
    if self._decode_custom:
        raw_opts = opts.with_options(document_class=RawBSONDocument)
        self._target = target.with_options(codec_options=raw_opts)
    else:
        self._target = target

    self._pipeline = copy.deepcopy(stages)
    self._full_document = full_document
    self._resume_token = copy.deepcopy(resume_after)
    self._max_await_time_ms = max_await_time_ms
    self._batch_size = batch_size
    self._collation = collation
    self._start_at_operation_time = start_at_operation_time
    self._session = session
    self._cursor = self._create_cursor()
def __init__(
    self,
    target: Union["MongoClient[_DocumentType]", "Database[_DocumentType]", "Collection[_DocumentType]"],
    pipeline: Optional[_Pipeline],
    full_document: Optional[str],
    resume_after: Optional[Mapping[str, Any]],
    max_await_time_ms: Optional[int],
    batch_size: Optional[int],
    collation: Optional[_CollationIn],
    start_at_operation_time: Optional[Timestamp],
    session: Optional["ClientSession"],
    start_after: Optional[Mapping[str, Any]],
    comment: Optional[Any] = None,
) -> None:
    """Validate the watch options, record them, and open the cursor.

    Deep copies of the pipeline and of the initial resume token are kept
    so that later caller-side mutation cannot affect the stream.
    """
    # Treat a missing pipeline as empty, then validate it is list-like.
    stages = common.validate_list(
        "pipeline", [] if pipeline is None else pipeline)
    common.validate_string_or_none("full_document", full_document)
    validate_collation_or_none(collation)
    common.validate_non_negative_integer_or_none("batchSize", batch_size)

    self._orig_codec_options = target.codec_options
    # With custom type decoders registered, read raw BSON and decode
    # lazily; keeping the type registry on the new codec options lets
    # custom types in the pipeline still be encoded.
    self._decode_custom = bool(target.codec_options.type_registry._decoder_map)
    if self._decode_custom:
        raw_opts = target.codec_options.with_options(
            document_class=RawBSONDocument)
        self._target = target.with_options(  # type: ignore
            codec_options=raw_opts)
    else:
        self._target = target

    self._pipeline = copy.deepcopy(stages)
    self._full_document = full_document
    self._uses_start_after = start_after is not None
    self._uses_resume_after = resume_after is not None
    # start_after wins over resume_after as the initial resume token.
    self._resume_token = copy.deepcopy(start_after or resume_after)
    self._max_await_time_ms = max_await_time_ms
    self._batch_size = batch_size
    self._collation = collation
    self._start_at_operation_time = start_at_operation_time
    self._session = session
    self._comment = comment
    # Open the server-side cursor immediately.
    self._cursor = self._create_cursor()
def __init__(self, target, pipeline, full_document, resume_after,
             max_await_time_ms, batch_size, collation,
             start_at_operation_time, session):
    """Validate the watch options, store them, and open the cursor.

    ``target`` may be a client, database, or collection; deep copies of
    ``pipeline`` and ``resume_after`` are kept so later caller mutation
    cannot affect the stream.
    """
    # Pipeline is checked first so the first bad argument raises.
    if pipeline is None:
        stages = []
    else:
        if not isinstance(pipeline, list):
            raise TypeError("pipeline must be a list")
        stages = pipeline
    common.validate_string_or_none('full_document', full_document)
    validate_collation_or_none(collation)
    common.validate_non_negative_integer_or_none("batchSize", batch_size)

    self._target = target
    self._pipeline = copy.deepcopy(stages)
    self._full_document = full_document
    self._resume_token = copy.deepcopy(resume_after)
    self._max_await_time_ms = max_await_time_ms
    self._batch_size = batch_size
    self._collation = collation
    self._start_at_operation_time = start_at_operation_time
    self._session = session
    # Open the server-side cursor immediately.
    self._cursor = self._create_cursor()