def test_transaction_write_concern_override(self):
    """Test txn overrides Client/Database/Collection write_concern."""
    client = rs_client(w=0)
    self.addCleanup(client.close)
    db = client.test
    coll = db.test
    coll.insert_one({})
    with client.start_session() as s:
        with s.start_transaction(write_concern=WriteConcern(w=1)):
            self.assertTrue(coll.insert_one({}, session=s).acknowledged)
            self.assertTrue(
                coll.insert_many([{}, {}], session=s).acknowledged)
            self.assertTrue(
                coll.bulk_write([InsertOne({})], session=s).acknowledged)
            self.assertTrue(
                coll.replace_one({}, {}, session=s).acknowledged)
            self.assertTrue(
                coll.update_one({}, {"$set": {"a": 1}},
                                session=s).acknowledged)
            self.assertTrue(
                coll.update_many({}, {"$set": {"a": 1}},
                                 session=s).acknowledged)
            self.assertTrue(coll.delete_one({}, session=s).acknowledged)
            self.assertTrue(coll.delete_many({}, session=s).acknowledged)
            coll.find_one_and_delete({}, session=s)
            coll.find_one_and_replace({}, {}, session=s)
            coll.find_one_and_update({}, {"$set": {"a": 1}}, session=s)

    unsupported_txn_writes = [
        (client.drop_database, [db.name], {}),
        (db.create_collection, ['collection'], {}),
        (db.drop_collection, ['collection'], {}),
        (coll.drop, [], {}),
        (coll.map_reduce,
         ['function() {}', 'function() {}', 'output'], {}),
        (coll.rename, ['collection2'], {}),
        # Drop collection2 between tests of "rename", above.
        (coll.database.drop_collection, ['collection2'], {}),
        (coll.create_indexes, [[IndexModel('a')]], {}),
        (coll.create_index, ['a'], {}),
        (coll.drop_index, ['a_1'], {}),
        (coll.drop_indexes, [], {}),
        (coll.aggregate, [[{"$out": "aggout"}]], {}),
    ]
    for op in unsupported_txn_writes:
        op, args, kwargs = op
        with client.start_session() as s:
            kwargs['session'] = s
            s.start_transaction(write_concern=WriteConcern(w=1))
            with self.assertRaises(OperationFailure):
                op(*args, **kwargs)
            s.abort_transaction()

def test_transaction_options_validation(self):
    default_options = TransactionOptions()
    self.assertIsNone(default_options.read_concern)
    self.assertIsNone(default_options.write_concern)
    self.assertIsNone(default_options.read_preference)
    self.assertIsNone(default_options.max_commit_time_ms)
    # No error when valid options are provided.
    TransactionOptions(read_concern=ReadConcern(),
                       write_concern=WriteConcern(),
                       read_preference=ReadPreference.PRIMARY,
                       max_commit_time_ms=10000)
    with self.assertRaisesRegex(TypeError, "read_concern must be "):
        TransactionOptions(read_concern={})
    with self.assertRaisesRegex(TypeError, "write_concern must be "):
        TransactionOptions(write_concern={})
    with self.assertRaisesRegex(
            ConfigurationError,
            "transactions do not support unacknowledged write concern"):
        TransactionOptions(write_concern=WriteConcern(w=0))
    with self.assertRaisesRegex(TypeError,
                                "is not valid for read_preference"):
        TransactionOptions(read_preference={})
    with self.assertRaisesRegex(
            TypeError,
            "max_commit_time_ms must be an integer or None"):
        TransactionOptions(max_commit_time_ms="10000")

async def test_write_concern(self):
    # Default empty dict means "w=1"
    self.assertEqual(WriteConcern(), self.cx.write_concern)

    await self.collection.delete_many({})
    await self.collection.insert_one({'_id': 0})

    for gle_options in [
        {},
        {'w': 0},
        {'w': 1},
        {'wtimeout': 1000},
    ]:
        cx = self.asyncio_client(test.env.uri, **gle_options)
        wc = WriteConcern(**gle_options)
        self.assertEqual(wc, cx.write_concern)

        db = cx.motor_test
        self.assertEqual(wc, db.write_concern)

        collection = db.test_collection
        self.assertEqual(wc, collection.write_concern)

        if wc.acknowledged:
            with self.assertRaises(pymongo.errors.DuplicateKeyError):
                await collection.insert_one({'_id': 0})
        else:
            await collection.insert_one({'_id': 0})  # No error

        # No error
        c = collection.with_options(write_concern=WriteConcern(w=0))
        await c.insert_one({'_id': 0})
        cx.close()

async def test_causal_consistency(self):
    # Causal consistency examples
    client = self.cx
    self.addCleanup(env.sync_cx.drop_database, 'test')
    await client.test.drop_collection('items')
    await client.test.items.insert_one({
        'sku': "111",
        'name': 'Peanuts',
        'start': datetime.datetime.today()})

    # Start Causal Consistency Example 1
    async with await client.start_session(causal_consistency=True) as s1:
        current_date = datetime.datetime.today()
        items = client.get_database(
            'test', read_concern=ReadConcern('majority'),
            write_concern=WriteConcern('majority', wtimeout=1000)).items

        await items.update_one(
            {'sku': "111", 'end': None},
            {'$set': {'end': current_date}}, session=s1)
        await items.insert_one(
            {'sku': "nuts-111", 'name': "Pecans",
             'start': current_date}, session=s1)
    # End Causal Consistency Example 1

    # Start Causal Consistency Example 2
    async with await client.start_session(causal_consistency=True) as s2:
        s2.advance_cluster_time(s1.cluster_time)
        s2.advance_operation_time(s1.operation_time)

        items = client.get_database(
            'test', read_preference=ReadPreference.SECONDARY,
            read_concern=ReadConcern('majority'),
            write_concern=WriteConcern('majority', wtimeout=1000)).items

        async for item in items.find({'end': None}, session=s2):
            print(item)

def replace_with_correct_contig(mongo_source, assembly_accession, study_accession, incorrect_contig, correct_contig,
                                num_variants_to_replace):
    sve_collection = mongo_source.mongo_handle[mongo_source.db_name]["submittedVariantEntity"]
    filter_criteria = {'seq': assembly_accession, 'study': study_accession, 'contig': incorrect_contig}
    cursor = sve_collection.with_options(read_concern=ReadConcern("majority")) \
        .find(filter_criteria, no_cursor_timeout=True).limit(num_variants_to_replace)
    insert_statements = []
    drop_statements = []
    total_inserted, total_dropped = 0, 0
    try:
        for variant in cursor:
            original_id = get_SHA1(variant)
            assert variant['_id'] == original_id, "Original id is different from the one calculated %s != %s" % (
                variant['_id'], original_id)
            variant['contig'] = correct_contig
            variant['_id'] = get_SHA1(variant)
            insert_statements.append(pymongo.InsertOne(variant))
            drop_statements.append(pymongo.DeleteOne({'_id': original_id}))
        result_insert = sve_collection.with_options(write_concern=WriteConcern(w="majority", wtimeout=1200000)) \
            .bulk_write(requests=insert_statements, ordered=False)
        total_inserted += result_insert.inserted_count
        result_drop = sve_collection.with_options(write_concern=WriteConcern(w="majority", wtimeout=1200000)) \
            .bulk_write(requests=drop_statements, ordered=False)
        total_dropped += result_drop.deleted_count
        logger.info('%s / %s new documents inserted' % (total_inserted, num_variants_to_replace))
        logger.info('%s / %s old documents dropped' % (total_dropped, num_variants_to_replace))
    except Exception as e:
        print(traceback.format_exc())
        raise e
    finally:
        cursor.close()
    return total_inserted

def swap_with_correct_contig(mongo_source, contig_swap_list):
    sve_collection = mongo_source.mongo_handle[mongo_source.db_name]["submittedVariantEntity"]
    insert_statements = []
    drop_statements = []
    for contig in contig_swap_list:
        contig_insert_stmt_1, contig_drop_stmt_1 = get_insert_statements(sve_collection, contig['contig_1'],
                                                                         contig['contig_2'])
        contig_insert_stmt_2, contig_drop_stmt_2 = get_insert_statements(sve_collection, contig['contig_2'],
                                                                         contig['contig_1'])
        insert_statements.extend(contig_insert_stmt_1)
        insert_statements.extend(contig_insert_stmt_2)
        drop_statements.extend(contig_drop_stmt_1)
        drop_statements.extend(contig_drop_stmt_2)
    total_inserted = 0
    total_dropped = 0
    try:
        result_insert = sve_collection.with_options(write_concern=WriteConcern(w="majority", wtimeout=1200000)) \
            .bulk_write(requests=insert_statements, ordered=False)
        total_inserted += result_insert.inserted_count
        logger.info('%s / %s new documents inserted' % (total_inserted, 146))
        result_drop = sve_collection.with_options(write_concern=WriteConcern(w="majority", wtimeout=1200000)) \
            .bulk_write(requests=drop_statements, ordered=False)
        total_dropped += result_drop.deleted_count
        logger.info('%s / %s old documents dropped' % (total_dropped, 146))
    except Exception as e:
        print(traceback.format_exc())
        raise e

def test_transaction_options_validation(self):
    default_options = TransactionOptions()
    self.assertIsNone(default_options.read_concern)
    self.assertIsNone(default_options.write_concern)
    TransactionOptions(read_concern=ReadConcern(),
                       write_concern=WriteConcern())
    with self.assertRaisesRegex(TypeError, "read_concern must be "):
        TransactionOptions(read_concern={})
    with self.assertRaisesRegex(TypeError, "write_concern must be "):
        TransactionOptions(write_concern={})
    with self.assertRaisesRegex(
            ConfigurationError,
            "transactions must use an acknowledged write concern"):
        TransactionOptions(write_concern=WriteConcern(w=0))

async def test_causal_consistency(self):
    # Causal consistency examples
    client = self.cx
    self.addCleanup(env.sync_cx.drop_database, "test")
    await client.test.drop_collection("items")
    await client.test.items.insert_one({
        "sku": "111",
        "name": "Peanuts",
        "start": datetime.datetime.today()
    })

    # Start Causal Consistency Example 1
    async with await client.start_session(causal_consistency=True) as s1:
        current_date = datetime.datetime.today()
        items = client.get_database(
            "test",
            read_concern=ReadConcern("majority"),
            write_concern=WriteConcern("majority", wtimeout=1000),
        ).items
        await items.update_one({
            "sku": "111",
            "end": None
        }, {"$set": {
            "end": current_date
        }}, session=s1)
        await items.insert_one(
            {
                "sku": "nuts-111",
                "name": "Pecans",
                "start": current_date
            }, session=s1)
    # End Causal Consistency Example 1

    # Start Causal Consistency Example 2
    async with await client.start_session(causal_consistency=True) as s2:
        s2.advance_cluster_time(s1.cluster_time)
        s2.advance_operation_time(s1.operation_time)

        items = client.get_database(
            "test",
            read_preference=ReadPreference.SECONDARY,
            read_concern=ReadConcern("majority"),
            write_concern=WriteConcern("majority", wtimeout=1000),
        ).items
        async for item in items.find({"end": None}, session=s2):
            print(item)

async def update_employee_info(session):
    employees_coll = session.client.hr.employees
    events_coll = session.client.reporting.events

    async with session.start_transaction(
            read_concern=ReadConcern("snapshot"),
            write_concern=WriteConcern(w="majority"),
            read_preference=ReadPreference.PRIMARY,
    ):
        await employees_coll.update_one(
            {"employee": 3}, {"$set": {"status": "Inactive"}},
            session=session)
        await events_coll.insert_one(
            {"employee": 3,
             "status": {"new": "Inactive", "old": "Active"}},
            session=session)

        await commit_with_retry(session)

def log_ne(op='', level="MSG", obj=None):
    doc = {'time': datetime.datetime.now(), 'level': level, 'op': op}
    if obj and 'msg' in obj:
        doc['msg'] = obj['msg']
        del obj['msg']
    doc['obj'] = obj
    # Fire-and-forget insert: w=0 skips acknowledgement of the write.
    db.logs.with_options(write_concern=WriteConcern(w=0)).insert_one(doc)

async def create_user(app, username, hashed_password=None, permissions=None, cards=None):
    """Create a single new user.

    Parameters
    ----------
    app : aiohttp.web.Application
        The aiohttp application instance
    username : str
        The username for the new user
    hashed_password : str
        The argon2id hashed password for the new user
    permissions : list
        The list of permissions the new user will have
    cards : list
        The list of cards assigned to the new user

    Returns
    -------
    uid : str
        The uid of the new user
    """
    permissions = permissions if permissions else []
    cards = cards if cards else []
    result = (await app["db"].users.with_options(write_concern=WriteConcern(
        w="majority")).insert_one({
            "username": username,
            "password": hashed_password,
            "permissions": permissions,
            "cards": cards,
        }))
    return str(result.inserted_id)

def cleanup_metadata(settings_xml_file, db_name):
    logger.info(f'Cleaning up {db_name}...')
    with get_mongo_connection_handle('production', settings_xml_file) as mongo_conn:
        db = mongo_conn[db_name]
        metadata_collection = db[annotation_metadata_collection_name]
        temp_collection = db[temp_collection_name]

        majority_read = ReadConcern('majority')
        majority_write = WriteConcern(w='majority', wtimeout=1200000)
        query = {'ct': {'$exists': True}}

        results = [
            x for x in metadata_collection.with_options(
                read_concern=majority_read).find(query, no_cursor_timeout=True)
        ]
        insert_result = temp_collection.with_options(
            write_concern=majority_write).insert_many(results)
        logger.info(
            f'Inserted {len(insert_result.inserted_ids)} documents into {temp_collection_name}'
        )

        delete_result = metadata_collection.with_options(
            write_concern=majority_write).delete_many(query)
        logger.info(
            f'Deleted {delete_result.deleted_count} documents from {annotation_metadata_collection_name}'
        )

        metadata_collection.drop_indexes()
        logger.info(
            f'Dropped non-id indexes from {annotation_metadata_collection_name}'
        )

def _setup_logging_dataset(store, dsname, logger, collection=None, size=10 * 1024 * 1024, reset=False):
    # setup the dataset
    assert dsname, 'need a valid dsname, got {}'.format(dsname)
    if reset:
        store.drop(dsname, force=True)
    collection = collection or store.collection(dsname)
    # https://api.mongodb.com/python/current/api/pymongo/write_concern.html#pymongo.write_concern.WriteConcern
    FireAndForget = WriteConcern(w=0)
    ReadFast = ReadConcern('local')
    collection = collection.with_options(write_concern=FireAndForget,
                                         read_concern=ReadFast)
    store.put(collection, dsname)
    if collection.estimated_document_count() == 0:
        # initialize. we insert directly into the collection because the
        # logger instance is not set up yet
        record = _make_record('SYSTEM', 999, 'system', 'log init', 'log init')
        collection.insert_one(record)
        store.mongodb.command('convertToCapped', collection.name, size=size)
    # ensure indexed
    idxs = collection.index_information()
    for idx in ('levelname', 'levelno', 'created'):
        if idx not in idxs:
            collection.create_index(idx)
    return collection

def test_parallel_scan(self):
    if not (yield from at_least(self.cx, (2, 5, 5))):
        raise SkipTest("Requires MongoDB >= 2.5.5")

    yield from skip_if_mongos(self.cx)

    collection = self.collection.with_options(
        write_concern=WriteConcern(test.env.w))

    # Enough documents that each cursor requires multiple batches.
    yield from collection.delete_many({})
    yield from collection.insert_many(({'_id': i} for i in range(8000)))
    if test.env.is_replica_set:
        # Test that getMore messages are sent to the right server.
        client = self.asyncio_rsc(read_preference=Secondary())
        collection = client.motor_test.test_collection

    docs = []

    @asyncio.coroutine
    def f(cursor):
        self.assertTrue(isinstance(cursor, AsyncIOMotorCommandCursor))
        while (yield from cursor.fetch_next):
            docs.append(cursor.next_object())

    cursors = yield from collection.parallel_scan(3)
    yield from asyncio.wait([f(cursor) for cursor in cursors],
                            loop=self.loop)

    self.assertEqual(len(docs), (yield from collection.count()))

def test_unsuccessful_insert_many(self, tracer, client):
    db_name = self.random_string()
    collection_name = self.random_string()
    # requiring replication on standalone will cause failure
    write_concern = WriteConcern(w=3)
    collection = client[db_name].get_collection(
        collection_name, write_concern=write_concern)
    docs = [{self.random_string(): self.random_string() for _ in range(5)}
            for __ in range(5)]
    with pytest.raises(OperationFailure):
        collection.insert_many(docs)

    spans = tracer.finished_spans()
    assert len(spans) == 1
    span = spans[0]
    assert span.operation_name == 'insert'
    assert span.tags['namespace'] == self.namespace(
        db_name, collection_name)
    assert span.tags['custom'] == 'tag'
    assert span.tags['command.name'] == 'insert'
    assert span.tags[tags.COMPONENT] == 'PyMongo'
    assert span.tags[tags.DATABASE_INSTANCE] == db_name
    assert span.tags['reported_duration']
    assert span.tags[tags.ERROR] is True
    expected_failure = dict(
        code=2,
        codeName='BadValue',
        ok=0.0,
        errmsg="cannot use 'w' > 1 when a host is not replicated")
    assert json.loads(span.tags['event.failure']) == expected_failure
    assert 'event.reply' not in span.tags

def __init__(self, url):
    self._client = MongoClient(url)
    self._db = self._client[DB_NAME]
    self._data_collection = self._db[DATA_COLL_NAME]
    self._reviews_collection = self._db[REVIEWS_COLL_NAME]
    self._analytics_collection = self._db.get_collection(
        ANALYTICS_COLL_NAME, write_concern=WriteConcern(w=0))

def func2():
    """
    Bulk-insert documents.
    """
    client = MongoClient('mongodb://127.0.0.1:27017/')  # establish the connection
    collection = client['blogdb'].get_collection(
        'posts', write_concern=WriteConcern(w=1, j=True, wtimeout=1))  # select the collection
    # write_concern controls when getLastError() is called:
    # w=1: mongod responds once the write is applied in memory
    # w=1 with journal=True: mongod responds once the write is in memory and in the journal
    # w=2: only meaningful in a replica set; the response is returned after a
    #      secondary has replicated the write from the primary
    try:
        insertData = [InsertOne({'title': i}) for i in range(4)]  # insert documents
        otherData = [
            DeleteMany({}),  # Remove all documents.
            InsertOne({'_id': 1}),
            InsertOne({'_id': 2}),
            InsertOne({'_id': 3}),
            UpdateOne({'_id': 1}, {'$set': {'foo': 'bar'}}),
            UpdateOne({'_id': 4}, {'$inc': {'j': 1}}, upsert=True),
            ReplaceOne({'j': 1}, {'j': 2}),
            DeleteOne({'_id': 2})
        ]
        collection.bulk_write(otherData + insertData, ordered=True)
    except BulkWriteError as bwe:
        print(bwe.details)

def add(cls, **kwargs) -> dict:
    """
    Add a role.
    :param kwargs:
    :return:
    """
    mes = {"message": "success"}
    role_name = kwargs.get("role_name", '')
    db = orm_module.get_client()
    conn = orm_module.get_conn(table_name=cls.get_table_name(), db_client=db)
    write_concern = WriteConcern(w=1, j=True)
    with db.start_session(causal_consistency=True) as ses:
        with ses.start_transaction(write_concern=write_concern):
            r = conn.find_one(filter={'role_name': role_name})
            if r is not None:
                ms = "Role {} already exists!".format(role_name)
                mes['message'] = ms
            else:
                """Add the role."""
                r = conn.insert_one(kwargs)
                if r is None:
                    ms = "Failed to save the user account"
                    mes['message'] = ms
                else:
                    pass
    return mes

async def bulk_op(self, collection: str, items: list, ordered: bool = False):
    coll = self._data_db.get_collection(
        collection,
        write_concern=WriteConcern(w=0, wtimeout=2)  # don't ack
    )
    requests = []
    for item in items:
        data_item = item.data
        data_item_dict = data_item.to_dict()
        unique_fields = data_item.get_unique_indexes()
        if not unique_fields or not collection.endswith('kline'):
            # don't upsert trades
            requests.append(InsertOne(data_item_dict))
        else:
            # upsert is too expensive, only do it for kline
            upsert_op = ReplaceOne(
                {f: data_item_dict[f] for f in unique_fields},
                data_item_dict,
                upsert=True
            )
            requests.append(upsert_op)
    ordered = ordered if not collection.endswith('kline') else True
    result = await coll.bulk_write(requests, ordered=ordered)
    if result.acknowledged:
        msg = LogMsgFmt.MONGO_OPS.value.format(
            f"{collection}|{result.bulk_api_result}"
        )
        if result.bulk_api_result['writeErrors']:
            self.logger.warning(msg)
        else:
            self.logger.debug(msg)

def setUp(self):
    self.server = MockupDB(auto_ismaster=True)
    self.server.run()
    self.addCleanup(self.server.stop)
    self.client = MongoClient(self.server.uri)
    self.collection = self.client.db.get_collection(
        'collection', write_concern=WriteConcern(w=0))

def pair_list2mongodb(cls, pair_list):
    logger = HenriqueLogger.func_level2logger(cls.pair_list2mongodb, logging.DEBUG)
    logger.debug({"len(pair_list)": len(pair_list)})

    n = len(pair_list)
    write_concern = WriteConcern(w=3, wtimeout=n)

    def upsert_channel_user():
        channel_user_list_raw = lfilter(is_not_none, map(ig(1), pair_list))
        logger.debug({"len(channel_user_list_raw)": len(channel_user_list_raw)})
        # raise Exception({"channel_user_list_raw[0]": channel_user_list_raw[0]})

        channel_user_list = luniq(channel_user_list_raw,
                                  idfun=ChannelUser.channel_user2codename)
        collection = ChannelUserCollection.collection().with_options(write_concern=write_concern)

        def doc2pair(channel_user):
            cond = {ChannelUser.Field.CODENAME: ChannelUser.channel_user2codename(channel_user)}
            return cond, channel_user

        j_pair_list = lmap(doc2pair, channel_user_list)
        MongoDBTool.j_pair_list2upsert(collection, j_pair_list)

    def upsert_marketprice():
        marketprice_list = lmap(ig(0), pair_list)
        collection = MarketpriceCollection.collection().with_options(write_concern=write_concern)
        j_pair_list = [(x, x) for x in marketprice_list]
        MongoDBTool.j_pair_list2upsert(collection, j_pair_list)

    upsert_channel_user()
    upsert_marketprice()

def db_add_review(review):
    """Need to make sure the rating is inserted as an integer."""
    db = mongo.db.reviews.with_options(
        write_concern=WriteConcern(w=1, j=False, wtimeout=5000)
    )
    the_review = db.insert_one(review)
    return the_review.inserted_id

def execute_transfers_in_txn(conn, source_acct_list, destin_acct_list,
                             source_coll_list, destin_coll_list,
                             num_transfers_per_txn):
    session = conn.start_session()
    session.start_transaction(read_concern=ReadConcern('snapshot'),
                              write_concern=WriteConcern('majority',
                                                         wtimeout=1000))
    for i in range(num_transfers_per_txn):
        transfer_amount = random.randint(0, 15)
        source_acct = source_acct_list.pop()
        destin_acct = destin_acct_list.pop()
        source_coll = source_coll_list.pop()
        destin_coll = destin_coll_list.pop()
        source_coll.update_one({'cust_id': source_acct},
                               {'$inc': {'balance': -transfer_amount}},
                               session=session)
        time.sleep(0.1)
        destin_coll.update_one({'cust_id': destin_acct},
                               {'$inc': {'balance': transfer_amount}},
                               session=session)
        print(str(transfer_amount) + " transferred from: " +
              str(source_coll.name) + "." + str(source_acct) + " to " +
              str(destin_coll.name) + "." + str(destin_acct))
    session.commit_transaction()

async def update_employee_info(session):
    employees_coll = session.client.hr.employees
    events_coll = session.client.reporting.events

    async with session.start_transaction(
            read_concern=ReadConcern("snapshot"),
            write_concern=WriteConcern(w="majority")):
        await employees_coll.update_one(
            {"employee": 3}, {"$set": {"status": "Inactive"}},
            session=session)
        await events_coll.insert_one(
            {"employee": 3,
             "status": {"new": "Inactive", "old": "Active"}},
            session=session)

        while True:
            try:
                # Commit uses write concern set at transaction start.
                await session.commit_transaction()
                print("Transaction committed.")
                break
            except (ConnectionFailure, OperationFailure) as exc:
                # Can retry commit
                if exc.has_error_label("UnknownTransactionCommitResult"):
                    print("UnknownTransactionCommitResult, retrying "
                          "commit operation ...")
                    continue
                else:
                    print("Error during commit ...")
                    raise

def test_sub_collection(self):
    # Verify that a collection with a dotted name inherits options from its
    # parent collection.
    write_concern = WriteConcern(w=2, j=True)
    read_concern = ReadConcern("majority")
    read_preference = Secondary([{"dc": "sf"}])
    codec_options = CodecOptions(tz_aware=True, uuid_representation=JAVA_LEGACY)

    coll1 = self.db.get_collection(
        "test",
        write_concern=write_concern,
        read_concern=read_concern,
        read_preference=read_preference,
        codec_options=codec_options,
    )
    coll2 = coll1.subcollection
    coll3 = coll1["subcollection"]

    for c in [coll1, coll2, coll3]:
        self.assertEqual(write_concern, c.write_concern)
        self.assertEqual(read_concern, c.read_concern)
        self.assertEqual(read_preference, c.read_preference)
        self.assertEqual(codec_options, c.codec_options)

def _collection_default_options(self, name, **kargs):
    wc = (self.write_concern
          if self.write_concern.acknowledged else WriteConcern())
    return self.get_collection(name,
                               codec_options=DEFAULT_CODEC_OPTIONS,
                               read_preference=ReadPreference.PRIMARY,
                               write_concern=wc)

def run_scenario(self):
    # Cleanup state and load data (if provided).
    drop_collections(self.db)
    data = scenario_def.get('data')
    if data:
        self.db.test.with_options(write_concern=WriteConcern(
            w="majority")).insert_many(scenario_def['data'])

    # Run operations and check results or errors.
    expected_result = test.get('outcome', {}).get('result')
    expected_error = test.get('outcome', {}).get('error')
    if expected_error is True:
        with self.assertRaises(PyMongoError):
            run_operation(self.db.test, test)
    else:
        result = run_operation(self.db.test, test)
        check_result(self, expected_result, result)

    # Assert final state is expected.
    expected_c = test['outcome'].get('collection')
    if expected_c is not None:
        expected_name = expected_c.get('name')
        if expected_name is not None:
            db_coll = self.db[expected_name]
        else:
            db_coll = self.db.test
        db_coll = db_coll.with_options(
            read_concern=ReadConcern(level="local"))
        self.assertEqual(list(db_coll.find()), expected_c['data'])

def test_parallel_scan(self):
    yield skip_if_mongos(self.cx)

    collection = self.collection.with_options(
        write_concern=WriteConcern(test.env.w))

    # Enough documents that each cursor requires multiple batches.
    yield collection.delete_many({})
    yield collection.insert_many(({'_id': i} for i in range(8000)))
    if test.env.is_replica_set:
        # Test that getMore messages are sent to the right server.
        client = self.motor_rsc(read_preference=Secondary())
        collection = client.motor_test.test_collection

    docs = []

    @gen.coroutine
    def f(cursor):
        self.assertTrue(
            isinstance(cursor, motor.motor_tornado.MotorCommandCursor))
        while (yield cursor.fetch_next):
            docs.append(cursor.next_object())

    cursors = yield collection.parallel_scan(3)
    yield [f(cursor) for cursor in cursors]

    self.assertEqual(len(docs), (yield collection.count_documents({})))

async def test_with_options(db, users):
    res = await User.q(db).delete_one({User.name.s: 'xxx'})
    assert res == 1

    res = await User.q(db)\
        .with_options(write_concern=WriteConcern(w=0))\
        .delete_one({User.name.s: 'totti'})
    assert res is None

def test_insert_many(self):
    collection = self.collection.with_options(
        write_concern=WriteConcern(0))
    flags = INSERT_FLAGS['ContinueOnError']
    docs = [{'_id': 1}, {'_id': 2}]
    with going(collection.insert_many, docs, ordered=False):
        self.server.receives(OpInsert(docs, flags=flags))