def test_targetted_upsert(self):
    """A targetted upsert must write only to the shard it targets.

    Regression test for a bug where targetted upserts went out to all
    clusters, causing the document to appear in multiple places.
    """
    doc1 = {'_id': 'alpha', 'x': 1, 'y': 1}
    operations.multishard_update(
        'dummy', {'_id': 'alpha'}, {'$set': {'x': 1, 'y': 1}}, upsert=True)
    # The upserted document should exist on cluster 1 only.
    results = list(self.db1.dummy.find({'y': 1}))
    # assertEquals is a deprecated alias; use assertEqual.
    self.assertEqual([doc1], results)
    results = list(self.db2.dummy.find({'y': 1}))
    self.assertEqual([], results)
def test_targetted_replace_upsert(self):
    """A targetted replace-style upsert must write to one shard only.

    Regression test for a bug where targetted upserts went out to all
    clusters when performing a replace instead of a $set. A targetted
    replace will have an ID generated by pymongo instead of our custom
    ID, so this covers a different code path than the $set variant.
    """
    operations.multishard_update(
        'dummy', {'x': '1'}, {'x': 1, 'y': 1}, upsert=True)
    # Exactly one document on cluster 1, none on cluster 2.
    # assertEquals is a deprecated alias; use assertEqual.
    results = list(self.db1.dummy.find({'y': 1}))
    self.assertEqual(1, len(results))
    results = list(self.db2.dummy.find({'y': 1}))
    self.assertEqual(0, len(results))
def test_update_mid_migration(self):
    """An update during migration must hit only the authoritative shard.

    Puts the same document in multiple locations (a mid-migration
    status), performs an update, and checks that only the correct
    location was modified.

    NOTE(review): renamed from ``test_update`` — an identical method of
    that name is defined immediately below, which shadowed this one so
    it never ran. assertEquals (deprecated alias) replaced with
    assertEqual.
    """
    api.set_shard_at_rest('dummy', 1, "dest1/test_sharding")
    doc1 = {'x': 1, 'y': 1}
    self.db1.dummy.insert(doc1)
    api.start_migration('dummy', 1, 'dest2/test_sharding')
    api.set_shard_to_migration_status(
        'dummy', 1, api.ShardStatus.MIGRATING_COPY)
    self.db2.dummy.insert(doc1)
    result = operations.multishard_update('dummy', {}, {'$inc': {'y': 1}})
    # Only one document should have been touched despite two copies.
    self.assertEqual(1, result['n'])
    # Query the correct shard first and see that the counter has been
    # incremented.
    result, = operations.multishard_find('dummy', {'x': 1})
    self.assertEqual(2, result['y'])
    # Now spoof the metadata such that the system thinks the data is on
    # shard2. The counter should still be 1 here.
    api.set_shard_at_rest('dummy', 1, "dest2/test_sharding", force=True)
    result, = operations.multishard_find('dummy', {'x': 1})
    self.assertEqual(1, result['y'])
def test_update(self):
    """An update during migration must hit only the authoritative shard.

    Puts the same document in multiple locations (a mid-migration
    status), performs an update, and checks that only the correct
    location was modified.
    """
    api.set_shard_at_rest('dummy', 1, "dest1/test_sharding")
    doc1 = {'x': 1, 'y': 1}
    self.db1.dummy.insert(doc1)
    api.start_migration('dummy', 1, 'dest2/test_sharding')
    api.set_shard_to_migration_status(
        'dummy', 1, api.ShardStatus.MIGRATING_COPY)
    self.db2.dummy.insert(doc1)
    result = operations.multishard_update('dummy', {}, {'$inc': {'y': 1}})
    # Only one document should have been touched despite two copies.
    # assertEquals is a deprecated alias; use assertEqual.
    self.assertEqual(1, result['n'])
    # Query the correct shard first and see that the counter has been
    # incremented.
    result, = operations.multishard_find('dummy', {'x': 1})
    self.assertEqual(2, result['y'])
    # Now spoof the metadata such that the system thinks the data is on
    # shard2. The counter should still be 1 here.
    api.set_shard_at_rest('dummy', 1, "dest2/test_sharding", force=True)
    result, = operations.multishard_find('dummy', {'x': 1})
    self.assertEqual(1, result['y'])
def test_multi_update(self):
    """A multi-document update must hit multiple clusters at once.

    One matching document is placed on each cluster; a single
    multishard_update should increment both and report n=2.
    """
    doc1 = {'x': 1, 'y': 1}
    doc2 = {'x': 2, 'y': 1}
    self.db1.dummy.insert(doc1)
    self.db2.dummy.insert(doc2)
    result = operations.multishard_update('dummy', {}, {'$inc': {'y': 1}})
    # assertEquals is a deprecated alias; use assertEqual.
    self.assertEqual(2, result['n'])
    result, = operations.multishard_find('dummy', {'x': 1})
    self.assertEqual(2, result['y'])
    result, = operations.multishard_find('dummy', {'x': 2})
    self.assertEqual(2, result['y'])
def update(self, *args, **kwargs):
    """Run a multishard update against this wrapper's collection.

    All positional and keyword arguments are forwarded unchanged to
    ``operations.multishard_update``; its result is returned as-is.
    """
    collection = self.collection_name
    return operations.multishard_update(collection, *args, **kwargs)