def setUpClass(cls):
    """Prepare class fixtures: record the write concern, open a direct
    client to one secondary (when replicated), and install the
    deprecation-warning filter.
    """
    super(TestLegacyBulkWriteConcern, cls).setUpClass()
    cls.w = client_context.w
    cls.secondary = None
    if cls.w > 1:
        primary = client_context.ismaster['primary']
        for member in client_context.ismaster['hosts']:
            if member == primary:
                continue
            # Direct connection to the first non-primary member found.
            cls.secondary = single_client(*partition_node(member))
            break
    # We tested wtimeout errors by specifying a write concern greater than
    # the number of members, but in MongoDB 2.7.8+ this causes a different
    # sort of error, "Not enough data-bearing nodes". In recent servers we
    # use a failpoint to pause replication on a secondary.
    cls.need_replication_stopped = client_context.version.at_least(2, 7, 8)
    cls.deprecation_filter = DeprecationFilter()
def setUpClass(cls):
    """Run the parent setup, then suppress DeprecationWarnings for the
    duration of this test class.
    """
    super(IgnoreDeprecationsTest, cls).setUpClass()
    cls.deprecation_filter = DeprecationFilter()
def setUpClass(cls):
    """Run the parent setup and install a filter so deprecated legacy
    bulk APIs do not emit warnings during these tests.
    """
    super(TestLegacyBulkNoResults, cls).setUpClass()
    cls.deprecation_filter = DeprecationFilter()
def setUpClass(cls):
    """Run the parent setup, cache the configured write concern, and
    silence DeprecationWarnings from the legacy API under test.
    """
    super(TestLegacy, cls).setUpClass()
    cls.w = client_context.w
    cls.deprecation_filter = DeprecationFilter()
def setUpClass(cls):
    """Run the parent setup, then escalate DeprecationWarnings to errors
    so any deprecated call fails the test.
    """
    super(TestDeprecations, cls).setUpClass()
    cls.deprecation_filter = DeprecationFilter("error")
def setUpClass(cls):
    """Run the parent setup and suppress DeprecationWarnings raised by
    the legacy bulk authorization paths exercised here.
    """
    super(TestLegacyBulkAuthorization, cls).setUpClass()
    cls.deprecation_filter = DeprecationFilter()