def test_decrease_poolsize(self):
    # Test increasing and decreasing the pool size
    max_connections = 3
    protocol = Protocol(config=Configuration(
        service_endpoint='https://example.com/Foo.asmx',
        credentials=Credentials(get_random_string(8), get_random_string(8)),
        auth_type=NTLM,
        version=Version(Build(15, 1)),
        retry_policy=FailFast(),
        max_connections=max_connections,
    ))
    self.assertEqual(protocol._session_pool.qsize(), 0)
    self.assertEqual(protocol.session_pool_size, 0)
    protocol.increase_poolsize()
    protocol.increase_poolsize()
    protocol.increase_poolsize()
    with self.assertRaises(SessionPoolMaxSizeReached):
        protocol.increase_poolsize()
    self.assertEqual(protocol._session_pool.qsize(), max_connections)
    self.assertEqual(protocol.session_pool_size, max_connections)
    protocol.decrease_poolsize()
    protocol.decrease_poolsize()
    with self.assertRaises(SessionPoolMinSizeReached):
        protocol.decrease_poolsize()
    self.assertEqual(protocol._session_pool.qsize(), 1)
def test_protocol_instance_caching(self, m):
    # Verify that we get the same Protocol instance for the same combination of (endpoint, credentials)
    user, password = get_random_string(8), get_random_string(8)
    config = Configuration(
        service_endpoint='https://example.com/Foo.asmx',
        credentials=Credentials(user, password),
        auth_type=NTLM,
        version=Version(Build(15, 1)),
        retry_policy=FailFast(),
    )

    # Test CachingProtocol.__getitem__
    with self.assertRaises(KeyError):
        _ = Protocol[config]
    base_p = Protocol(config=config)
    self.assertEqual(base_p, Protocol[config][0])

    # Make sure we always return the same item when creating a Protocol with the same endpoint and creds
    for _ in range(10):
        p = Protocol(config=config)
        self.assertEqual(base_p, p)
        self.assertEqual(id(base_p), id(p))
        self.assertEqual(hash(base_p), hash(p))
        self.assertEqual(id(base_p._session_pool), id(p._session_pool))

    # Test CachingProtocol.__delitem__
    del Protocol[config]
    with self.assertRaises(KeyError):
        _ = Protocol[config]

    # Make sure we get a fresh instance after we cleared the cache
    p = Protocol(config=config)
    self.assertNotEqual(base_p, p)

    Protocol.clear_cache()
def test_autodiscover_cache(self, m):
    # Mock the default endpoint that we test in step 1 of autodiscovery
    m.post(self.dummy_ad_endpoint, status_code=200, content=self.dummy_ad_response)
    # Also mock the EWS URL. We try to guess its auth method as part of autodiscovery
    m.post(self.dummy_ews_endpoint, status_code=200)
    discovery = Autodiscovery(
        email=self.account.primary_smtp_address,
        credentials=self.account.protocol.credentials,
        retry_policy=self.retry_policy,
    )
    # Not cached
    self.assertNotIn(discovery._cache_key, autodiscover_cache)
    discovery.discover()
    # Now it's cached
    self.assertIn(discovery._cache_key, autodiscover_cache)
    # Make sure the cache can be looked up by value, not by id(). This is important for multi-threading/processing
    self.assertIn(
        (
            self.account.primary_smtp_address.split('@')[1],
            Credentials(self.account.protocol.credentials.username, self.account.protocol.credentials.password),
            True,
        ),
        autodiscover_cache,
    )
    # Poison the cache with a failing autodiscover endpoint. discover() must handle this and rebuild the cache
    autodiscover_cache[discovery._cache_key] = AutodiscoverProtocol(config=Configuration(
        service_endpoint='https://example.com/Autodiscover/Autodiscover.xml',
        credentials=Credentials(get_random_string(8), get_random_string(8)),
        auth_type=NTLM,
        retry_policy=FailFast(),
    ))
    m.post('https://example.com/Autodiscover/Autodiscover.xml', status_code=404)
    discovery.discover()
    self.assertIn(discovery._cache_key, autodiscover_cache)

    # Make sure that the cache is actually used on the second call to discover()
    _orig = discovery._step_1

    def _mock(slf, *args, **kwargs):
        raise NotImplementedError()

    discovery._step_1 = MethodType(_mock, discovery)
    discovery.discover()

    # Fake that another thread added the cache entry into the persistent storage but we don't have it in our
    # in-memory cache. The cache should work anyway.
    autodiscover_cache._protocols.clear()
    discovery.discover()
    discovery._step_1 = _orig

    # Make sure we can delete cache entries even though we don't have them in our in-memory cache
    autodiscover_cache._protocols.clear()
    del autodiscover_cache[discovery._cache_key]
    # This should also work if the cache does not contain the entry anymore
    del autodiscover_cache[discovery._cache_key]
def test_protocol_instance_caching(self, m):
    # Verify that we get the same Protocol instance for the same combination of (endpoint, credentials)
    user, password = get_random_string(8), get_random_string(8)
    base_p = Protocol(config=Configuration(
        service_endpoint='https://example.com/Foo.asmx',
        credentials=Credentials(user, password),
        auth_type=NTLM,
        version=Version(Build(15, 1)),
        retry_policy=FailFast(),
    ))

    for _ in range(10):
        p = Protocol(config=Configuration(
            service_endpoint='https://example.com/Foo.asmx',
            credentials=Credentials(user, password),
            auth_type=NTLM,
            version=Version(Build(15, 1)),
            retry_policy=FailFast(),
        ))
        self.assertEqual(base_p, p)
        self.assertEqual(id(base_p), id(p))
        self.assertEqual(hash(base_p), hash(p))
        self.assertEqual(id(base_p._session_pool), id(p._session_pool))

    Protocol.clear_cache()
def test_session(self, m):
    # Test that renewing a session returns a new session object
    protocol = Protocol(config=Configuration(
        service_endpoint='https://example.com/Foo.asmx',
        credentials=Credentials(get_random_string(8), get_random_string(8)),
        auth_type=NTLM,
        version=Version(Build(15, 1)),
        retry_policy=FailFast(),
    ))
    session = protocol.create_session()
    new_session = protocol.renew_session(session)
    self.assertNotEqual(id(session), id(new_session))
def get_test_protocol(**kwargs):
    return AutodiscoverProtocol(config=Configuration(
        service_endpoint=kwargs.get("service_endpoint", "https://example.com/Autodiscover/Autodiscover.xml"),
        credentials=kwargs.get("credentials", Credentials(get_random_string(8), get_random_string(8))),
        auth_type=kwargs.get("auth_type", NTLM),
        retry_policy=kwargs.get("retry_policy", FailFast()),
    ))
def test_pickle(self):
    # Test that we can pickle, repr and str Protocols
    o = Protocol(config=Configuration(
        service_endpoint='https://example.com/Foo.asmx',
        credentials=Credentials(get_random_string(8), get_random_string(8)),
        auth_type=NTLM,
        version=Version(Build(15, 1)),
        retry_policy=FailFast(),
    ))
    pickled_o = pickle.dumps(o)
    unpickled_o = pickle.loads(pickled_o)
    self.assertIsInstance(unpickled_o, type(o))
    self.assertEqual(repr(o), repr(unpickled_o))
    self.assertEqual(str(o), str(unpickled_o))
def test_autodiscover_direct_gc(self, m):
    # Test garbage collection of the autodiscover cache
    c = Credentials(get_random_string(8), get_random_string(8))
    autodiscover_cache[('example.com', c)] = AutodiscoverProtocol(config=Configuration(
        service_endpoint='https://example.com/Autodiscover/Autodiscover.xml',
        credentials=c,
        auth_type=NTLM,
        retry_policy=FailFast(),
    ))
    self.assertEqual(len(autodiscover_cache), 1)
    autodiscover_cache.__del__()
def test_close_autodiscover_connections(self, m):
    # A live test that we can close TCP connections
    c = Credentials(get_random_string(8), get_random_string(8))
    autodiscover_cache[('example.com', c)] = AutodiscoverProtocol(config=Configuration(
        service_endpoint='https://example.com/Autodiscover/Autodiscover.xml',
        credentials=c,
        auth_type=NTLM,
        retry_policy=FailFast(),
    ))
    self.assertEqual(len(autodiscover_cache), 1)
    close_connections()
def get_test_protocol(**kwargs):
    return Protocol(config=Configuration(
        server=kwargs.get("server"),
        service_endpoint=kwargs.get("service_endpoint", f"https://{get_random_hostname()}/Foo.asmx"),
        credentials=kwargs.get("credentials", Credentials(get_random_string(8), get_random_string(8))),
        auth_type=kwargs.get("auth_type", NTLM),
        version=kwargs.get("version", Version(Build(15, 1))),
        retry_policy=kwargs.get("retry_policy", FailFast()),
        max_connections=kwargs.get("max_connections"),
    ))
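# A minimal usage sketch for the factory above (illustrative only; the specific endpoint and
# max_connections override are assumptions, not taken from the test suite). Any keyword that is
# not supplied falls back to the randomized defaults defined in get_test_protocol().
def test_get_test_protocol_overrides(self):
    protocol = get_test_protocol(service_endpoint='https://example.com/Foo.asmx', max_connections=2)
    self.assertEqual(protocol.config.service_endpoint, 'https://example.com/Foo.asmx')
    self.assertEqual(protocol.config.auth_type, NTLM)  # Default supplied by the factory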
def test_close(self):
    # Don't use example.com here - it does not resolve or answer on all ISPs
    proc = psutil.Process()
    ip_addresses = {
        info[4][0]
        for info in socket.getaddrinfo('httpbin.org', 80, socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_IP)
    }

    def conn_count():
        return len([p for p in proc.connections() if p.raddr[0] in ip_addresses])

    self.assertGreater(len(ip_addresses), 0)
    protocol = Protocol(config=Configuration(
        service_endpoint='http://httpbin.org',
        credentials=Credentials(get_random_string(8), get_random_string(8)),
        auth_type=NOAUTH,
        version=Version(Build(15, 1)),
        retry_policy=FailFast(),
        max_connections=3,
    ))
    # Merely getting a session should not create connections
    session = protocol.get_session()
    self.assertEqual(conn_count(), 0)
    # Open one URL - we have 1 connection
    session.get('http://httpbin.org')
    self.assertEqual(conn_count(), 1)
    # Open the same URL - we should still have 1 connection
    session.get('http://httpbin.org')
    self.assertEqual(conn_count(), 1)
    # Open some more connections
    s2 = protocol.get_session()
    s2.get('http://httpbin.org')
    s3 = protocol.get_session()
    s3.get('http://httpbin.org')
    self.assertEqual(conn_count(), 3)
    # Releasing the sessions does not close the connections
    protocol.release_session(session)
    protocol.release_session(s2)
    protocol.release_session(s3)
    self.assertEqual(conn_count(), 3)
    # But closing explicitly does
    protocol.close()
    self.assertEqual(conn_count(), 0)
def test_handle_backoff(self):
    # Test that we can handle backoff messages
    svc = ResolveNames(self.account.protocol)
    tmp = svc._response_generator
    orig_policy = self.account.protocol.config.retry_policy
    try:
        # We need to fail fast so we don't end up in an infinite loop
        self.account.protocol.config.retry_policy = FailFast()
        svc._response_generator = Mock(side_effect=ErrorServerBusy("XXX", back_off=1))
        with self.assertRaises(ErrorServerBusy) as e:
            list(svc._get_elements(create_element("XXX")))
        self.assertEqual(e.exception.args[0], "XXX")
    finally:
        svc._response_generator = tmp
        self.account.protocol.config.retry_policy = orig_policy
def test_magic(self, m):
    # Just test that we don't fail when calling repr() and str(). Insert a dummy cache entry for testing
    c = Credentials(get_random_string(8), get_random_string(8))
    autodiscover_cache[('example.com', c)] = AutodiscoverProtocol(config=Configuration(
        service_endpoint='https://example.com/Autodiscover/Autodiscover.xml',
        credentials=c,
        auth_type=NTLM,
        retry_policy=FailFast(),
    ))
    self.assertEqual(len(autodiscover_cache), 1)
    str(autodiscover_cache)
    repr(autodiscover_cache)
    for protocol in autodiscover_cache._protocols.values():
        str(protocol)
        repr(protocol)
def test_post_ratelimited(self):
    url = 'https://example.com'
    protocol = self.account.protocol
    orig_policy = protocol.config.retry_policy
    RETRY_WAIT = exchangelib.util.RETRY_WAIT
    MAX_REDIRECTS = exchangelib.util.MAX_REDIRECTS

    session = protocol.get_session()
    try:
        # Make sure we fail fast in error cases
        protocol.config.retry_policy = FailFast()

        # Test the straight, HTTP 200 path
        session.post = mock_post(url, 200, {}, 'foo')
        r, session = post_ratelimited(protocol=protocol, session=session, url='http://', headers=None, data='')
        self.assertEqual(r.content, b'foo')

        # Test exceptions raised by the POST request
        for err_cls in CONNECTION_ERRORS:
            session.post = mock_session_exception(err_cls)
            with self.assertRaises(err_cls):
                r, session = post_ratelimited(
                    protocol=protocol, session=session, url='http://', headers=None, data='')

        # Test bad status codes and headers
        session.post = mock_post(url, 401, {})
        with self.assertRaises(UnauthorizedError):
            r, session = post_ratelimited(protocol=protocol, session=session, url='http://', headers=None, data='')
        session.post = mock_post(url, 999, {'connection': 'close'})
        with self.assertRaises(TransportError):
            r, session = post_ratelimited(protocol=protocol, session=session, url='http://', headers=None, data='')
        session.post = mock_post(url, 302, {'location': '/ews/genericerrorpage.htm?aspxerrorpath=/ews/exchange.asmx'})
        with self.assertRaises(TransportError):
            r, session = post_ratelimited(protocol=protocol, session=session, url='http://', headers=None, data='')
        session.post = mock_post(url, 503, {})
        with self.assertRaises(TransportError):
            r, session = post_ratelimited(protocol=protocol, session=session, url='http://', headers=None, data='')

        # No redirect header
        session.post = mock_post(url, 302, {})
        with self.assertRaises(TransportError):
            r, session = post_ratelimited(protocol=protocol, session=session, url=url, headers=None, data='')
        # Redirect header to same location
        session.post = mock_post(url, 302, {'location': url})
        with self.assertRaises(TransportError):
            r, session = post_ratelimited(protocol=protocol, session=session, url=url, headers=None, data='')
        # Redirect header to relative location
        session.post = mock_post(url, 302, {'location': url + '/foo'})
        with self.assertRaises(RedirectError):
            r, session = post_ratelimited(protocol=protocol, session=session, url=url, headers=None, data='')
        # Redirect header to other location and allow_redirects=False
        session.post = mock_post(url, 302, {'location': 'https://contoso.com'})
        with self.assertRaises(TransportError):
            r, session = post_ratelimited(protocol=protocol, session=session, url=url, headers=None, data='')
        # Redirect header to other location and allow_redirects=True
        exchangelib.util.MAX_REDIRECTS = 0
        session.post = mock_post(url, 302, {'location': 'https://contoso.com'})
        with self.assertRaises(TransportError):
            r, session = post_ratelimited(
                protocol=protocol, session=session, url=url, headers=None, data='', allow_redirects=True)

        # CAS error
        session.post = mock_post(url, 999, {'X-CasErrorCode': 'AAARGH!'})
        with self.assertRaises(CASError):
            r, session = post_ratelimited(protocol=protocol, session=session, url=url, headers=None, data='')

        # Allow XML data in a non-HTTP 200 response
        session.post = mock_post(url, 500, {}, '<?xml version="1.0" ?><foo></foo>')
        r, session = post_ratelimited(protocol=protocol, session=session, url=url, headers=None, data='')
        self.assertEqual(r.content, b'<?xml version="1.0" ?><foo></foo>')

        # Bad status_code and bad text
        session.post = mock_post(url, 999, {})
        with self.assertRaises(TransportError):
            r, session = post_ratelimited(protocol=protocol, session=session, url=url, headers=None, data='')

        # Test rate limit exceeded
        exchangelib.util.RETRY_WAIT = 1
        protocol.config.retry_policy = FaultTolerance(max_wait=0.5)  # Fail after first RETRY_WAIT
        session.post = mock_post(url, 503, {'connection': 'close'})
        # Mock renew_session to return the same session so the session object's 'post' method is still mocked
        protocol.renew_session = lambda s: s
        with self.assertRaises(RateLimitError) as rle:
            r, session = post_ratelimited(protocol=protocol, session=session, url='http://', headers=None, data='')
        self.assertEqual(rle.exception.status_code, 503)
        self.assertEqual(rle.exception.url, url)
        self.assertTrue(1 <= rle.exception.total_wait < 2)  # One RETRY_WAIT plus some overhead

        # Test something larger than the default wait, so we retry at least once
        protocol.retry_policy.max_wait = 3  # Fail after second RETRY_WAIT
        session.post = mock_post(url, 503, {'connection': 'close'})
        with self.assertRaises(RateLimitError) as rle:
            r, session = post_ratelimited(protocol=protocol, session=session, url='http://', headers=None, data='')
        self.assertEqual(rle.exception.status_code, 503)
        self.assertEqual(rle.exception.url, url)
        # We double the wait for each retry, so this is RETRY_WAIT + 2*RETRY_WAIT plus some overhead
        self.assertTrue(3 <= rle.exception.total_wait < 4, rle.exception.total_wait)
    finally:
        protocol.retire_session(session)  # We have patched the session, so discard it
        # Restore patched attributes and functions
        protocol.config.retry_policy = orig_policy
        exchangelib.util.RETRY_WAIT = RETRY_WAIT
        exchangelib.util.MAX_REDIRECTS = MAX_REDIRECTS
        try:
            delattr(protocol, 'renew_session')
        except AttributeError:
            pass
def test_fail_fast_back_off(self):
    # Test that FailFast does not support back-off logic
    c = FailFast()
    self.assertIsNone(c.back_off_until)
    with self.assertRaises(AttributeError):
        c.back_off_until = 1
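# For contrast with the FailFast test above, a hedged sketch of how a back-off-capable policy
# could be exercised. It assumes that FaultTolerance (already used elsewhere in this module)
# exposes a back_off_until attribute and a back_off(seconds) helper; treat this as an
# illustration of the intended contrast, not as the library's documented API.
def test_fault_tolerance_back_off_sketch(self):
    c = FaultTolerance(max_wait=10)
    self.assertIsNone(c.back_off_until)
    c.back_off(5)  # Request a back-off of roughly 5 seconds
    self.assertIsNotNone(c.back_off_until)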