def _poll_url(self, url, node):
    """Poll *url* once and decide whether *node* looks healthy.

    :param url: The fully expanded URL to poll.
    :param node: The node being checked (used for logging only).
    :returns: True if the node is considered healthy, False otherwise.
    """
    params = self.params
    verify_ssl = params['poll_url_ssl_verify']
    conn_error_as_unhealthy = params['poll_url_conn_error_as_unhealthy']
    expected_resp_str = params['poll_url_healthy_response']
    # Keep the per-request timeout well below the retry interval so a
    # hung endpoint cannot stall the whole retry schedule.
    timeout = max(params['poll_url_retry_interval'] * 0.1, 1)

    try:
        result = utils.url_fetch(url, timeout=timeout, verify=verify_ssl)
    except Exception as ex:
        if not conn_error_as_unhealthy:
            # Operator chose to treat connection failures as benign.
            LOG.info(
                "%s for %s: ignoring connection error when polling "
                "URL (%s)", consts.POLL_URL_PASS, node.name, ex)
            return True
        LOG.info("%s for %s: connection error when polling URL (%s)",
                 consts.POLL_URL_FAIL, node.name, ex)
        return False

    if re.search(expected_resp_str, result):
        LOG.info("%s for %s: matched expected response string.",
                 consts.POLL_URL_PASS, node.name)
        return True

    LOG.info(
        "%s for %s: did not find expected response string %s in "
        "URL result (%s)", consts.POLL_URL_FAIL, node.name,
        expected_resp_str, result)
    return False
def test_file_scheme_supported(self, mock_urlopen):
    """file:// URLs are fetched when 'file' is listed in allowed_schemes."""
    url = 'file:///etc/profile'
    data = '{ "foo": "bar" }'
    mock_urlopen.return_value = io.StringIO(data)

    self.assertEqual(data, utils.url_fetch(url, allowed_schemes=['file']))
def test_file_scheme_supported(self):
    """file:// URLs are fetched when 'file' is listed in allowed_schemes."""
    data = '{ "foo": "bar" }'
    url = 'file:///etc/profile'
    fake_resp = six.moves.cStringIO(data)
    self.patchobject(six.moves.urllib.request, 'urlopen',
                     return_value=fake_resp)

    result = utils.url_fetch(url, allowed_schemes=['file'])

    self.assertEqual(data, result)
def test_byte_response(self, mock_get):
    """Byte chunks from the response are returned as a text string."""
    data = b'{ "foo": "bar" }'
    url = 'http://example.com/somedata'

    resp = mock.Mock()
    resp.iter_content.return_value = [data]
    mock_get.return_value = resp

    actual = utils.url_fetch(url)

    self.assertEqual('{ "foo": "bar" }', actual)
def test_max_fetch_size_okay(self):
    """A body smaller than max_response_size is fetched without error."""
    body = '{ "foo": "bar" }'
    cfg.CONF.set_override('max_response_size', 500)
    self.patchobject(requests, 'get', return_value=Response(body))

    utils.url_fetch('http://example.com/somedata')
def test_https_scheme(self):
    """https URLs are fetched via requests.get by default."""
    body = '{ "foo": "bar" }'
    self.patchobject(requests, 'get', return_value=Response(body))

    fetched = utils.url_fetch('https://example.com/somedata')

    self.assertEqual(body, fetched)
def test_max_fetch_size_okay(self):
    """A body smaller than max_response_size is fetched without error."""
    body = '{ "foo": "bar" }'
    cfg.CONF.set_override('max_response_size', 500, enforce_type=True)
    self.patchobject(requests, 'get', return_value=Response(body))

    utils.url_fetch('http://example.com/somedata')
def run_health_check(self, ctx, node):
    """Routine to check a node status from a url and recovery if necessary

    :param ctx: The request context (not used by the URL check itself).
    :param node: The node to be checked.
    :returns: True if node is considered to be healthy. False otherwise.
    """
    url_template = self.params['poll_url']
    verify_ssl = self.params['poll_url_ssl_verify']
    conn_error_as_unhealthy = self.params[
        'poll_url_conn_error_as_unhealthy']
    expected_resp_str = self.params['poll_url_healthy_response']
    max_unhealthy_retry = self.params['poll_url_retry_limit']
    retry_interval = self.params['poll_url_retry_interval']

    def stop_node_recovery():
        # Return True when the node was updated too recently for
        # recovery to be warranted; otherwise log the failed attempt,
        # wait one retry interval and return False so polling resumes.
        # NOTE: reads ``available_attemps`` from the enclosing scope.
        node_last_updated = node.updated_at or node.init_at
        if not timeutils.is_older_than(node_last_updated,
                                       self.node_update_timeout):
            LOG.info(
                "Node %s was updated at %s which is less than "
                "%d secs ago. Skip node recovery from "
                "NodePollUrlHealthCheck.",
                node.id, node_last_updated, self.node_update_timeout)
            return True

        LOG.info("Node %s is reported as down (%d retries left)",
                 node.id, available_attemps)
        time.sleep(retry_interval)
        return False

    url = self._expand_url_template(url_template, node)
    LOG.debug("Polling node status from URL: %s", url)

    available_attemps = max_unhealthy_retry
    # Per-request timeout is kept a small fraction of the retry interval
    # (with a 1 second floor) so one slow request cannot consume the
    # whole retry budget.
    timeout = max(retry_interval * 0.1, 1)
    while available_attemps > 0:
        available_attemps -= 1
        try:
            result = utils.url_fetch(url, timeout=timeout,
                                     verify=verify_ssl)
        except utils.URLFetchError as ex:
            if conn_error_as_unhealthy:
                # Treat the connection failure like an unhealthy
                # response: maybe retry, maybe give up.
                if stop_node_recovery():
                    return True
                continue
            else:
                # Connection errors are configured as benign; report
                # healthy so no recovery is triggered.
                LOG.error("Error when requesting node health status from"
                          " %s: %s", url, ex)
                return True

        LOG.debug("Node status returned from URL(%s): %s", url, result)

        if re.search(expected_resp_str, result):
            LOG.debug(
                'NodePollUrlHealthCheck reports node %s is healthy.',
                node.id)
            return True

        if node.status != consts.NS_ACTIVE:
            # Nodes in transitional/error states are left alone.
            LOG.info("Skip node recovery because node %s is not in "
                     "ACTIVE state.", node.id)
            return True

        if stop_node_recovery():
            return True

    # All retries exhausted without a healthy response.
    return False
def _check_url_and_recover_node(self, ctx, node, recover_action, params):
    """Poll a node's health URL and trigger recovery when it stays down.

    :param ctx: The request context to use for recovery action
    :param node: The node to be checked.
    :param recover_action: The health policy action name.
    :param params: Parameters specific to poll url or recovery action
    :returns: action if node was triggered for recovery. Otherwise None.
    """
    url_template = params['poll_url']
    verify_ssl = params['poll_url_ssl_verify']
    expected_resp_str = params['poll_url_healthy_response']
    max_unhealthy_retry = params['poll_url_retry_limit']
    retry_interval = params['poll_url_retry_interval']
    node_update_timeout = params['node_update_timeout']

    url = self._expand_url_template(url_template, node)
    LOG.info("Polling node status from URL: %s", url)

    # Count down so the logged value matches the retries still left.
    for attempts_left in range(max_unhealthy_retry - 1, -1, -1):
        try:
            result = utils.url_fetch(url, verify=verify_ssl)
        except utils.URLFetchError as ex:
            LOG.error("Error when requesting node health status from"
                      " %s: %s", url, ex)
            return None

        LOG.debug("Node status returned from URL(%s): %s", url, result)
        if re.search(expected_resp_str, result):
            LOG.debug('Node %s is healthy', node.id)
            return None

        if node.status != consts.NS_ACTIVE:
            LOG.info("Skip node recovery because node %s is not in "
                     "ACTIVE state", node.id)
            return None

        last_update = node.updated_at or node.init_at
        if not timeutils.is_older_than(last_update, node_update_timeout):
            LOG.info("Node %s was updated at %s which is less than "
                     "%d secs ago. Skip node recovery.",
                     node.id, last_update, node_update_timeout)
            return None

        LOG.info("Node %s is reported as down (%d retries left)",
                 node.id, attempts_left)
        time.sleep(retry_interval)

    # recover node after exhausting retries
    LOG.info("Requesting node recovery: %s", node.id)
    req = objects.NodeRecoverRequest(identity=node.id,
                                     params=recover_action)
    return self.rpc_client.call(ctx, 'node_recover', req)