def drop_ip(self, endpoint_name, ip):
    logging.info("Dropping endpoint %s IP %s" % (endpoint_name, ip))
    self.zk_conn.delete(paths.endpoint_ip_metrics(endpoint_name, ip))
    self.zk_conn.delete(paths.confirmed_ip(endpoint_name, ip))
    self.zk_conn.delete(paths.ip_address(ip))
    for name in self.config.loadbalancer_names():
        self.zk_conn.delete(paths.loadbalancer_ip(name, ip))
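# Illustrative usage sketch (not part of the original module): assuming a
# configured manager instance `manager`, dropping an IP removes its per-IP
# metrics, its confirmed-IP record, its global IP registration, and its entry
# under every configured loadbalancer. The endpoint name and address below
# are hypothetical.
#
#   manager.drop_ip("web", "10.0.0.5")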
def set_endpoint_metrics(self, endpoint_name, metrics, endpoint_ip=None):
    if endpoint_ip:
        self.zk_conn.write(
            paths.endpoint_ip_metrics(endpoint_name, endpoint_ip),
            json.dumps(metrics))
    else:
        self.zk_conn.write(
            paths.endpoint_custom_metrics(endpoint_name),
            json.dumps(metrics))
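# Illustrative usage sketch (not part of the original module): metrics are
# JSON-encoded before being written, and load_metrics() below expects them as
# a dictionary of the form { "name" : (weight, value) }. The manager
# instance, endpoint name and address below are hypothetical.
#
#   manager.set_endpoint_metrics("web", {"active": (1, 3)}, endpoint_ip="10.0.0.5")
#   manager.set_endpoint_metrics("web", {"rate": (2, 0.5)})  # endpoint-wide custom metrics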
def load_metrics(self, endpoint, endpoint_metrics={}):
    """
    Load the metrics for an endpoint and return a tuple
    (metrics, metric_ips, active_connections), where metrics is the list of
    metrics to use for the endpoint, metric_ips is the list of IP addresses
    that contributed metrics, and active_connections is a list of IP
    addresses with active connections.
    """
    # Read any default metrics. We can override the source endpoint for
    # metrics here (so, for example, a backend database server can inherit
    # a set of metrics given for the frontend server). This, like many other
    # things, is specified by the name of the endpoint we are inheriting
    # metrics from. If not given, we default to the current endpoint.
    source_key = endpoint.source_key()
    if source_key:
        (metric_ips, metrics) = endpoint_metrics.get(source_key, ([], []))
    else:
        (metric_ips, metrics) = endpoint_metrics.get(endpoint.key(), ([], []))

    default_metrics = self.zk_conn.read(paths.endpoint_custom_metrics(endpoint.name))
    if default_metrics:
        try:
            # This should be a dictionary { "name" : (weight, value) }.
            metrics.append(json.loads(default_metrics))
        except ValueError:
            logging.warn("Invalid custom metrics for %s." % (endpoint.name))

    # Read other metrics for given hosts.
    active_connections = []
    for ip_address in self.active_ips(endpoint.name):
        ip_metrics = self.zk_conn.read(paths.endpoint_ip_metrics(endpoint.name, ip_address))
        if ip_metrics:
            try:
                # This should be a dictionary { "name" : (weight, value) }.
                ip_metrics = json.loads(ip_metrics)
                metrics.append(ip_metrics)
                if ip_address not in metric_ips:
                    metric_ips.append(ip_address)
                if self.metric_indicates_active(ip_metrics):
                    active_connections.append(ip_address)
            except ValueError:
                logging.warn("Invalid instance metrics for %s:%s." %
                             (endpoint.name, ip_address))

    for instance_id in self.decommissioned_instances(endpoint.name):
        # Also check the metrics of decommissioned instances, looking for
        # any active connections.
        for ip_address in self.decommissioned_instance_ip_addresses(endpoint.name, instance_id):
            if ip_address:
                ip_metrics = self.zk_conn.read(paths.endpoint_ip_metrics(endpoint.name, ip_address))
                if ip_metrics:
                    try:
                        # As above, this should be a dictionary.
                        ip_metrics = json.loads(ip_metrics)
                        metrics.append(ip_metrics)
                        if ip_address not in metric_ips:
                            metric_ips.append(ip_address)
                        if self.metric_indicates_active(ip_metrics):
                            active_connections.append(ip_address)
                    except ValueError:
                        logging.warn("Invalid instance metrics for %s:%s." %
                                     (endpoint.name, ip_address))

    # Return the metrics.
    return metrics, list(set(metric_ips)), active_connections
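# Illustrative usage sketch (not part of the original module): the names
# below are hypothetical. Given per-IP metrics written via
# set_endpoint_metrics(), load_metrics() merges any inherited metrics, the
# endpoint-wide custom metrics, and each active (or decommissioned)
# instance's metrics, returning the combined list along with the
# contributing IPs and the IPs that still report active connections.
#
#   metrics, metric_ips, active = manager.load_metrics(endpoint)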