def get_clusterthroughput(cluster_id, network_type):
    try:
        start_time = None
        end_time = None
        time_interval = None
        if len(request.args.items()) > 0:
            for request_param in request.args.items():
                if request_param[0] == "start_time":
                    start_time = request_param[1]
                elif request_param[0] == "end_time":
                    end_time = request_param[1]
                elif request_param[0] == "interval":
                    time_interval = request_param[1]
        entity_name, metric_name = NS.time_series_db_manager.\
            get_timeseriesnamefromresource(
                cluster_id=cluster_id,
                network_type=network_type,
                resource_name=pm_consts.CLUSTER_THROUGHPUT,
                utilization_type=pm_consts.USED
            ).split(
                NS.time_series_db_manager.get_plugin().get_delimeter(),
                1
            )
        # Validate cluster_id. The read of /clusters/<cluster_id> raises
        # EtcdKeyNotFound if cluster_id is invalid.
        central_store_util.read_key('/clusters/%s' % cluster_id)
        return Response(
            NS.time_series_db_manager.\
            get_plugin().\
            get_metric_stats(
                entity_name,
                metric_name,
                time_interval=time_interval,
                start_time=start_time,
                end_time=end_time
            ),
            status=200,
            mimetype='application/json'
        )
    except (
        AttributeError,
        ValueError,
        etcd.EtcdException,
        SyntaxError,
        TypeError,
        urllib3.exceptions.HTTPError,
        TendrlPerformanceMonitoringException
    ) as ex:
        return Response(str(ex), status=500, mimetype='application/json')
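
# The manual walk over request.args.items() above can be condensed with
# Flask's request.args.get(), which returns None for missing parameters.
# A minimal, hypothetical helper sketching that alternative (not part of
# the original handler):
def _parse_time_window(args):
    # `args` is expected to be a Flask request.args-style mapping.
    return (
        args.get('start_time'),
        args.get('end_time'),
        args.get('interval')
    )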
def get_cluster_summary(self, cluster_id, cluster_name):
    ret_val = {}
    cluster_node_ids = central_store_util.get_cluster_node_ids(cluster_id)
    ret_val['services_count'] = self.get_services_count(
        cluster_node_ids
    )
    volumes = self.get_cluster_volumes(cluster_id)
    bricks = self.get_cluster_bricks(cluster_id)
    ret_val['volume_status_wise_counts'] = \
        self.get_volume_status_wise_counts(
            cluster_id,
            volumes
        )
    ret_val['brick_status_wise_counts'] = \
        self.get_brick_status_wise_counts(
            cluster_id,
            bricks
        )
    ret_val['most_used_volumes'] = self.get_most_used_volumes(
        cluster_name,
        volumes
    )
    ret_val['throughput'] = self.get_cluster_throughput(
        'cluster_network',
        central_store_util.get_cluster_node_contexts(cluster_id),
        cluster_id
    )
    ret_val['most_used_bricks'] = self.get_most_used_bricks(
        bricks
    )
    connection_active = 0
    try:
        connection_active = central_store_util.read_key(
            '/clusters/%s/GlobalDetails/connection_active' % cluster_id
        ).value
    except EtcdKeyNotFound:
        pass
    ret_val['connection_active'] = connection_active or 0
    connection_count = 0
    try:
        connection_count = central_store_util.read_key(
            '/clusters/%s/GlobalDetails/connection_count' % cluster_id
        ).value
    except EtcdKeyNotFound:
        pass
    ret_val['connection_count'] = connection_count or 0
    return ret_val
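
# Illustrative only: the keys get_cluster_summary() populates. The nested
# values here are placeholders, not real data; the actual values come from
# the helper methods and etcd reads above.
_EXAMPLE_CLUSTER_SUMMARY = {
    'services_count': {},
    'volume_status_wise_counts': {},
    'brick_status_wise_counts': {},
    'most_used_volumes': [],
    'throughput': [],
    'most_used_bricks': [],
    'connection_active': 0,
    'connection_count': 0,
}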
def get_cluster_volume_ids(self, cluster_id):
    volume_ids = []
    try:
        etcd_volume_ids = central_store_util.read_key(
            '/clusters/%s/Volumes' % cluster_id
        )
    except EtcdKeyNotFound:
        return volume_ids
    for etcd_volume in etcd_volume_ids.leaves:
        etcd_volume_contents = etcd_volume.key.split('/')
        # e.g. /clusters/eb3ce823-70e8-418f-bdc4-d0124ae926f8/Volumes/
        #      abce1d94-3918-4faf-bf70-9eee07696da2
        if len(etcd_volume_contents) == 5:
            volume_ids.append(etcd_volume_contents[4])
    return volume_ids
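
# The read-and-split pattern above also appears in get_cluster_pool_ids()
# and get_rbd_names() below. A hypothetical helper (not in the original
# module) capturing it, assuming central_store_util.read_key returns a
# python-etcd result whose .leaves yield objects with a .key attribute:
def _child_names(parent_key, depth):
    # `depth` is the expected number of '/'-separated components in a
    # direct child's key, mirroring the len(...) == 5 / == 7 checks used
    # in the methods around this sketch.
    names = []
    try:
        result = central_store_util.read_key(parent_key)
    except EtcdKeyNotFound:
        return names
    for leaf in result.leaves:
        parts = leaf.key.split('/')
        if len(parts) == depth:
            names.append(parts[depth - 1])
    return names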
def get_cluster_pool_ids(self, cluster_id):
    pools = []
    try:
        etcd_pools = central_store_util.read_key(
            '/clusters/%s/Pools/' % cluster_id
        )
        for etcd_pool in etcd_pools.leaves:
            # e.g. /clusters/a88ada59-f52b-4608-9311-96cccfbbbf6a/Pools/0
            pool_key_contents = etcd_pool.key.split('/')
            if len(pool_key_contents) == 5:
                pools.append(pool_key_contents[4])
    except (EtcdKeyNotFound, TendrlPerformanceMonitoringException):
        pass
    return pools
def get_cluster_bricks(self, cluster_id):
    ret_val = {}
    try:
        etcd_bricks = central_store_util.read_key(
            '/clusters/%s/Bricks/all' % cluster_id
        )
    except EtcdKeyNotFound:
        return ret_val
    for etcd_brick in etcd_bricks.leaves:
        try:
            etcd_brick_key_contents = etcd_brick.key.split('/')
            brick = etcd_read_key(
                '/clusters/%s/Bricks/all/%s' % (
                    cluster_id,
                    etcd_brick_key_contents[5]
                )
            )
            if 'vol_id' not in brick:
                continue
            if (
                'utilization' in brick and
                'brick_path' in brick
            ):
                brick['utilization']['vol_name'] = \
                    central_store_util.get_volume_name(
                        cluster_id,
                        brick['vol_id']
                    )
                brick['utilization']['cluster_name'] = \
                    central_store_util.get_cluster_name(cluster_id)
                brick['utilization']['brick_path'] = \
                    brick['brick_path']
                brick['utilization']['hostname'] = \
                    brick['hostname']
            ret_val[etcd_brick_key_contents[5]] = brick
        except EtcdKeyNotFound as ex:
            Event(
                ExceptionMessage(
                    priority="debug",
                    publisher=NS.publisher_id,
                    payload={
                        "message": "Error fetching details for %s"
                                   " brick" % etcd_brick.key,
                        "exception": ex
                    }
                )
            )
            continue
    return ret_val
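
# get_cluster_bricks() assumes each brick record read from etcd is a dict
# carrying at least the fields touched above. A purely illustrative shape
# (all values are placeholders):
_EXAMPLE_BRICK = {
    'vol_id': '<volume-uuid>',
    'brick_path': '<path-to-brick>',
    'hostname': '<node-fqdn>',
    'utilization': {},  # enriched above with vol_name, cluster_name, etc.
}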
def get_rbd_names(self, cluster_id, pool_ids):
    rbd_names = {}
    for pool_id in pool_ids:
        try:
            etcd_pool_rbds = central_store_util.read_key(
                '/clusters/%s/Pools/%s/Rbds' % (
                    cluster_id,
                    pool_id
                )
            )
            for etcd_pool_rbd in etcd_pool_rbds.leaves:
                etcd_pool_rbd_key_contents = etcd_pool_rbd.key.split('/')
                # e.g. /clusters/a88ada59-f52b-4608-9311-96cccfbbbf6a/
                #      Pools/0/Rbds/MyBlockDevice
                if len(etcd_pool_rbd_key_contents) == 7:
                    pool_rbds = rbd_names.get(pool_id, [])
                    pool_rbds.append(etcd_pool_rbd_key_contents[6])
                    rbd_names[pool_id] = pool_rbds
        except (EtcdKeyNotFound, TendrlPerformanceMonitoringException):
            continue
    return rbd_names
def get_volume_status_wise_counts(self, cluster_id, volumes):
    volume_status_wise_counts = {
        'down': 0,
        'total': 0,
        'degraded': 0,
        pm_consts.CRITICAL_ALERTS: 0,
        pm_consts.WARNING_ALERTS: 0
    }
    # Needs to be tested
    for vol_id, vol_det in volumes.iteritems():
        if 'Started' not in vol_det.get('status', ''):
            volume_status_wise_counts['down'] = \
                volume_status_wise_counts['down'] + 1
        volume_status_wise_counts['total'] = \
            volume_status_wise_counts['total'] + 1
    volumes_up_degraded = 0
    try:
        volumes_up_degraded = central_store_util.read_key(
            '/clusters/%s/GlobalDetails/volume_up_degraded' % cluster_id
        ).value
    except EtcdKeyNotFound:
        pass
    volume_status_wise_counts['degraded'] = \
        int(volumes_up_degraded or 0)
    crit_alerts, warn_alerts = parse_resource_alerts(
        'volume',
        pm_consts.CLUSTER,
        cluster_id=cluster_id
    )
    volume_status_wise_counts[
        pm_consts.CRITICAL_ALERTS
    ] = len(crit_alerts)
    volume_status_wise_counts[
        pm_consts.WARNING_ALERTS
    ] = len(warn_alerts)
    return volume_status_wise_counts
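
# Regarding the "Needs to be tested" note above: a small, standalone sketch
# of just the down/total counting loop (the etcd and alert lookups are out
# of its scope). This is a hypothetical helper with example data, not part
# of the original module.
def _count_down_and_total(volumes):
    down = total = 0
    for _vol_id, vol_det in volumes.items():
        if 'Started' not in vol_det.get('status', ''):
            down += 1
        total += 1
    return down, total

# e.g. one stopped volume out of three:
# _count_down_and_total({
#     'v1': {'status': 'Started'},
#     'v2': {'status': 'Started'},
#     'v3': {'status': 'Stopped'},
# }) == (1, 3)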