def amp_missed_heartbeats(self, event):
    """Aggregate missed amphora heartbeat events.

    Counts occurrences per amphora id, grouped by date, from the search
    results attached to *event*.

    @param event: event whose .results entries expose get(1) (date string)
                  and get(2) (amphora id).
    @return: dict of {date: {amp_id: count}} sorted by date, with each
             per-date dict sorted by count (descending), or None if there
             were no results.
    """
    missed_heartbeats = {}
    for r in event.results:
        ts_date = r.get(1)
        amp_id = r.get(2)
        # nested counter: date -> amp_id -> occurrences
        amps = missed_heartbeats.setdefault(ts_date, {})
        amps[amp_id] = amps.get(amp_id, 0) + 1

    # bail out before doing any sorting work if nothing matched
    if not missed_heartbeats:
        return

    # sort each amp by occurrences (most frequent first)
    for ts_date, amps in missed_heartbeats.items():
        missed_heartbeats[ts_date] = utils.sorted_dict(amps,
                                                       key=lambda e: e[1],
                                                       reverse=True)

    # then sort by date
    return utils.sorted_dict(missed_heartbeats)
def _all(self):
    """Return a dict of all container images matched (core + other),
    keyed by short name with version values.

    Results are cached on first call. Only images actually in use by a
    container are included.

    NOTE(review): returns None (not {}) when no images are listed —
    looks inconsistent with the package/snap equivalents; confirm
    callers tolerate both.
    """
    if self._all_images:
        return self._all_images

    used_images = self.get_container_images()
    image_list = self.cli.docker_images()
    if not image_list:
        return

    for line in image_list:
        for expr in self.core_image_exprs + self.other_image_exprs:
            fullname, shortname, version = self._match_image(expr, line)
            # skip non-matches and images not in use by any container
            if shortname is None or (fullname, version) not in used_images:
                continue

            if expr in self.core_image_exprs:
                target = self._core_images
            else:
                target = self._other_images

            target[shortname] = version

    # ensure sorted
    self._core_images = sorted_dict(self._core_images)
    self._other_images = sorted_dict(self._other_images)
    self._all_images = sorted_dict(dict(self._core_images,
                                        **self._other_images))
    return self._all_images
def _all(self):
    """Return a dict of all packages matched (core + other) from the
    output of dpkg -l, keyed by name with version values.

    Results are cached on first call.
    """
    if self._all_packages:
        return self._all_packages

    dpkg_l = self.cli.dpkg_l()
    if not dpkg_l:
        return self._all_packages

    for line in dpkg_l:
        for expr in self.core_pkg_exprs + self.other_pkg_exprs:
            name, version = self._match_package(expr, line)
            if name is None:
                continue

            if expr in self.core_pkg_exprs:
                self._core_packages[name] = version
            else:
                self._other_packages[name] = version

    # ensure sorted
    self._core_packages = sorted_dict(self._core_packages)
    self._other_packages = sorted_dict(self._other_packages)
    self._all_packages = sorted_dict(dict(self._core_packages,
                                          **self._other_packages))
    return self._all_packages
def connections(self):
    """Tally connection information from the search results.

    @return: dict with two keys: 'host' mapping host -> connection count,
             and 'client' mapping user -> {client_name: count}, where
             per-user client counts are sorted descending (only done when
             at least one host entry was found).
    """
    conns = {'host': {}, 'client': {}}
    sd = self.connections_searchdef
    for results in self.results.find_sequence_sections(sd).values():
        for result in results:
            if result.tag != sd.body_tag:
                continue

            host = result.get(1)
            conns['host'][host] = conns['host'].get(host, 0) + 1

            # field positions differ between the 3.6.x and 3.8.x log
            # formats so fall back accordingly.
            user = result.get(2)
            if user is None:
                user = result.get(3)

            client_name = result.get(4)
            per_user = conns['client'].setdefault(user, {})
            per_user[client_name] = per_user.get(client_name, 0) + 1

    if conns['host']:
        for user, clients in conns['client'].items():
            conns['client'][user] = sorted_dict(clients,
                                                key=lambda e: e[1],
                                                reverse=True)

    return conns
def __summary_local_osds(self):
    """Return a sorted dict summarising all local OSDs, or None when
    there are none."""
    if not self.local_osds:
        return

    summary = {}
    for osd in self.local_osds:
        summary.update(osd.to_dict())

    return sorted_dict(summary)
def osds_pgs_above_max(self):
    """Return OSDs whose placement group count exceeds OSD_PG_MAX_LIMIT,
    sorted by pg count (highest first)."""
    over_limit = {osd: num_pgs
                  for osd, num_pgs in self.osds_pgs.items()
                  if num_pgs > self.OSD_PG_MAX_LIMIT}
    return utils.sorted_dict(over_limit, key=lambda e: e[1], reverse=True)
def src_migration(self, event):
    """
    Source migration is defined as a sequence so that we can capture some
    of in the interim events such as memory and disk progress.

    @param event: search event whose results are grouped into per-vm
                  sections by self.migration_seq_info().
    @return: tuple of (migration_info, event.section) where
             migration_info maps vm uuid -> list of per-migration dicts
             containing start/end/duration and, where available,
             resources, iterations and regression counts.
    """
    migration_info = {}
    # column indexes of the progress values in the search results
    info_idxs = {'memory': 4, 'disk': 5}
    results = self.migration_seq_info(event, 3, info_idxs,
                                      incl_time_in_date=True)
    for vm_uuid, sections in results.items():
        for section in sections.values():
            # accumulated progress samples per resource type
            samples = {}
            start = None
            end = None
            # iterate dates in sorted order so that first/last seen give
            # us the start/end timestamps of the migration.
            for date, info in utils.sorted_dict(section).items():
                if start is None:
                    start = date

                end = date
                for rtype, values in info.items():
                    if rtype not in samples:
                        samples[rtype] = []

                    samples[rtype] += [int(i) for i in values]

            # assumes dates are "%Y-%m-%d %H:%M:%S" strings — implied by
            # incl_time_in_date=True above.
            _start = datetime.strptime(start, "%Y-%m-%d %H:%M:%S")
            _end = datetime.strptime(end, "%Y-%m-%d %H:%M:%S")
            duration = round(float((_end - _start).total_seconds()), 2)
            info = {'start': start, 'end': end, 'duration': duration}
            instance = self.nova.instances.get(vm_uuid)
            if instance and instance.memory_mbytes is not None:
                info['resources'] = {
                    'memory_mbytes': instance.memory_mbytes
                }

            if samples:
                # regressions imply that the progress counter had one or
                # more decreases before increasing again.
                info['regressions'] = {}
                for rtype, values in samples.items():
                    # record iterations once, from the first sample set
                    if 'iterations' not in info:
                        info['iterations'] = len(values)

                    loops = utils.sample_set_regressions(values)
                    info['regressions'][rtype] = loops

            if vm_uuid in migration_info:
                migration_info[vm_uuid].append(info)
            else:
                migration_info[vm_uuid] = [info]

    # section name expected to be live-migration
    return migration_info, event.section
def osds_pgs_suboptimal(self):
    """Return OSDs whose placement group count falls outside the optimal
    range, sorted by pg count (highest first).

    The optimal range is OSD_PG_OPTIMAL_NUM_MIN..OSD_PG_OPTIMAL_NUM_MAX
    with a 30% margin allowed on either side.

    @return: dict of osd -> num_pgs sorted descending by num_pgs.
    """
    # margins are loop-invariant so compute them once rather than per-osd
    margin_high = self.OSD_PG_OPTIMAL_NUM_MAX * 1.3
    margin_low = self.OSD_PG_OPTIMAL_NUM_MIN * .7
    suboptimal = {osd: num_pgs
                  for osd, num_pgs in self.osds_pgs.items()
                  if num_pgs > margin_high or num_pgs < margin_low}
    return utils.sorted_dict(suboptimal, key=lambda e: e[1], reverse=True)
def service_info(self):
    """Return a dictionary of systemd services grouped by state. """
    grouped = {}
    for name, svc in sorted_dict(self.services).items():
        grouped.setdefault(svc.state, []).append(name)

    return grouped
def _all(self):
    """Return a dict of all snaps matched (core + other), keeping only
    the latest version of each installed snap.

    Results are cached on first call.
    """
    if self._all_snaps:
        return self._all_snaps

    if not self.snap_list_all:
        return {}

    core = {}
    other = {}
    for line in self.snap_list_all:
        for expr in self.core_snap_exprs + self.other_snap_exprs:
            name, version = self._get_snap_info_from_line(line, expr)
            if not name:
                continue

            target = core if expr in self.core_snap_exprs else other
            # only show latest version installed
            # NOTE(review): this is a lexicographic string comparison of
            # version strings — confirm that is intended.
            if name not in target or version > target[name]:
                target[name] = version

    # ensure sorted
    self._core_snaps = sorted_dict(core)
    self._other_snaps = sorted_dict(other)
    self._all_snaps = sorted_dict(dict(core, **other))
    return self._all_snaps
def connection_refused(self, event):
    """Tally apache connection-refused errors per date and address.

    If any port accumulates 5 or more refusals on a given day, a warning
    issue is raised listing the affected ports.

    @param event: event whose .results entries expose get(1)..get(5) as
                  month-name, day, year, address and port.
    @return: dict of {date: {"addr:port": count}} sorted by date.
    """
    events = {}
    context = {}
    for result in event.results:
        # date fields come as (%b month, day, year)
        month = datetime.datetime.strptime(result.get(1), '%b').month
        ts_date = "{}-{}-{}".format(result.get(3), month, result.get(2))
        addr = "{}:{}".format(result.get(4), result.get(5))

        context.setdefault(result.source, []).append(result.linenumber)
        day_events = events.setdefault(ts_date, {})
        day_events[addr] = day_events.get(addr, 0) + 1

    ports_max = {}
    for addrs in events.values():
        for addr, count in addrs.items():
            # allow a small number of connection refused errors on a given
            # day
            if count < 5:
                continue

            port = addr.partition(':')[2]
            ports_max[port] = max(count, ports_max.get(port, count))

    if ports_max:
        msg = ('apache is reporting connection refused errors for the '
               'following ports which could mean some services are not '
               'working properly - {} - please check.'.
               format(','.join(ports_max.keys())))
        IssuesManager().add(OpenstackWarning(msg),
                            IssueContext(**context))

    return sorted_dict(events)
def process_info(self):
    """Return a list of processes associated with services. """
    info = []
    for name, count in sorted_dict(self.processes).items():
        info.append("{} ({})".format(name, count))

    return info