class _GitMetricCollector(object):
    """Collects Monarch metrics describing the state of one git checkout.

    Constructor arguments:
        gitdir: Path to the Git directory to collect metrics for; a
            leading tilde is expanded to the user's home directory.
        metric_path: The Monarch metric path to report to.
    """

    _commit_hash_metric = metrics.StringMetric(
        'git/hash', description='Current Git commit hash.')
    _timestamp_metric = metrics.GaugeMetric(
        'git/timestamp',
        description='Current Git commit time as seconds since Unix Epoch.')
    _unstaged_changes_metric = metrics.GaugeMetric(
        'git/unstaged_changes', description='Unstaged Git changes.')

    def __init__(self, gitdir, metric_path):
        self._metric_path = metric_path
        self._gitdir = gitdir
        # Every metric reported by this collector is tagged with the
        # (unexpanded) repo path so points from different checkouts differ.
        self._fields = {'repo': gitdir}
        self._gitrepo = _GitRepo(os.path.expanduser(gitdir))

    def collect(self):
        """Collect metrics."""
        try:
            self._collect_commit_hash_metric()
            self._collect_timestamp_metric()
            self._collect_unstaged_changes_metric()
        except subprocess.CalledProcessError as err:
            # Best effort: a broken/missing checkout must not abort the
            # surrounding collection cycle, so log and move on.
            logger.warning(u'Error collecting git metrics for %s: %s',
                           self._gitdir, err)

    def _collect_commit_hash_metric(self):
        # Report the checkout's current commit hash.
        sha = self._gitrepo.get_commit_hash()
        logger.debug(u'Collecting Git hash %r for %r', sha, self._gitdir)
        self._commit_hash_metric.set(sha, self._fields)

    def _collect_timestamp_metric(self):
        # Report the current commit's time as seconds since the epoch.
        ts = self._gitrepo.get_commit_time()
        logger.debug(u'Collecting Git timestamp %r for %r', ts, self._gitdir)
        self._timestamp_metric.set(ts, self._fields)

    def _collect_unstaged_changes_metric(self):
        # Added and deleted line counts are reported as two points on the
        # same metric, distinguished by the change_type field.
        added, deleted = self._gitrepo.get_unstaged_changes()
        for change_type, count in (('added', added), ('deleted', deleted)):
            self._unstaged_changes_metric.set(
                count, fields=dict(change_type=change_type, **self._fields))
'dev/proc/load_average', description='Number of processes currently '
    'in the system run queue.')

# ts_mon pipeline uses backend clocks when assigning timestamps to metric
# points. By comparing point timestamp to the point value (i.e. time by
# machine's local clock), we can potentially detect some anomalies (clock
# drift, unusually high metrics pipeline delay, completely wrong clocks, etc).
#
# It is important to gather this metric right before the flush.
_unix_time_metric = metrics.GaugeMetric(
    'dev/unix_time',
    description='Number of milliseconds since epoch'
    ' based on local machine clock.')

# Static facts about the host OS and Python runtime, reported as strings.
_os_name_metric = metrics.StringMetric(
    'proc/os/name',
    description='OS name on the machine')

_os_version_metric = metrics.StringMetric(
    'proc/os/version',
    description='OS version on the machine')

_os_arch_metric = metrics.StringMetric(
    'proc/os/arch',
    description='OS architecture on this machine')

_python_arch_metric = metrics.StringMetric(
    'proc/python/arch',
    description='python userland '
    'architecture on this machine')


def collect_uptime():
    """Report seconds since boot to the uptime metric.

    Relies on module-level `_uptime_metric` and `_BOOT_TIME` defined
    elsewhere in this file; presumably `_BOOT_TIME` is the boot time in
    seconds since the epoch — TODO confirm.
    """
    _uptime_metric.set(int(time.time() - _BOOT_TIME))
import yaml

from chromite.lib import cros_logging as logging
from chromite.lib import metrics

logger = logging.getLogger(__name__)

# YAML summary written after each puppet run; the metrics below are
# presumably populated from its contents — confirm against the collector
# that reads this file.
LAST_RUN_FILE = '/var/lib/cros_puppet/state/last_run_summary.yaml'

# Version of the puppet configuration in use.
_config_version_metric = metrics.GaugeMetric(
    'puppet/version/config',
    description='The version of the puppet configuration.'
    ' By default this is the time that the configuration was parsed')

# Version of the installed puppet client.
_puppet_version_metric = metrics.StringMetric(
    'puppet/version/puppet',
    description='Version of puppet client installed.')

# Change counts from the last run, broken down by outcome.
_events_metric = metrics.GaugeMetric(
    'puppet/events',
    description='Number of changes the puppet client made to the system in its'
    ' last run, by success or failure')

# Resource counts known to the client in its last run.
_resources_metric = metrics.GaugeMetric(
    'puppet/resources',
    description='Number of resources known by the puppet client in its last'
    ' run')

# Timing breakdown of the last run.
_times_metric = metrics.FloatMetric(
    'puppet/times',
    description='Time taken to perform various parts of the last puppet run')

# Seconds elapsed since the last run.
_age_metric = metrics.FloatMetric(
    'puppet/age',
    description='Time since last run')
(_net_if_mtu_metric, 'mtu'),
)


def _collect_net_if_stats():
    """Collect metrics for network interface stats."""
    for nic, stats in psutil.net_if_stats().items():
        # Skip virtual interfaces (bridges, VPNs, etc.); only physical
        # NICs are reported.
        if _is_virtual_netif(nic):
            continue
        fields = {'interface': nic}
        # _net_if_metrics pairs each metric with the psutil attribute name
        # that supplies its value.
        for metric, counter_name in _net_if_metrics:
            metric.set(getattr(stats, counter_name), fields=fields)


_net_if_addrs_metrics = metrics.StringMetric(
    'dev/net/address',
    description='Network address of physical network interfaces.')

# Maps psutil/socket address-family constants to the string reported in
# the metric's family field.
_family_field_strings = {
    psutil.AF_LINK: 'AF_LINK',
    socket.AF_INET: 'AF_INET',
    socket.AF_INET6: 'AF_INET6',
}


def _collect_net_if_addrs():
    """Collects network addresses as metrics."""
    for nic, addresses in psutil.net_if_addrs().items():
        # Same physical-interface filter as _collect_net_if_stats.
        if _is_virtual_netif(nic):
            continue
        for address in addresses:
            fields = {