def _initialize(self): self._journal_path = self._config.get("journal_path") if len(self._journal_path) == 0: self._journal_path = None if self._journal_path and not os.path.exists(self._journal_path): raise BadMonitorConfiguration( "journal_path '%s' does not exist or is not a directory" % self._journal_path, "journal_path", ) self._id = self._config.get("id") self._checkpoint_name = self.module_name # handle case where id is either None or empty if self._id: self._checkpoint_name = self._id data_path = "" if self._global_config: data_path = self._global_config.agent_data_path self._checkpoint_file = os.path.join(data_path, "journald-checkpoints.json") self._staleness_threshold_secs = self._config.get( "staleness_threshold_secs") self._journal = None self._poll = None # override the sample_interval self.set_sample_interval(self._config.get("journal_poll_interval")) self.log_config["parser"] = "journald" self._extra_fields = self._config.get("journal_fields") self._last_cursor = None matches = self._config.get("journal_matches") if matches is None: matches = [] match_re = re.compile("^([^=]+)=(.+)$") for match in matches: if not match_re.match(match): raise BadMonitorConfiguration( "journal matchers expects the following format for each element: FIELD=value. Found: %s" % match, "journal_matches", ) self._matches = matches if self._extra_fields is None: self._extra_fields = { "_SYSTEMD_UNIT": "unit", "_PID": "pid", "_MACHINE_ID": "machine_id", "_BOOT_ID": "boot_id", "_SOURCE_REALTIME_TIMESTAMP": "timestamp", }
def _initialize(self):
    """Pull the garbage-collection monitoring options out of the config.

    @raise BadMonitorConfiguration: if ``object_dump_types`` contains a
        non-string entry.
    """
    # validate the list of types to dump objects for
    configured_types = self._config.get("object_dump_types")
    if configured_types is None:
        configured_types = []
    for entry in configured_types:
        if isinstance(entry, six.string_types):
            continue
        raise BadMonitorConfiguration(
            "object_dump_types contains a non-string value: %s"
            % six.text_type(entry),
            "object_dump_types",
        )
    # and convert the JsonArray to a python list
    self._object_dump_types = list(configured_types)

    # original debug flags of the gc
    self._old_debug_flags = None

    self._monitor_all_unreachable = self._config.get(
        "monitor_all_unreachable_objects"
    )
    self._max_type_dump = self._config.get("max_type_dump")
    self._max_object_dump = self._config.get("max_object_dump")
    self._monitor_garbage = self._config.get("monitor_garbage_objects")
    self._monitor_live = self._config.get("monitor_live_objects")
def _initialize(self):
    """Pull the garbage-collection monitoring options out of the config.

    @raise BadMonitorConfiguration: if ``object_dump_types`` contains a
        non-string entry.
    """
    # validate the list of types to dump objects for
    object_dump_types = self._config.get('object_dump_types')
    if object_dump_types is None:
        object_dump_types = []
    for t in object_dump_types:
        # Use six.string_types / six.text_type (not the Python-2-only
        # `basestring` / bare `str`) so the check works on Python 2 and 3,
        # matching the sibling monitor's implementation.  Also pass the
        # offending config field name, as the other raises in this file do.
        if not isinstance(t, six.string_types):
            raise BadMonitorConfiguration(
                "object_dump_types contains a non-string value: %s"
                % six.text_type(t),
                "object_dump_types",
            )
    # and convert the JsonArray to a python list
    self._object_dump_types = [t for t in object_dump_types]

    # original debug flags of the gc
    self._old_debug_flags = None

    self._monitor_all_unreachable = self._config.get(
        'monitor_all_unreachable_objects')
    self._max_type_dump = self._config.get('max_type_dump')
    self._max_object_dump = self._config.get('max_object_dump')
def _initialize(self):
    """Configure the journald monitor (log-rotation variant) from its config.

    Validates ``journal_path`` and ``journal_matches``, sanitizes configured
    journal field names, sets up checkpoint bookkeeping, and hands log
    configuration over to a LogConfigManager.

    @raise BadMonitorConfiguration: if ``journal_path`` does not exist, or a
        ``journal_matches`` entry is not of the form ``FIELD=value``.
    """
    self._max_log_rotations = self._config.get("max_log_rotations")
    self._max_log_size = self._config.get("max_log_size")

    self._journal_path = self._config.get("journal_path")
    # A missing or empty path means "use the system default journal".
    # `not x` also guards against None, which `len(x) == 0` would crash on.
    if not self._journal_path:
        self._journal_path = None
    if self._journal_path and not os.path.exists(self._journal_path):
        raise BadMonitorConfiguration(
            "journal_path '%s' does not exist or is not a directory"
            % self._journal_path,
            "journal_path",
        )

    self._id = self._config.get("id")
    self._checkpoint_name = self.module_name
    # handle case where id is either None or empty
    if self._id:
        self._checkpoint_name = self._id

    data_path = ""
    if self._global_config:
        data_path = self._global_config.agent_data_path
    self._checkpoint_file = os.path.join(data_path, "journald-checkpoints.json")

    self._staleness_threshold_secs = self._config.get("staleness_threshold_secs")

    self._journal = None
    self._poll = None
    # override the sample_interval
    self.set_sample_interval(self._config.get("journal_poll_interval"))
    self.log_config["parser"] = "journald"

    self._extra_fields = self._config.get("journal_fields")
    # Sanitize the configured field names.  Iterate over a snapshot of the
    # keys because the dict is mutated inside the loop (mutating a dict
    # while iterating it directly raises RuntimeError on Python 3), and
    # delete the ORIGINAL key — the previous code deleted the sanitized key
    # it had just inserted, which discarded the fix and left the invalid
    # name in place.
    if self._extra_fields is not None:
        for field_name in list(self._extra_fields.keys()):
            fixed_field_name = (
                scalyr_logging.AgentLogger.__force_valid_metric_or_field_name(
                    field_name, is_metric=False
                )
            )
            if field_name != fixed_field_name:
                self._extra_fields[fixed_field_name] = self._extra_fields[field_name]
                del self._extra_fields[field_name]

    self._last_cursor = None

    matches = self._config.get("journal_matches")
    if matches is None:
        matches = []

    # Each matcher must look like FIELD=value.
    match_re = re.compile("^([^=]+)=(.+)$")
    for match in matches:
        if not match_re.match(match):
            raise BadMonitorConfiguration(
                "journal matchers expects the following format for each element: FIELD=value. Found: %s"
                % match,
                "journal_matches",
            )

    self._matches = matches

    # Fall back to the default journal-field -> attribute-name mapping when
    # the config did not provide one.
    if self._extra_fields is None:
        self._extra_fields = {
            "_SYSTEMD_UNIT": "unit",
            "_PID": "pid",
            "_MACHINE_ID": "machine_id",
            "_BOOT_ID": "boot_id",
            "_SOURCE_REALTIME_TIMESTAMP": "timestamp",
        }

    # Closing the default logger since we aren't going to use it, instead
    # allowing LogConfigManager to provide us with loggers
    self._logger.closeMetricLog()
    self.log_manager = LogConfigManager(
        self._global_config,
        JournaldLogFormatter(),
        self._max_log_size,
        self._max_log_rotations,
    )
    self.log_config = self.log_manager.get_config(".*")