def __init__(self, configuration, monitors):
    """Initializes the manager.

    Note: as a side effect of this call, the log_config variable on each monitor is updated
    to fill in defaults and make paths absolute.
    TODO: This is kind of odd; it would be cleaner to do this elsewhere, more tied to the
    monitors themselves.

    @param configuration: The configuration file containing which log files need to be copied
        listed in the configuration file.
    @param monitors: The list of ScalyrMonitor instances that will be run.  This is needed so
        the manager can be sure to copy the log files generated by the monitors.  Note, the
        log_config for the monitors will be updated (on the monitor) to reflect the filling in
        of defaults and making paths absolute.

    @type configuration: configuration.Configuration
    @type monitors: list<ScalyrMonitor>
    """
    StoppableThread.__init__(self, name='log copier thread')

    self.__config = configuration

    # LogMatcher objects watching for new files to appear.
    self.__log_matchers = CopyingManager.__create_log_matches(configuration, monitors)
    # LogFileProcessors currently reading lines from matched log files, plus a
    # file-path -> LogFileProcessor index over the same set.
    self.__log_processors = []
    self.__log_paths_being_processed = {}

    # Guards the status statistics and __log_matchers -- the only state read by
    # generate_status(), which must be thread safe.
    self.__lock = threading.Lock()

    # The pending AddEventsTask, if any; its AddEventsRequest is retried several times.
    self.__pending_add_events_task = None
    # Index of the next LogFileProcessor whose log lines should be read for transmission.
    self.__current_processor = 0

    # Client used to send the data; populated by the start_manager call.
    self.__scalyr_client = None
    # When we last scanned for new files matching __log_matchers.
    self.__last_new_file_scan_time = 0

    # Statistics surfaced on the status page.
    self.__last_attempt_time = None
    self.__last_success_time = None
    self.__last_attempt_size = None
    self.__last_response = None
    self.__last_response_status = None
    self.__total_bytes_uploaded = 0
    self.__total_errors = 0

    # Fallback start positions for files without a checkpoint; populated by start_manager.
    self.__logs_initial_positions = None
    # Incremented once this object has begun copying files (after the first scan).
    self.__copying_semaphore = threading.Semaphore()
def __init__(self, local=None, host='localhost', port=2000):
    """Creates the debug server thread (not yet started).

    @param local: Optional dict of local variables to expose in the interactive shell.
    @param host: The IP address to serve connections from.
    @param port: The TCP port to listen on.
    """
    StoppableThread.__init__(self, 'debug server thread')
    # Listening socket; created once the thread actually runs.
    self.__server_socket = None
    # Client connections currently open.
    self.__connections = []
    # Locals for the interactive shell (a dict, or None).
    self.__local = local
    # Bind address and port for serving connections.
    self.__host = host
    self.__port = port
def __init__(self, scalyr_client, configuration, logs_initial_positions):
    """Initializes the manager.

    @param scalyr_client: The client to use to send requests to Scalyr.
    @param configuration: The configuration file containing which log files need to be copied.
    @param logs_initial_positions: A dict mapping file paths to the offset within the file to
        begin copying from when no checkpoint exists for that file.  This overrides the
        default behavior of reading from the current end of the file.

    @type scalyr_client: scalyr_client.ScalyrClientSession
    @type configuration: configuration.Configuration
    @type logs_initial_positions: dict
    """
    StoppableThread.__init__(self, name='log copier thread')

    self.__config = configuration
    # Client used to send the data.
    self.__scalyr_client = scalyr_client

    # LogMatcher objects watching for new files to appear.
    self.__log_matchers = configuration.logs
    # LogFileProcessors currently reading lines from matched log files, plus a
    # file-path -> LogFileProcessor index over the same set.
    self.__log_processors = []
    self.__log_paths_being_processed = {}

    # Guards the status statistics and __log_matchers -- the only state read by
    # generate_status(), which must be thread safe.
    self.__lock = threading.Lock()

    # The pending AddEventsTask, if any; its AddEventsRequest is retried several times.
    self.__pending_add_events_task = None
    # Index of the next LogFileProcessor whose log lines should be read for transmission.
    self.__current_processor = 0
    # When we last scanned for new files matching __log_matchers.
    self.__last_new_file_scan_time = 0

    # Statistics surfaced on the status page.
    self.__last_attempt_time = None
    self.__last_success_time = None
    self.__last_attempt_size = None
    self.__last_response = None
    self.__last_response_status = None
    self.__total_bytes_uploaded = 0
    self.__total_errors = 0

    # Fallback start positions for files without a checkpoint.
    self.__logs_initial_positions = logs_initial_positions
    # Incremented once this object has begun copying files (after the first scan).
    self.__copying_semaphore = threading.Semaphore()
def __init__(self, monitor_config, logger, sample_interval_secs=30):
    """Constructs an instance of the monitor.

    Derived classes may override this method, but they can instead override _initialize,
    which is invoked during initialization.
    TODO: Determine which approach is preferred by developers and recommend that.

    If a derived class overrides __init__, it must invoke this method in the override.

    This method sets default values for all public attributes (log_config, disabled, etc.),
    which the derived class may then overwrite.

    Derived classes must raise an Exception (or something derived from Exception) in this
    method if the provided configuration is invalid, or on any other error known at this
    time that prevents the module from running.

    @param monitor_config: A dict containing the configuration information for this module
        instance from the configuration file.  The only valid values are strings, ints,
        longs, floats, and booleans.
    @param logger: The logger to use for output.
    @param sample_interval_secs: The interval in seconds to wait between gathering samples.
    """
    # Logger this monitor uses for all information and metric values.
    self._logger = logger
    self.monitor_name = monitor_config['module']
    # MonitorConfig object built from this monitor instance's configuration.
    self._config = MonitorConfig(monitor_config, monitor_module=self.monitor_name)
    log_path = self.monitor_name.split('.')[-1] + '.log'
    self.disabled = False
    # TODO: For now, just reuse the logger's naming of this monitor.  It should eventually
    # be more dynamic so the monitor can override it.
    if logger.component.startswith('monitor:'):
        self.monitor_name = logger.component[8:]
    else:
        self.monitor_name = logger.component
    self.log_config = {
        'path': log_path,
    }
    # Guards the counters read by other threads (reported_lines and errors).  It does not
    # cover _run_state, which has its own lock.
    self.__lock = Lock()
    self.__reported_lines = 0
    self.__errors = 0
    self._sample_interval_secs = sample_interval_secs
    self.__metric_log_open = False
    self._initialize()
    StoppableThread.__init__(self, name='metric thread')
def __init__(self, monitor_config, logger, sample_interval_secs=30):
    """Constructs an instance of the monitor.

    Derived classes may override this method, but they can instead override _initialize,
    which is invoked during initialization.
    TODO: Determine which approach is preferred by developers and recommend that.

    If a derived class overrides __init__, it must invoke this method in the override.

    This method sets default values for all public attributes (log_config, disabled, etc.),
    which the derived class may then overwrite.

    Derived classes must raise an Exception (or something derived from Exception) in this
    method if the provided configuration is invalid, or on any other error known at this
    time that prevents the module from running.

    @param monitor_config: A dict containing the configuration information for this module
        instance from the configuration file.  The only valid values are strings, ints,
        longs, floats, and booleans.
    @param logger: The logger to use for output.
    @param sample_interval_secs: The interval in seconds to wait between gathering samples.
    """
    # MonitorConfig object built from this monitor instance's configuration.
    self._config = MonitorConfig(monitor_config)
    # Logger this monitor uses for all information and metric values.
    self._logger = logger
    self.monitor_name = monitor_config['module']
    log_path = self.monitor_name.split('.')[-1] + '.log'
    self.disabled = False
    # TODO: For now, just reuse the logger's naming of this monitor.  It should eventually
    # be more dynamic so the monitor can override it.
    self.monitor_name = (logger.component[8:]
                         if logger.component.startswith('monitor:')
                         else logger.component)
    self.log_config = {'path': log_path}
    # Guards the counters read by other threads (reported_lines and errors).  It does not
    # cover _run_state, which has its own lock.
    self.__lock = Lock()
    self.__reported_lines = 0
    self.__errors = 0
    self._sample_interval_secs = sample_interval_secs
    self.__metric_log_open = False
    self._initialize()
    StoppableThread.__init__(self, name='metric thread')
def __init__(self, local, client_connection, host, port):
    """Initializes the connection.

    @param local: The dict of local variables to populate into the environment the
        interactive shell is run in.
    @param client_connection: The network connection.
    @param host: The client's IP address.
    @param port: The client's port.

    @type local: dict
    @type host: str
    @type port: int
    """
    StoppableThread.__init__(self, 'Debug connection thread')
    self.__local = local
    self.__client_connection = client_connection
    self.__host = host
    self.__port = port
def __init__(self, configuration, platform_controller):
    """Initializes the manager.

    @param configuration: The agent configuration that controls what monitors should be run.
    @param platform_controller: The controller for this server.

    @type configuration: scalyr_agent.Configuration
    @type platform_controller: scalyr_agent.platform_controller.PlatformController
    """
    StoppableThread.__init__(self, name='monitor manager thread')
    if not configuration.disable_monitors_creation:
        self.__monitors = MonitorsManager.__create_monitors(configuration, platform_controller)
    else:
        # Monitor creation is switched off by configuration; run with an empty set.
        log.log(
            scalyr_logging.DEBUG_LEVEL_0,
            "Creation of Scalyr Monitors disabled. No monitors created."
        )
        self.__monitors = []

    self.__disable_monitor_threads = configuration.disable_monitor_threads
    # Monitors that have actually been started; populated elsewhere.
    self.__running_monitors = []
    self.__user_agent_callback = None
    self._user_agent_refresh_interval = configuration.user_agent_refresh_interval
def __init__(self2):
    """Creates the fake-clock advancer as a daemon thread."""
    # NOTE(review): the receiver is named `self2`, presumably to avoid shadowing a `self`
    # from an enclosing scope -- confirm against the enclosing definition.
    StoppableThread.__init__(
        self2, name='FakeClockAdvancerThread', is_daemon=True)
def __init__(self, capture_interval=10, *args, **kwargs):
    """Creates the thread.

    @param capture_interval: Seconds between captures.

    NOTE(review): the original implementation accepted ``capture_interval`` (and
    ``*args`` / ``**kwargs``) but discarded them all.  The interval is now stored on the
    instance so the class can actually honor it; the extra arguments are still absorbed
    without being forwarded -- confirm against the callers that this is intentional.
    """
    StoppableThread.__init__(self)
    # Bug fix: keep the requested interval instead of silently dropping it.
    self._capture_interval = capture_interval
def __init__(self2):  # pylint: disable=no-self-argument
    """Creates the fake-clock advancer as a daemon thread."""
    StoppableThread.__init__(self2, name="FakeClockAdvancerThread", is_daemon=True)
def __init__(self, monitor_config, logger, sample_interval_secs=None, global_config=None):
    """Constructs an instance of the monitor.

    Derived classes may override this method, but they can instead override _initialize,
    which is invoked during initialization.
    TODO: Determine which approach is preferred by developers and recommend that.

    If a derived class overrides __init__, it must invoke this method in the override.

    This method sets default values for all public attributes (log_config, disabled, etc.),
    which the derived class may then overwrite.

    Derived classes must raise an Exception (or something derived from Exception) in this
    method if the provided configuration is invalid, or on any other error known at this
    time that prevents the module from running.

    @param monitor_config: A dict containing the configuration information for this module
        instance from the configuration file.  The only valid values are strings, ints,
        longs, floats, and booleans.
    @param logger: The logger to use for output.
    @param sample_interval_secs: The interval in seconds to wait between gathering samples.
        If None, the value is taken from the ``sample_interval`` field of monitor_config if
        present, otherwise from the monitor-wide default ``DEFAULT_SAMPLE_INTERVAL_SECS``.
        Generally pass None here and let the configuration files decide.
    @param global_config: The global configuration object.  Monitors may use or ignore this
        as necessary.
    """
    # Logger this monitor uses for all information and metric values.
    self._logger = logger
    self.monitor_name = monitor_config['module']
    # Global configuration, kept for monitors that need it.
    self._global_config = global_config
    # MonitorConfig object built from this monitor instance's configuration.
    self._config = MonitorConfig(monitor_config, monitor_module=self.monitor_name)
    log_path = self.monitor_name.split('.')[-1] + '.log'
    self.disabled = False
    # TODO: For now, just reuse the logger's naming of this monitor.  It should eventually
    # be more dynamic so the monitor can override it.
    if logger.component.startswith('monitor:'):
        self.monitor_name = logger.component[8:]
    else:
        self.monitor_name = logger.component
    self.log_config = {
        'path': log_path,
    }
    # Guards the counters read by other threads (reported_lines and errors).  It does not
    # cover _run_state, which has its own lock.
    self.__lock = Lock()
    self.__reported_lines = 0
    self.__errors = 0
    # Time between samples: an explicit constructor value wins; otherwise fall back to the
    # configuration (or the monitor-wide default).
    if sample_interval_secs is not None:
        self._sample_interval_secs = sample_interval_secs
    else:
        self._sample_interval_secs = self._config.get(
            'sample_interval', convert_to=float,
            default=ScalyrMonitor.DEFAULT_SAMPLE_INTERVAL_SECS)
    self.__metric_log_open = False
    # Rate limiter on metric-log writes ("leaky bucket"): the write rate is the bucket fill
    # rate (average bytes/sec) and the max burst is the bucket size.  Derived classes may
    # change these.
    self._log_write_rate = self._config.get(
        'monitor_log_write_rate', convert_to=int, default=2000)
    self._log_max_write_burst = self._config.get(
        'monitor_log_max_write_burst', convert_to=int, default=100000)
    # Seconds to wait before flushing pending metric-log bytes to disk.  A value above zero
    # reduces disk flushing at the cost of possible data loss if the agent shuts down
    # unexpectedly.
    self._log_flush_delay = self._config.get(
        'monitor_log_flush_delay', convert_to=float, default=0.0, min_value=0)
    self._initialize()
    StoppableThread.__init__(self, name='metric thread')
def __init__(self, configuration, monitors):
    """Initializes the manager.

    Note: as a side effect of this call, the log_config variable on each monitor is updated
    to fill in defaults and make paths absolute.
    TODO: This is kind of odd; it would be cleaner to do this elsewhere, more tied to the
    monitors themselves.

    @param configuration: The configuration file containing which log files need to be copied
        listed in the configuration file.
    @param monitors: The list of ScalyrMonitor instances that will be run.  This is needed so
        the manager can be sure to copy the log files generated by the monitors.  Note, the
        log_config for the monitors will be updated (on the monitor) to reflect the filling in
        of defaults and making paths absolute.

    @type configuration: configuration.Configuration
    @type monitors: list<ScalyrMonitor>
    """
    StoppableThread.__init__(self, name='log copier thread')

    self.__config = configuration
    # Monitors whose generated log files must also be copied.
    self.__monitors = monitors

    # Paths we already have configs for, so that adding in the monitors' log files does not
    # re-add the same path.  That can easily happen when a monitor is used multiple times but
    # every instance writes to the same monitor file.
    self.__all_paths = {}

    # LogMatcher objects watching for new files to appear.
    self.__log_matchers = self.__create_log_matches(configuration, monitors)
    # LogFileProcessors currently reading lines from matched log files, plus a
    # file-path -> LogFileProcessor index over the same set.
    self.__log_processors = []
    self.__log_paths_being_processed = {}

    # Guards the status statistics and __log_matchers -- the only state read by
    # generate_status(), which must be thread safe.
    self.__lock = threading.Lock()

    # The pending AddEventsTask, if any; its AddEventsRequest is retried several times.
    self.__pending_add_events_task = None
    # Index of the next LogFileProcessor whose log lines should be read for transmission.
    self.__current_processor = 0

    # Client used to send the data; populated by the start_manager call.
    self.__scalyr_client = None
    # When we last scanned for new files matching __log_matchers.
    self.__last_new_file_scan_time = 0

    # Statistics surfaced on the status page.
    self.__last_attempt_time = None
    self.__last_success_time = None
    self.__last_attempt_size = None
    self.__last_response = None
    self.__last_response_status = None
    self.__total_bytes_uploaded = 0
    self.__total_errors = 0

    # Fallback start positions for files without a checkpoint; populated by start_manager.
    self.__logs_initial_positions = None
    # Incremented once this object has begun copying files (after the first scan).
    self.__copying_semaphore = threading.Semaphore()

    # Hand every monitor its log watcher last, so everything above is already set up and
    # configured when the monitor receives the call.
    for monitor in monitors:
        monitor.set_log_watcher(self)
def __init__(self):
    """Creates the test thread."""
    StoppableThread.__init__(self, 'Test thread')
    # Counter presumably incremented by the thread's run() -- confirm against the class body.
    self.run_counter = 0
def __init__(self, monitor_config, logger, sample_interval_secs=None, global_config=None):
    """Constructs an instance of the monitor.

    Derived classes may override this method, but they can instead override _initialize,
    which is invoked during initialization.
    TODO: Determine which approach is preferred by developers and recommend that.

    If a derived class overrides __init__, it must invoke this method in the override.

    This method sets default values for all public attributes (log_config, disabled, etc.),
    which the derived class may then overwrite.

    Derived classes must raise an Exception (or something derived from Exception) in this
    method if the provided configuration is invalid, or on any other error known at this
    time that prevents the module from running.

    @param monitor_config: A dict containing the configuration information for this module
        instance from the configuration file.  The only valid values are strings, ints,
        longs, floats, and booleans.
    @param logger: The logger to use for output.
    @param sample_interval_secs: The interval in seconds to wait between gathering samples.
        If None, the value is taken from the ``sample_interval`` field of monitor_config if
        present, otherwise from the monitor-wide default ``DEFAULT_SAMPLE_INTERVAL_SECS``.
        Generally pass None here and let the configuration files decide.
    @param global_config: The global configuration object.  Monitors may use or ignore this
        as necessary.
    """
    # Logger this monitor uses for all information and metric values.
    self._logger = logger
    self.monitor_name = monitor_config['module']
    # Global configuration, kept for monitors that need it.
    self._global_config = global_config
    # MonitorConfig object built from this monitor instance's configuration.
    self._config = MonitorConfig(monitor_config, monitor_module=self.monitor_name)
    log_path = self.monitor_name.split('.')[-1] + '.log'
    self.disabled = False
    # TODO: For now, just reuse the logger's naming of this monitor.  It should eventually
    # be more dynamic so the monitor can override it.
    if logger.component.startswith('monitor:'):
        self.monitor_name = logger.component[8:]
    else:
        self.monitor_name = logger.component
    self.log_config = {
        'path': log_path,
    }
    # Guards the counters read by other threads (reported_lines and errors).  It does not
    # cover _run_state, which has its own lock.
    self.__lock = Lock()
    self.__reported_lines = 0
    self.__errors = 0
    # Time between samples: an explicit constructor value wins; otherwise fall back to the
    # configuration (or the monitor-wide default).
    if sample_interval_secs is not None:
        self._sample_interval_secs = sample_interval_secs
    else:
        self._sample_interval_secs = self._config.get(
            'sample_interval', convert_to=float,
            default=ScalyrMonitor.DEFAULT_SAMPLE_INTERVAL_SECS)
    self.__metric_log_open = False
    # Rate limiter on metric-log writes ("leaky bucket"): the write rate is the bucket fill
    # rate (average bytes/sec) and the max burst is the bucket size.  Derived classes may
    # change these.
    self._log_write_rate = self._config.get(
        'monitor_log_write_rate', convert_to=int, default=2000)
    self._log_max_write_burst = self._config.get(
        'monitor_log_max_write_burst', convert_to=int, default=100000)
    # Seconds to wait before flushing pending metric-log bytes to disk.  A value above zero
    # reduces disk flushing at the cost of possible data loss if the agent shuts down
    # unexpectedly.
    self._log_flush_delay = self._config.get(
        'monitor_log_flush_delay', convert_to=float, default=0.0, min_value=0)
    # If True, shorten the sleep between gather_sample calls by the time spent inside
    # gather_sample, instead of always sleeping the full sample interval.
    self._adjust_sleep_by_gather_time = False
    self._initialize()
    StoppableThread.__init__(self, name='metric thread')
def __init__(self):
    """Creates the test thread."""
    StoppableThread.__init__(self, "Test thread")
    # Counter presumably incremented by the thread's run() -- confirm against the class body.
    self.run_counter = 0