Example #1
    def test_atomic_write_dict_as_json_file(self):
        info = {'a': "hi"}
        scalyr_util.atomic_write_dict_as_json_file(self.__path,
                                                   self.__path + '~', info)

        json_object = scalyr_util.read_file_as_json(self.__path)
        self.assertEquals(json_object, JsonObject(a='hi'))
Example #2
    def test_atomic_write_dict_as_json_file(self):
        info = {"a": "hi"}
        scalyr_util.atomic_write_dict_as_json_file(self.__path,
                                                   self.__path + "~", info)

        json_object = scalyr_util.read_file_as_json(self.__path)
        self.assertEquals(json_object, info)
Example #3
    def __read_checkpoint_state(self):
        """Reads the checkpoint state from disk and returns it.

        The checkpoint state maps each file path to the offset within that log file where we left off copying it.

        @return:  The checkpoint state
        @rtype: dict
        """
        file_path = os.path.join(self.__config.agent_data_path,
                                 'checkpoints.json')

        if not os.path.isfile(file_path):
            log.info(
                'The log copying checkpoint file "%s" does not exist, skipping.'
                % file_path)
            return None

        # noinspection PyBroadException
        try:
            return scalyr_util.read_file_as_json(file_path)
        except Exception:
            # TODO: Fix read_file_as_json so that it does not raise a generic exception, or raises a specific one.
            log.exception('Could not read checkpoint file due to error.',
                          error_code='failedCheckpointRead')
            return None
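For context, the checkpoint state described in the docstring above is simply a JSON object mapping log file paths to byte offsets. The stdlib-only sketch below (hypothetical paths and offsets, not the agent's own helper) illustrates that shape and the write-to-a-temporary-file-then-rename pattern suggested by atomic_write_dict_as_json_file in Examples #1 and #2:

import json
import os

# Hypothetical checkpoint state: each log file path maps to the byte offset
# where copying left off.
checkpoints = {
    "/var/log/app/access.log": 10240,
    "/var/log/app/error.log": 512,
}

final_path = "checkpoints.json"
tmp_path = final_path + "~"

# Write to a temporary file first, then rename it into place, so a reader never
# observes a partially written checkpoint file.
with open(tmp_path, "w") as fp:
    json.dump(checkpoints, fp)
os.rename(tmp_path, final_path)

# Reading the file back yields the same mapping that __read_checkpoint_state() returns.
with open(final_path) as fp:
    assert json.load(fp) == checkpoints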
Example #4
def read_checkpoint_state_from_file(
        file_path,
        logger):  # type: (six.text_type, AgentLogger) -> Optional[Dict]
    """
    Read the checkpoint file from the given path and handle basic errors if they occur.
    """

    if not os.path.exists(file_path):
        return None

    # noinspection PyBroadException
    try:
        checkpoints = scalyr_util.read_file_as_json(file_path,
                                                    strict_utf8=True)
    except Exception:
        logger.exception(
            "Cannot read the checkpoint file {0}.".format(file_path))
        return None

    # The data in the file was corrupted somehow, so it cannot be read as a dict.
    if not isinstance(checkpoints, dict):
        logger.error(
            "The checkpoint file data has to be de-serialized into dict.")
        return None

    return checkpoints
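A brief usage sketch of read_checkpoint_state_from_file above, assuming a standard-library logger stands in for the agent's AgentLogger (the helper only calls exception() and error() on it):

import logging

logger = logging.getLogger("checkpoint_reader")  # hypothetical stand-in for AgentLogger

checkpoints = read_checkpoint_state_from_file("checkpoints.json", logger)
if checkpoints is None:
    # File missing, unreadable, or not a dict: fall back to an empty state so
    # all logs are read from their current end.
    checkpoints = {}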
Example #5
    def __load_checkpoints( self ):

        checkpoints = None
        try:
            checkpoints = scalyr_util.read_file_as_json( self.__checkpoint_file )
        except:
            self._logger.info( "No checkpoint file '%s' exists.\nAll logs will be read starting from their current end.", self.__checkpoint_file )
            checkpoints = {}

        self.__api.load_checkpoints( checkpoints )
Example #6
    def __load_checkpoints( self ):
        try:
            checkpoints = scalyr_util.read_file_as_json( self.__checkpoint_file )
        except:
            self._logger.info( "No checkpoint file '%s' exists.\n\tAll logs will be read starting from their current end.", self.__checkpoint_file )
            checkpoints = {}

        if checkpoints:
            for name, last_request in checkpoints.iteritems():
                self.__checkpoints[name] = last_request
Example #7
    def __load_checkpoints( self ):

        checkpoints = None
        try:
            checkpoints = scalyr_util.read_file_as_json( self.__checkpoint_file )
        except:
            self._logger.info( "No checkpoint file '%s' exists.\nAll logs will be read starting from their current end.", self.__checkpoint_file )
            checkpoints = {}

        self.__api.load_checkpoints( checkpoints )
Example #8
    def __load_checkpoints(self):
        try:
            checkpoints = scalyr_util.read_file_as_json(self.__checkpoint_file)
        except:
            self._logger.info(
                "No checkpoint file '%s' exists.\n\tAll logs will be read starting from their current end.",
                self.__checkpoint_file)
            checkpoints = {}

        if checkpoints:
            for name, last_request in checkpoints.iteritems():
                self.__checkpoints[name] = last_request
Example #9
    def __load_checkpoints( self ):

        checkpoints = None
        try:
            checkpoints = scalyr_util.read_file_as_json( self.__checkpoint_file )
        except:
            self._logger.info( "No checkpoint file '%s' exists.\nAll logs will be read starting from their current end.", self.__checkpoint_file )
            checkpoints = {}

        if checkpoints:
            for source, record_number in checkpoints.iteritems():
                self.__checkpoints[source] = record_number
Example #10
def load_checkpoints(filename):
    """
    Atomically loads checkpoints from a file.  The checkpoints are only ever loaded from disk once,
    and any future calls to this function return the in-memory checkpoints of the first successfully completed call.
    @param filename: the path on disk to a JSON file to load checkpoints from
    """
    result = None
    _global_lock.acquire()
    try:
        if filename in _global_checkpoints:
            result = _global_checkpoints[filename]
    finally:
        _global_lock.release()

    # if checkpoints already exist for this file, return the in memory copy
    if result is not None:
        return result

    # read from the file on disk
    checkpoints = {}
    try:
        checkpoints = scalyr_util.read_file_as_json(filename, strict_utf8=True)
    except:
        global_log.log(
            scalyr_logging.DEBUG_LEVEL_1,
            "No checkpoint file '%s' exists.\n\tAll journald logs for '%s' will be read starting from their current end.",
            filename,
        )
        checkpoints = {}

    _global_lock.acquire()
    try:
        # check if another thread created Checkpoints for this file
        # while we were loading from disk and if so, return
        # the in memory copy
        if filename in _global_checkpoints:
            result = _global_checkpoints[filename]
        else:
            # checkpoints for this file haven't been created yet, so
            # create them and store them in the global checkpoints dict
            result = Checkpoint(filename, checkpoints)
            _global_checkpoints[filename] = result
    finally:
        _global_lock.release()

    return result
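Per the docstring above, only the first successful call reads the file from disk; later calls for the same filename return the cached in-memory object. A hypothetical usage sketch:

first = load_checkpoints("journald_checkpoints.json")
second = load_checkpoints("journald_checkpoints.json")

# The global cache is consulted on the second call, so both names refer to the
# same Checkpoint instance even if the file changes on disk in between.
assert first is second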
Example #11
    def __read_checkpoint_state(self):
        """Reads the checkpoint state from disk and returns it.

        The checkpoint state maps each file path to the offset within that log file where we left off copying it.

        @return:  The checkpoint state
        @rtype: dict
        """
        file_path = os.path.join(self.__config.agent_data_path, 'checkpoints.json')

        if not os.path.isfile(file_path):
            log.info('The log copying checkpoint file "%s" does not exist, skipping.' % file_path)
            return None

        # noinspection PyBroadException
        try:
            return scalyr_util.read_file_as_json(file_path)
        except Exception:
            # TODO: Fix read_file_as_json so that it does not raise a generic exception, or raises a specific one.
            log.exception('Could not read checkpoint file due to error.', error_code='failedCheckpointRead')
            return None
Example #12
    def test_atomic_write_dict_as_json_file(self):
        info = { 'a': "hi" }
        scalyr_util.atomic_write_dict_as_json_file( self.__path, self.__path + '~', info )

        json_object = scalyr_util.read_file_as_json( self.__path )
        self.assertEquals( json_object, JsonObject( a='hi' ) )
Example #13
    def test_read_file_as_json(self):
        self.__create_file(self.__path, '{ a: "hi"}')

        json_object = scalyr_util.read_file_as_json(self.__path)
        self.assertEquals(json_object, JsonObject(a='hi'))
Example #14
    def test_read_file_as_json(self):
        self.__create_file(self.__path, '{ "a": "hi"}')

        value = scalyr_util.read_file_as_json(self.__path)
        self.assertEquals(value, {"a": "hi"})
Example #15
    def test_read_file_as_json(self):
        self.__create_file(self.__path, '{ a: "hi"}')

        json_object = scalyr_util.read_file_as_json(self.__path)
        self.assertEquals(json_object, JsonObject(a='hi'))
Example #16
    def parse(self):
        self.__read_time = time.time()

        try:
            try:
                # First read the file.  This makes sure it exists and can be parsed.
                self.__config = scalyr_util.read_file_as_json(self.__file_path)

                # What implicit entries do we need to add?  metric monitor, agent.log, and then logs from all monitors.
            except JsonReadFileException as e:
                raise BadConfiguration(str(e), None, 'fileParseError')

            # Import any requested variables from the shell and use them for substitutions.
            self.__import_shell_variables()
            self.__perform_substitutions()

            self.__verify_main_config_and_apply_defaults(self.__config, self.__file_path)
            self.__verify_logs_and_monitors_configs_and_apply_defaults(self.__config, self.__file_path)

            # Now, look for any additional configuration in the config fragment directory.
            for fp in self.__list_files(self.config_directory):
                self.__additional_paths.append(fp)
                content = scalyr_util.read_file_as_json(fp)
                for k in content.keys():
                    if k not in ('logs', 'monitors', 'server_attributes'):
                        self.__last_error = BadConfiguration(
                            'Configuration fragment file "%s" contains an invalid key "%s".  The config files in the '
                            'configuration directory can only contain "logs", "monitors", and "server_attributes" '
                            'entries.' % (fp, k), k, 'badFragmentKey')
                        raise self.__last_error

                self.__verify_logs_and_monitors_configs_and_apply_defaults(content, fp)

                self.__add_elements_from_array('logs', content, self.__config)
                self.__add_elements_from_array('monitors', content, self.__config)
                self.__merge_server_attributes(fp, content, self.__config)

            # Add in 'serverHost' to server_attributes if it is not set.  We must do this after merging any
            # server attributes from the config fragments.
            if 'serverHost' not in self.server_attributes:
                self.__config['server_attributes']['serverHost'] = socket.gethostname()

            # Add in implicit entry to collect the log generated by this agent.
            agent_log = None
            if self.implicit_agent_log_collection:
                config = JsonObject(path='agent.log')
                self.__verify_log_entry_and_set_defaults(config, description='implicit rule')
                agent_log = config

            # Add in any platform-specific monitors.
            platform_monitors = []
            for monitor in self.__default_monitors:
                config = JsonObject(content=monitor)
                self.__verify_monitor_entry_and_set_defaults(config, 'default monitors for platform', -1)
                platform_monitors.append(config)

            all_logs = list(self.__config.get_json_array('logs'))
            if agent_log is not None:
                all_logs.append(agent_log)

            # We need to go back and fill in the monitor id if it is not set.  We do this by keeping a count of
            # how many monitors we have with the same module name (just considering the last element of the module
            # path).  We use the shortened form of the module name because that is used when emitting lines for
            # this monitor in the logs -- see scalyr_logging.py.
            monitors_by_module_name = {}
            # Tracks which modules already had an id present in the module config.
            had_id = {}
            all_monitors = list(self.__config.get_json_array('monitors'))
            for monitor in platform_monitors:
                all_monitors.append(monitor)

            for entry in all_monitors:
                module_name = entry['module'].split('.')[-1]
                if module_name not in monitors_by_module_name:
                    index = 1
                else:
                    index = monitors_by_module_name[module_name] + 1
                if 'id' not in entry:
                    entry['id'] = index
                else:
                    had_id[module_name] = True

                monitors_by_module_name[module_name] = index

            # Just as a simplification, if there is only one monitor with a given name, we remove the monitor_id
            # to clean up its name in the logs.
            for entry in all_monitors:
                module_name = entry['module'].split('.')[-1]
                if monitors_by_module_name[module_name] == 1 and module_name not in had_id:
                    entry['id'] = ''

            # Now build up __logs to have an object created for each log entry, and __monitors to have an object
            # created for each monitor entry.
            for entry in all_logs:
                # Automatically add in the parser to the attributes section.  We make a copy of the object first
                # just to be safe.
                entry = JsonObject(content=entry)
                if 'parser' in entry:
                    entry['attributes']['parser'] = entry['parser']

                if self.__log_factory is not None:
                    self.__logs.append(self.__log_factory(entry))

            if self.__monitor_factory is not None:
                for entry in all_monitors:
                    self.__monitors.append(self.__monitor_factory(entry, self.additional_monitor_module_paths))

            # Get all of the paths for the logs currently being copied.
            all_paths = {}
            for entry in self.__logs:
                all_paths[entry.log_path] = True

            # Now add in a logs entry for each monitor's log file if there is not already
            # an entry for it.
            for entry in self.__monitors:
                log_config = entry.log_config
                if type(log_config) is dict:
                    log_config = JsonObject(content=log_config)

                # If the log config does not specify a parser, we add it in.
                self.__verify_or_set_optional_string(log_config, 'parser', 'agent-metrics',
                                                     'log entry requested by module "%s"' % entry.module_name)
                self.__verify_log_entry_and_set_defaults(
                    log_config, description='log entry requested by module "%s"' % entry.module_name)

                path = log_config['path']
                # Update the monitor to have the complete log config entry.  This also guarantees that the path
                # is absolute.
                entry.log_config = log_config
                if path not in all_paths:
                    if 'parser' in log_config:
                        log_config['attributes']['parser'] = log_config['parser']
                    if self.__log_factory is not None:
                        self.__logs.append(self.__log_factory(log_config))
                    all_paths[path] = True
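The monitor id assignment performed inside parse() above can be illustrated in isolation. The following is a simplified, self-contained sketch using plain dicts instead of the agent's JsonObject (the module names are only illustrative):

def assign_monitor_ids(monitors):
    """Assign 1-based ids per short module name, then blank the id when a
    module appears only once and none of its entries set an id explicitly."""
    counts = {}
    had_id = {}
    for entry in monitors:
        short_name = entry["module"].split(".")[-1]
        index = counts.get(short_name, 0) + 1
        if "id" not in entry:
            entry["id"] = index
        else:
            had_id[short_name] = True
        counts[short_name] = index

    # Single-instance monitors without an explicit id get an empty id so their
    # log lines are not suffixed with a monitor number.
    for entry in monitors:
        short_name = entry["module"].split(".")[-1]
        if counts[short_name] == 1 and short_name not in had_id:
            entry["id"] = ""
    return monitors


monitors = [
    {"module": "scalyr_agent.builtin_monitors.linux_system_metrics"},
    {"module": "scalyr_agent.builtin_monitors.shell_monitor"},
    {"module": "scalyr_agent.builtin_monitors.shell_monitor"},
]
assign_monitor_ids(monitors)
# -> linux_system_metrics gets id "", while the two shell_monitor entries get ids 1 and 2.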