Code example #1
 def test_dumps_classic_log(self):
     """Test the classic log serialization"""
     serializer = ESSerializer()
     record = self.log.makeRecord(name=self.log.name,
                                  level=logging.INFO,
                                  fn=self.__class__.__name__,
                                  lno=58, msg="dump_classic_log",
                                  args=None,
                                  exc_info=False,
                                  func=None,
                                  extra=None)
     self.formatter.format(record)
     for value in record.__dict__.values():
         try:
             serializer.dumps(value)
         except TypeError:
             self.fail("Serializer raised a TypeError exception")
Code example #2
 def test_dumps_log_with_extras_and_args(self):
     """ Test the log serialization with arguments and extras complex parameters"""
     serializer = ESSerializer()
     record = self.log.makeRecord(name=self.log.name,
                                  level=logging.ERROR,
                                  fn=self.__class__.__name__,
                                  lno=58, msg="dump_%s_log",
                                  args="args",
                                  exc_info=False,
                                  func=None,
                                  extra={'complexvalue1': datetime.date.today(),
                                         'complexvalue2': decimal.Decimal('3.0')})
     self.formatter.format(record)
     for value in record.__dict__.values():
         try:
             serializer.dumps(value)
         except TypeError:
             self.fail("Serializer raised a TypeError exception")
Code example #3
 def test_exception_log_serialization_with_exc_info_field(self):
     """Test log serialization when the record carries an exc_info tuple"""
     serializer = ESSerializer()
     try:
         bad_idea = 1 / 0
     except ZeroDivisionError:
         record = self.log.makeRecord(name=self.log.name,
                                      level=logging.ERROR,
                                      fn=self.__class__.__name__,
                                      lno=58, msg="dump_exception_log",
                                      args=None,
                                      exc_info=sys.exc_info(),
                                      func=None,
                                      extra=None)
     self.formatter.format(record)
     for value in record.__dict__.values():
         try:
             serializer.dumps(value)
         except TypeError:
             self.fail("Serializer raised a TypeError exception")
Code example #4
File: handlers.py Project: advlad/eslogging
    def __init__(
            self,
            hosts: Iterable[Dict[str, Any]] = __DEFAULT_ELASTICSEARCH_HOST,
            auth_details=(__DEFAULT_AUTH_USER, __DEFAULT_AUTH_PASSWD),
            aws_access_key: str = __DEFAULT_AWS_ACCESS_KEY,
            aws_secret_key: str = __DEFAULT_AWS_SECRET_KEY,
            aws_region: str = __DEFAULT_AWS_REGION,
            auth_type: AuthType = __DEFAULT_AUTH_TYPE,
            use_ssl: bool = __DEFAULT_USE_SSL,
            verify_ssl: bool = __DEFAULT_VERIFY_SSL,
            buffer_size: int = __DEFAULT_BUFFER_SIZE,
            flush_frequency_in_sec: int = __DEFAULT_FLUSH_FREQ_INSEC,
            es_index_name: str = __DEFAULT_ES_INDEX_NAME,
            index_name_frequency: IndexNameFrequency = __DEFAULT_INDEX_FREQUENCY,
            es_doc_type: str = __DEFAULT_ES_DOC_TYPE,
            es_additional_fields: Optional[Dict] = None,
            raise_on_indexing_exceptions: bool = __DEFAULT_RAISE_ON_EXCEPTION,
            default_timestamp_field_name: str = __DEFAULT_TIMESTAMP_FIELD_NAME,
            timed_flush: bool = False,
            error_stream: TextIO = sys.stderr):
        """ Handler constructor

        :param hosts: The list of hosts the Elasticsearch client will connect to. The list can be provided
                    in the format ```[{'host':'host1','port':9200}, {'host':'host2','port':9200}]``` so that
                    the client can fail over between insertion nodes
        :param auth_details: When ```ESHandler.AuthType.BASIC_AUTH``` is used this argument must contain
                    a tuple of strings with the user and password that will be used to authenticate against
                    the Elasticsearch servers, for example ```('User','Password')```
        :param aws_access_key: When ```ESHandler.AuthType.AWS_SIGNED_AUTH``` is used this argument must contain
                    the AWS access key id of the AWS IAM user
        :param aws_secret_key: When ```ESHandler.AuthType.AWS_SIGNED_AUTH``` is used this argument must contain
                    the AWS secret key of the AWS IAM user
        :param aws_region: When ```ESHandler.AuthType.AWS_SIGNED_AUTH``` is used this argument must contain
                    the AWS region of the AWS Elasticsearch servers, for example ```'us-east'```
        :param auth_type: The authentication type to be used in the connection, an ```ESHandler.AuthType``` value.
                    Currently NO_AUTH, BASIC_AUTH, KERBEROS_AUTH and AWS_SIGNED_AUTH are supported
        :param use_ssl: A boolean that defines whether the communication should use SSL encryption
        :param verify_ssl: A boolean that defines whether the SSL certificates are validated
        :param buffer_size: An int; once the internal buffer reaches this size, records are flushed to Elasticsearch
        :param flush_frequency_in_sec: The number of seconds between buffer flushes, applied even
                    if buffer_size has not been reached yet
        :param es_index_name: A string with the prefix of the elasticsearch index that will be created. A date
                    postfix in YYYY.MM.dd format is appended; ```python_logger``` is used by default
        :param index_name_frequency: Defines the granularity of the date used in the postfix of the index name.
                    Available values are selected from the IndexNameFrequency class (IndexNameFrequency.DAILY,
                    IndexNameFrequency.WEEKLY, IndexNameFrequency.MONTHLY, IndexNameFrequency.YEARLY). Daily
                    indices are used by default
        :param es_doc_type: A string with the name of the document type that will be used; ```python_log``` is
                    used by default
        :param es_additional_fields: A dictionary with all the additional fields that you would like to add
                    to the logs, such as the application, environment, etc.
        :param raise_on_indexing_exceptions: A boolean, True only for debugging purposes, to raise the exceptions
                    caused by indexing errors
        :param default_timestamp_field_name: A string with the name of the field in which the record timestamp
                    is stored
        :param timed_flush: A boolean; when True, an independent thread flushes the buffer every
                            flush_frequency_in_sec seconds, regardless of whether the buffer is full
        :param error_stream: A text stream to which indexing errors are written, ```sys.stderr``` by default
        :return: A ready-to-use ESHandler.
        """
        logging.Handler.__init__(self)

        self.hosts = hosts
        self.auth_details = auth_details
        self.aws_access_key = aws_access_key
        self.aws_secret_key = aws_secret_key
        self.aws_region = aws_region
        self.auth_type = auth_type
        self.use_ssl = use_ssl
        self.verify_certs = verify_ssl
        self.buffer_size = buffer_size
        self.flush_frequency_in_sec = flush_frequency_in_sec
        self.es_index_name = es_index_name
        self.index_name_frequency = index_name_frequency
        self.es_doc_type = es_doc_type

        if es_additional_fields is None:
            self.es_additional_fields = {}
        else:
            self.es_additional_fields = es_additional_fields.copy()
        # Enrich every log document with the emitting host's name and IP address.
        self.es_additional_fields.update({
            'host': socket.gethostname(),
            'host_ip': socket.gethostbyname(socket.gethostname())
        })
        self.raise_on_indexing_exceptions = raise_on_indexing_exceptions
        self.default_timestamp_field_name = default_timestamp_field_name
        self._timed_flush = timed_flush
        self._error_stream = error_stream

        self._client = None
        self._buffer = []
        self._buffer_lock = Lock()
        self._timer = None
        self._index_name_func = ESHandler._INDEX_FREQUENCY_FUNCION_DICT[
            self.index_name_frequency]
        self.serializer = ESSerializer()
        # This filter is needed because the elasticsearch bulk function calls logging.info in its
        #       http_requests module, which would otherwise create an infinite logging loop.
        self.addFilter(self.es_filter)
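
To close the listing, a hedged usage sketch wiring the handler into a standard logger. The import path follows the file name shown above, and AuthType.NO_AUTH is an assumed member name taken from the docstring's list of supported authentication types:

import logging

from handlers import ESHandler  # module path per the listing above

# The values below mirror the documented defaults; adjust for a real deployment.
handler = ESHandler(
    hosts=[{'host': 'localhost', 'port': 9200}],
    auth_type=ESHandler.AuthType.NO_AUTH,  # assumed member name, per the docstring
    es_index_name='python_logger',
    use_ssl=False,
)

log = logging.getLogger('my_app')
log.setLevel(logging.INFO)
log.addHandler(handler)
log.info('Hello from ESHandler')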