예제 #1
0
 def _wrapper(*args, **kwargs):
     """Run the wrapped function, log its runtime, and return the elapsed seconds.

     Note: the wrapped function's own return value is discarded — only the
     measured duration (float seconds) is returned to the caller.
     """
     started_at = time.time()
     func(*args, **kwargs)
     elapsed = time.time() - started_at
     LOGGER.info('[%s] Function executed in %.4f seconds.', func.__name__, elapsed)
     return elapsed
예제 #2
0
    def _finalize(self):
        """Wrap up this execution: persist collected state and chain if needed.

        Persists the last collected timestamp back to the config. If more logs
        remain to be gathered, the config is flagged as 'partial' and a
        successive lambda invocation is triggered; otherwise the config is
        marked as a success.
        """
        # A falsy ending timestamp indicates the subclass never set it
        if not self._last_timestamp:
            LOGGER.error('Ending last timestamp is 0. This should not happen and is likely '
                         'due to the subclass not setting this value.')

        # An unchanged timestamp usually just means no logs came back this run
        if self._last_timestamp == self._config.start_last_timestamp:
            LOGGER.info('Ending last timestamp is the same as the beginning last timestamp. '
                        'This could occur if there were no logs collected for this execution.'
                        )

        LOGGER.info('[%s] App complete; gathered %d logs in %d polls.', self,
                    self._gathered_log_count, self._poll_count)

        self._config.last_timestamp = self._last_timestamp

        if self._more_to_poll:
            # More logs remain: mark the config 'partial' so scheduled runs do
            # not overlap the chained invocation, then fire off the next one
            self._config.mark_partial()
            self._invoke_successive_app()
        else:
            self._config.mark_success()
예제 #3
0
    def _invoke_successive_app(self):
        """Fire off another invocation of this app function to gather more logs.

        Used when a single execution could not collect every available log.
        Rather than marking the config 'success' and waiting on the next
        scheduled run, the lambda is re-invoked with a special 'event' that
        signals more logs remain. Scheduled executions lack this 'event' and
        will bail out on the 'self._config.is_running' check, so chained
        invocations never collide with or duplicate scheduled ones.
        """
        try:
            # Fire-and-forget ('Event') async invocation of the same function
            response = boto3.client('lambda').invoke(
                FunctionName=self._config.function_name,
                InvocationType='Event',
                Payload=self._config.successive_event,
                Qualifier=self._config.function_version,
            )
        except ClientError as err:
            LOGGER.error(
                'An error occurred while invoking a subsequent app function '
                '(\'%s:%s\'). Error is: %s', self._config.function_name,
                self._config.function_version, err.response)
            raise

        LOGGER.info(
            'Invoking successive apps function \'%s\' with Lambda request ID \'%s\'',
            self._config.function_name,
            response['ResponseMetadata']['RequestId'])
예제 #4
0
    def _initialize(self):
        """Perform startup checks and mark this app's state as running.

        Returns:
            bool: False if the app is already running, or if a non-successive
                invocation hits a config stuck in 'partial' state; True once
                auth is validated and the config is marked running.
        """
        # Guard: bail if another execution of this app is already in flight
        if self._config.is_running:
            LOGGER.error('[%s] App already running', self)
            return False

        # Guard: a 'partial' config may only be resumed by a successive
        # (chained) invocation, never by a regular scheduled one
        if self._config.is_partial and not self._config.is_successive_invocation:
            LOGGER.error('[%s] App in partial execution state, exiting', self)
            return False

        LOGGER.info('[%s] Starting app', self)

        LOGGER.info('App executing as a successive invocation: %s',
                    self._config.is_successive_invocation)

        # Raises on invalid auth, aborting the run before any state changes
        self._config.validate_auth(set(self.required_auth_info()))

        self._config.set_starting_timestamp(self.date_formatter())

        self._last_timestamp = self._config.last_timestamp

        # Flag the app as running, which also updates the parameter store
        self._config.mark_running()

        return True
예제 #5
0
    def _determine_last_time(self, date_format):
        """Resolve the starting point for log collection.

        Uses the previously stored timestamp when one exists; otherwise falls
        back to "now minus the configured interval".

        Args:
            date_format (str): Optional strftime format some services require;
                falsy means a raw unix timestamp is used.

        Returns:
            int: The unix timestamp (or formatted string when date_format is
                given) marking how far back to fetch logs.
        """
        if not self.last_timestamp:
            interval_seconds = self._evaluate_interval()
            now = int(calendar.timegm(time.gmtime()))
            lookback = now - interval_seconds
            LOGGER.debug(
                'Current timestamp: %s seconds. Calculated delta: %s seconds',
                now, lookback)

            # Some services expect a formatted date string rather than a raw
            # unix timestamp, so honor the app-supplied format when present
            self.last_timestamp = (
                datetime.utcfromtimestamp(lookback).strftime(date_format)
                if date_format
                else lookback
            )

        LOGGER.info('Starting last timestamp set to: %s', self.last_timestamp)

        return self.last_timestamp
예제 #6
0
    def _send_logs_to_lambda(self, logs):
        """Protected method for sending logs to the rule processor lambda
        function for processing. This performs some size checks before sending.

        Args:
            logs (list): List of the logs that have been gathered

        Returns:
            bool: True if the batch needs no further handling — either the logs
                were sent, or a single oversized log was dropped. False if the
                payload exceeded the size limit and the caller should segment
                the batch into smaller requests.

        Raises:
            ClientError: If the Lambda invocation fails
        """
        # Create a payload to be sent to the rule processor that contains the
        # service these logs were collected from and the list of logs
        payload = {'Records': [{'stream_alert_app': self._source_function, 'logs': logs}]}
        # json.dumps defaults to ensure_ascii=True, so every character is
        # ASCII and len() equals the serialized payload's size in bytes
        payload_json = json.dumps(payload, separators=(',', ':'))
        if len(payload_json) > self.MAX_LAMBDA_PAYLOAD_SIZE:
            if len(logs) == 1:
                # A single log that alone exceeds the limit can never be
                # segmented further, so it must be dropped
                LOGGER.error('Log payload size for single log exceeds input limit and will be '
                             'dropped (%d > %d max).', len(payload_json),
                             self.MAX_LAMBDA_PAYLOAD_SIZE)
                return True

            LOGGER.debug('Log payload size for %d logs exceeds limit and will be '
                         'segmented (%d > %d max).', len(logs), len(payload_json),
                         self.MAX_LAMBDA_PAYLOAD_SIZE)
            return False

        LOGGER.debug('Sending %d logs to rule processor with payload size %d',
                     len(logs), len(payload_json))

        try:
            # Async ('Event') invocation of the production alias of the
            # destination function
            response = Batcher.LAMBDA_CLIENT.invoke(
                FunctionName=self._destination_function,
                InvocationType='Event',
                Payload=payload_json,
                Qualifier='production'
            )

        except ClientError as err:
            LOGGER.error('An error occurred while sending logs to '
                         '\'%s:production\'. Error is: %s',
                         self._destination_function,
                         err.response)
            raise

        LOGGER.info('Sent %d logs to \'%s\' with Lambda request ID \'%s\'',
                    len(logs),
                    self._destination_function,
                    response['ResponseMetadata']['RequestId'])

        return True
예제 #7
0
    def send_logs(self, logs):
        """Public method to send the logs to the rule processor

        Args:
            logs (list): List of the logs that have been gathered
        """
        LOGGER.info('Starting batch send of %d logs to the rule processor', len(logs))

        # Try to send all of the logs in one fell swoop; fall back on
        # segmenting the list of logs into multiple requests if they
        # could not be sent at once
        if not self._send_logs_to_lambda(logs):
            self._segment_and_send(logs)

        # Always log completion; previously an early return on the
        # single-batch success path skipped this message entirely
        LOGGER.info('Finished batch send of %d logs to the rule processor', len(logs))
예제 #8
0
 def report_remaining_seconds(self):
     """Log how many seconds remain before this Lambda execution times out."""
     remaining_seconds = self.remaining_ms() / 1000.0
     LOGGER.info('Lambda remaining seconds: %.2f', remaining_seconds)