Example #1
    def add_app_integration(self, app_info):
        """Add a configuration for a new streamalert app integration function

        Args:
            app_info (dict): The values needed to begin configuring
                a new app integration
        """
        exists, prompt_for_auth, overwrite = False, True, False
        app = get_app(app_info, False)

        # Check to see if there is an existing configuration for this app integration
        cluster_config = self.config['clusters'][app_info['cluster']]
        if app_info['app_name'] in cluster_config['modules'].get('stream_alert_apps', {}):
            prompt = ('An app with the name \'{}\' is already configured for cluster '
                      '\'{}\'. Would you like to update the existing app\'s configuration'
                      '?'.format(app_info['app_name'], app_info['cluster']))

            exists = True

            # Return if the user is not deliberately updating an existing config
            if not continue_prompt(message=prompt):
                return

            prompt = ('Would you also like to update the authentication information for '
                      'app integration with name \'{}\'?'.format(app_info['app_name']))

            # If this is true, we shouldn't prompt again to warn about overwriting
            prompt_for_auth = overwrite = continue_prompt(message=prompt)

        if prompt_for_auth and not save_app_auth_info(app, app_info, overwrite):
            return

        apps_config = cluster_config['modules'].get('stream_alert_apps', {})
        local_config_keys = {'interval', 'timeout', 'memory'}
        if not exists:
            # Save a default log level as info to the config
            app_info['log_level'] = 'info'
            app_info['current_version'] = '$LATEST'
            local_config_keys.update({'log_level', 'current_version', 'type'})

            apps_config[app_info['app_name']] = {key: app_info[key]
                                                 for key in local_config_keys}
        else:
            apps_config[app_info['app_name']].update({key: app_info[key]
                                                      for key in local_config_keys})


        cluster_config['modules']['stream_alert_apps'] = apps_config

        # Add this service to the sources for this app integration
        # The `stream_alert_app` is purposely singular here
        app_sources = self.config['sources'].get('stream_alert_app', {})
        app_sources[app_info['function_name']] = {'logs': [app.service()]}
        self.config['sources']['stream_alert_app'] = app_sources

        LOGGER_CLI.info('Successfully added \'%s\' app integration to \'conf/clusters/%s.json\' '
                        'for service \'%s\'.', app_info['app_name'],
                        app_info['cluster'], app_info['type'])

        self.write()
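For reference, here is a hypothetical app_info dict covering the keys this method reads. The key names come from the code above; the values are illustrative only, and in practice the dict is assembled from parsed CLI options (see the _app_integration_handler example below).

app_info = {
    'app_name': 'duo_collector',       # illustrative name
    'cluster': 'prod',                 # must match a key in config['clusters']
    'type': 'duo_auth',                # service type, also recorded as the log source
    'function_name': 'acme_prod_duo_auth_duo_collector_app',
    'interval': 'rate(10 minutes)',    # assumed schedule expression format
    'timeout': '60',
    'memory': '128',
}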
Example #2
def handler(event, context):
    """Main lambda handler use as the entry point

    Args:
        event (dict): Always empty (for now) event object
        context (LambdaContext): AWS LambdaContext object
    """
    if event and 'full_run' in event:
        # TODO: implement support for historical runs via input events
        pass

    try:
        # Load the config from this context object, pulling info from parameter store
        config = AppConfig.load_config(context)

        # The config specifies what app this function is supposed to run
        app = get_app(config)

        # Run the gather operation
        app.gather()
    finally:
        # If the config was loaded, save a bad state if the current state is not
        # marked as a success (i.e. it is still 'running')
        if 'config' in locals():
            if not config.is_success:
                config.mark_failure()
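The 'config' in locals() check in the finally block guards against AppConfig.load_config itself raising, in which case the name config was never bound. A minimal, self-contained sketch of that guard pattern with dummy names (not StreamAlert code):

def load_config():
    # Simulate a failure before any config object is created
    raise ValueError('parameter store unavailable')

def handler_like():
    try:
        config = load_config()       # may raise before 'config' is ever bound
    finally:
        if 'config' in locals():     # only true when load_config() returned normally
            print('marking failure on the loaded config')

try:
    handler_like()
except ValueError:
    print('config never existed, and the finally block exited without a NameError')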
Example #3
def handler(event, context):
    """Main lambda handler use as the entry point

    Args:
        event (dict): Event object that can potentially contain details on what to
            do during this invocation. An example of this is the 'invocation_type' key
            that is used as an override to allow for successive invocations (and in
            the future, support for historical invocations)
        context (LambdaContext): AWS LambdaContext object
    """
    try:
        # Load the config from this context object, pulling info from parameter store
        # The event object can contain detail about what to do, ie: 'invocation_type'
        config = AppConfig.load_config(context, event)

        # The config specifies what app this function is supposed to run
        app = get_app(config)

        # Run the gather operation
        app.gather()
    finally:
        # If the config was loaded, save a bad state if the current state is still
        # marked as 'running' (aka not 'success' or 'partial' runs)
        if 'config' in locals() and config.is_running:
            config.mark_failure()
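The event override described in the docstring has to be supplied by whoever invokes the function. A hedged sketch of doing that with boto3; the function name and the 'invocation_type' value are placeholders, since the accepted values are not shown in this excerpt:

import json

import boto3

lambda_client = boto3.client('lambda')
lambda_client.invoke(
    FunctionName='acme_prod_duo_auth_duo_collector_app',   # placeholder function name
    InvocationType='Event',                                 # asynchronous invocation
    Payload=json.dumps({'invocation_type': 'successive'}),  # placeholder override value
)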
Example #4
    def _determine_last_time(self):
        """Determine the last time this function was executed and fallback on
        evaluating the rate value if there is no last timestamp available

        Returns:
            int: The unix timestamp for the starting point to fetch logs back to
        """
        if not self.last_timestamp:
            interval_time = self.evaluate_interval()
            current_time = int(time.mktime(time.gmtime()))
            time_delta = current_time - interval_time
            LOGGER.debug('Current timestamp: %s seconds. Calculated delta: %s seconds',
                         current_time, time_delta)

            # Request the date format from the app since some services expect different types
            # Using init=False will return the class without instantiating it
            date_format = get_app(self, init=False).date_formatter()
            if date_format:
                self.last_timestamp = datetime.utcfromtimestamp(time_delta).strftime(date_format)
            else:
                self.last_timestamp = time_delta

        LOGGER.info('Starting last timestamp set to: %s', self.last_timestamp)

        return self.last_timestamp
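The arithmetic in _determine_last_time is plain epoch math: subtract the evaluated interval from the current time, then optionally render the result with a service-specific format string. A standalone sketch assuming a 10-minute interval and an ISO-like format (both values are assumptions, not taken from the project):

import time
from datetime import datetime

interval_seconds = 600                              # assume evaluate_interval() returned 10 minutes
current_time = int(time.mktime(time.gmtime()))      # current time computed the same way as the method above
start_point = current_time - interval_seconds

# Apps whose service expects a formatted date instead of an epoch get it via strftime
date_format = '%Y-%m-%dT%H:%M:%SZ'                  # assumed format string; real apps supply their own
print(start_point)
print(datetime.utcfromtimestamp(start_point).strftime(date_format))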
Example #5
def _app_integration_handler(options):
    """Perform app integration related functions

    Args:
        options (argparse.Namespace): Contains all of the necessary info for configuring
            a new app integration or updating an existing one
    """
    if not options:
        return

    # Convert the options to a dict
    app_info = vars(options)

    # Add the region and prefix for this StreamAlert instance to the app info
    app_info['region'] = str(CONFIG['global']['account']['region'])
    app_info['prefix'] = str(CONFIG['global']['account']['prefix'])

    # Function name follows the format: '<prefix>_<cluster>_<service>_<app_name>_app'
    func_parts = ['prefix', 'cluster', 'type', 'app_name']

    # Create a new app integration function
    if options.subcommand == 'new':
        app_info['function_name'] = '_'.join(
            [app_info.get(value) for value in func_parts] + ['app'])

        CONFIG.add_app_integration(app_info)
        return

    # Update the auth information for an existing app integration function
    if options.subcommand == 'update-auth':
        cluster_config = CONFIG['clusters'][app_info['cluster']]
        if app_info['app_name'] not in cluster_config['modules'].get(
                'stream_alert_apps', {}):
            LOGGER_CLI.error(
                'App integration with name \'%s\' does not exist for cluster \'%s\'',
                app_info['app_name'], app_info['cluster'])
            return

        # Get the type for this app integration from the current
        # config so we can update it properly
        app_info['type'] = (cluster_config['modules']['stream_alert_apps']
                            [app_info['app_name']]['type'])

        app_info['function_name'] = '_'.join(
            [app_info.get(value) for value in func_parts] + ['app'])

        app = get_app(app_info)

        if not save_app_auth_info(app, app_info, True):
            return

        return

    # List all of the available app integrations, broken down by cluster
    if options.subcommand == 'list':
        all_info = {
            cluster: cluster_config['modules'].get('stream_alert_apps')
            for cluster, cluster_config in CONFIG['clusters'].iteritems()
        }

        for cluster, info in all_info.iteritems():
            print '\nCluster: {}\n'.format(cluster)
            if not info:
                print '\tNo Apps configured\n'
                continue

            for name, details in info.iteritems():
                print '\tName: {}'.format(name)
                print '\n'.join([
                    '\t\t{key}:{padding_char:<{padding_count}}{value}'.format(
                        key=key_name,
                        padding_char=' ',
                        padding_count=30 - (len(key_name)),
                        value=value)
                    for key_name, value in details.iteritems()
                ] + ['\n'])
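As a worked example of the naming convention used throughout this handler, here is a hypothetical set of parsed options and the function name they produce. All values are illustrative; the real Namespace comes from the project's own argparse setup, and the prefix is read from CONFIG['global']['account']['prefix'].

from argparse import Namespace

options = Namespace(
    subcommand='new',
    cluster='prod',
    type='duo_auth',
    app_name='duo_collector',
    interval='rate(10 minutes)',   # assumed schedule value
    timeout='60',
    memory='128',
)

app_info = vars(options)
app_info['prefix'] = 'acme'        # placeholder; normally pulled from the global config

func_parts = ['prefix', 'cluster', 'type', 'app_name']
function_name = '_'.join([app_info.get(value) for value in func_parts] + ['app'])
print(function_name)               # -> acme_prod_duo_auth_duo_collector_app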
Example #6
def test_get_app_exception_invalid():
    """App Integration - App Base, Get App Exception for Invalid Service"""
    config = AppConfig(get_valid_config_dict('duo_auth'))
    config['type'] = 'bad_service_type'
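    # get_app is expected to raise for the unknown service type; the expected-exception
    # declaration (e.g. a raises-style decorator) is not shown in this excerpt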
    get_app(config)
Example #7
def test_get_app_exception_type():
    """App Integration - App Base, Get App Exception for No 'type'"""
    config = AppConfig(get_valid_config_dict('duo_auth'))
    del config['type']
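    # With 'type' removed, get_app is expected to raise; the expected-exception
    # declaration is not shown in this excerpt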
    get_app(config)
Example #8
def test_get_app():
    """App Integration - App Base, Get App"""
    config = AppConfig(get_valid_config_dict('duo_auth'))
    app = get_app(config)
    assert_is_not_none(app)