Example #1
0
    def execute_api_server(self, listen_address=None, listen_port=None, ssl_cert=None, ssl_key=None):
        """Run the ACE API server under werkzeug's development server.

        Any of listen_address, listen_port, ssl_cert and ssl_key left as
        None is filled in from the [api] section of the saq configuration.
        Blocks until the server exits.
        """
        # https://gist.github.com/rduplain/1705072
        # this is a bit weird because I want the urls to be the same as they
        # are configured for apache, where they are all starting with /api
        import aceapi
        from saq.database import initialize_database
        from werkzeug.serving import run_simple
        from werkzeug.wsgi import DispatcherMiddleware
        from flask import Flask

        # anything not passed in falls back to the [api] configuration section
        if listen_address is None:
            listen_address = saq.CONFIG.get('api', 'listen_address')
        if listen_port is None:
            listen_port = saq.CONFIG.getint('api', 'listen_port')
        if ssl_cert is None:
            ssl_cert = saq.CONFIG.get('api', 'ssl_cert')
        if ssl_key is None:
            ssl_key = saq.CONFIG.get('api', 'ssl_key')

        ssl_context = (ssl_cert, ssl_key)

        # mount the real api app under /api, with a dummy app at the root
        app = aceapi.create_app(testing=True)
        app.config['DEBUG'] = True
        app.config['APPLICATION_ROOT'] = '/api'
        application = DispatcherMiddleware(Flask('dummy_app'), {
            app.config['APPLICATION_ROOT']: app,
        })

        initialize_database()
        saq.db = aceapi.db.session

        logging.info(f"starting api server on {listen_address} port {listen_port}")
        run_simple(listen_address, listen_port, application, ssl_context=ssl_context, use_reloader=False)
Example #2
0
def initialize_test_environment():
    """Prepare the ACE unittest environment (idempotent).

    Refuses to run as root, locates SAQ_HOME, initializes the saq package
    with the unittest logging configuration, recreates the var/test scratch
    directory, sets the well-known test encryption password and initializes
    the database. Exits the process on any unrecoverable setup failure.
    """
    global initialized
    global test_dir

    # already set up by a previous call
    if initialized:
        return

    # there is no reason to run anything as root
    if os.geteuid() == 0:
        print("do not run ace as root please")
        sys.exit(1)

    # where is ACE?
    saq_home = '/opt/saq'
    if 'SAQ_HOME' in os.environ:
        saq_home = os.environ['SAQ_HOME']

    # adjust search path
    sys.path.append(os.path.join(saq_home, 'lib'))

    # initialize saq
    import saq
    saq.initialize(saq_home=saq_home,
                   config_paths=[],
                   logging_config_path=os.path.join(saq_home, 'etc',
                                                    'unittest_logging.ini'),
                   args=None,
                   relative_dir=None)

    # additional logging required for testing
    initialize_unittest_logging()

    # create a temporary storage directory (fresh for every test run)
    test_dir = os.path.join(saq.SAQ_HOME, 'var', 'test')
    if os.path.exists(test_dir):
        try:
            shutil.rmtree(test_dir)
        except Exception as e:
            logging.error("unable to delete {}: {}".format(test_dir, e))
            sys.exit(1)

    try:
        os.makedirs(test_dir)
    except Exception as e:
        logging.error("unable to create temp dir {}: {}".format(test_dir, e))
        # continuing without the test directory would only cause confusing
        # downstream failures, so bail out here like the rmtree branch does
        sys.exit(1)

    # in all our testing we use the password "password" for encryption/decryption
    from saq.crypto import get_aes_key
    saq.ENCRYPTION_PASSWORD = get_aes_key('password')

    initialize_database()
    initialized = True
Example #3
0
def initialize(saq_home=None,
               config_paths=None,
               logging_config_path=None,
               args=None,
               relative_dir=None,
               unittest=False):
    """Initialize the global ACE runtime state.

    Loads configuration, sets up logging, creates required directories,
    loads SLA and proxy settings, initializes the database connection and
    supporting subsystems. Must be called once before any other saq
    functionality is used. Exits the process on unrecoverable errors.

    :param saq_home: root directory of the ACE install (defaults to the
        SAQ_HOME environment variable, then the current directory)
    :param config_paths: optional list of configuration file paths to load
    :param logging_config_path: path to the logging configuration file
        (defaults to etc/console_logging.ini)
    :param args: optional parsed command line arguments; may override log
        level, config paths, logging config path, daemon mode and forced
        alerts
    :param relative_dir: optional override for SAQ_RELATIVE_DIR
    :param unittest: when True use etc/saq.unittest.ini as the default
        configuration instead of etc/saq.ini
    """

    from saq.database import initialize_database, initialize_node

    global API_PREFIX
    global CA_CHAIN_PATH
    global COMPANY_ID
    global COMPANY_NAME
    global CONFIG
    global CONFIG_PATHS
    global DAEMON_MODE
    global DATA_DIR
    global DEFAULT_ENCODING
    global DUMP_TRACEBACKS
    global ENCRYPTION_PASSWORD
    global EXCLUDED_SLA_ALERT_TYPES
    global EXECUTION_THREAD_LONG_TIMEOUT
    global FORCED_ALERTS
    global GLOBAL_SLA_SETTINGS
    global GUI_WHITELIST_EXCLUDED_OBSERVABLE_TYPES
    global INSTANCE_TYPE
    global LOCK_TIMEOUT_SECONDS
    global LOG_DIRECTORY
    global LOG_LEVEL
    global MANAGED_NETWORKS
    global MODULE_STATS_DIR
    global OTHER_PROXIES
    global OTHER_SLA_SETTINGS
    global PROXIES
    global SAQ_HOME
    global SAQ_NODE
    global SAQ_NODE_ID
    global SAQ_RELATIVE_DIR
    global SEMAPHORES_ENABLED
    global STATS_DIR
    global TEMP_DIR
    global TOR_PROXY
    global YSS_BASE_DIR
    global YSS_SOCKET_DIR

    SAQ_HOME = None
    SAQ_NODE = None
    SAQ_NODE_ID = None
    API_PREFIX = None
    SAQ_RELATIVE_DIR = None
    CONFIG = None
    CONFIG_PATHS = []
    DATA_DIR = None
    TEMP_DIR = None
    DEFAULT_ENCODING = None
    SEMAPHORES_ENABLED = False
    PROXIES = {}
    OTHER_PROXIES = {}
    TOR_PROXY = None
    # list of iptools.IpRange objects defined in [network_configuration]
    MANAGED_NETWORKS = None
    # set this to True to force all anlaysis to result in an alert being generated
    FORCED_ALERTS = False
    # the gpg private key password for encrypting/decrypting archive files
    # this can be provided on the command line so that these files can also be analyzed
    ENCRYPTION_PASSWORD = None

    # the global log level setting
    LOG_LEVEL = logging.INFO
    # global logging directory (relative to DATA_DIR)
    LOG_DIRECTORY = None

    # directory containing statistical runtime info
    STATS_DIR = None
    MODULE_STATS_DIR = None

    # are we running as a daemon in the background?
    DAEMON_MODE = False

    # path to the certifcate chain used by all SSL certs
    CA_CHAIN_PATH = None

    # what type of instance is this?
    INSTANCE_TYPE = INSTANCE_TYPE_PRODUCTION

    # SLA settings
    GLOBAL_SLA_SETTINGS = None
    OTHER_SLA_SETTINGS = []
    EXCLUDED_SLA_ALERT_TYPES = []

    # Yara Scanner Server base directory
    YSS_BASE_DIR = None
    YSS_SOCKET_DIR = None

    # set to True to cause tracebacks to be dumped to standard output
    # useful when debugging or testing
    DUMP_TRACEBACKS = False

    # the amount of time (in seconds) that a lock in the locks table is valid
    LOCK_TIMEOUT_SECONDS = None

    # amount of time (in seconds) before a process blows up because a threaded module won't stop
    EXECUTION_THREAD_LONG_TIMEOUT = None

    # the company/custom this node belongs to
    COMPANY_NAME = None
    COMPANY_ID = None

    # go ahead and try to figure out what text encoding we're using
    DEFAULT_ENCODING = locale.getpreferredencoding()

    # list of observable types we want to exclude from whitelisting (via the GUI)
    GUI_WHITELIST_EXCLUDED_OBSERVABLE_TYPES = []

    # do we want to force alerts?
    if args:
        FORCED_ALERTS = args.force_alerts

    # what is the root directory of the entire system?
    if saq_home is not None:
        SAQ_HOME = saq_home
    elif 'SAQ_HOME' in os.environ:
        SAQ_HOME = os.environ['SAQ_HOME']
    else:
        SAQ_HOME = '.'

    if not os.path.isdir(SAQ_HOME):
        sys.stderr.write("invalid root SAQ directory {0}\n".format(SAQ_HOME))
        sys.exit(1)

    # XXX not sure we need this SAQ_RELATIVE_DIR anymore -- check it out
    # this system was originally designed to run out of /opt/saq
    # later we modified to run out of anywhere for command line correlation
    # when running the GUI in apache you have no control over the current working directory
    # so we specify what directory we'd *want* to be running out of here (even if we're not actually)
    # this only matters when loading alerts
    # this defaults to the current working directory
    SAQ_RELATIVE_DIR = os.path.relpath(os.getcwd(), start=SAQ_HOME)
    if relative_dir:
        SAQ_RELATIVE_DIR = relative_dir

    # load configuration file
    # defaults to $SAQ_HOME/etc/saq.ini
    if args:
        if args.config_paths:
            config_paths = args.config_paths

    if config_paths is None:
        config_paths = []

    # make each relative config path absolute to SAQ_HOME
    CONFIG_PATHS = [
        os.path.join(SAQ_HOME, p) if not os.path.isabs(p) else p
        for p in config_paths
    ]

    # add any config files specified in SAQ_CONFIG_PATHS env var (command separated)
    #sys.stderr.write("SAQ_CONFIG_PATHS = {}\n".format(os.environ['SAQ_CONFIG_PATHS']))
    if 'SAQ_CONFIG_PATHS' in os.environ:
        for config_path in os.environ['SAQ_CONFIG_PATHS'].split(','):
            config_path = config_path.strip()
            if not os.path.isabs(config_path):
                config_path = os.path.join(SAQ_HOME, config_path)
            if not os.path.exists(config_path):
                sys.stderr.write(
                    "WARNING: config path {} specified in SAQ_CONFIG_PATHS env var does not exist\n"
                    .format(config_path))
            else:
                if config_path not in CONFIG_PATHS:
                    CONFIG_PATHS.append(config_path)

    # if $SAQ_HOME/etc/saq.ini exists then we use that as the last config if it's not already specified
    default_config_path = os.path.join(SAQ_HOME, 'etc', 'saq.ini')

    # use unit test config if we are running a unit test
    if unittest:
        default_config_path = os.path.join(SAQ_HOME, 'etc', 'saq.unittest.ini')

    if os.path.exists(default_config_path):
        if default_config_path not in CONFIG_PATHS:
            CONFIG_PATHS.append(default_config_path)

    try:
        load_configuration()
    except Exception as e:
        sys.stderr.write("ERROR: unable to load configuration: {0}".format(
            str(e)))
        sys.exit(1)

    DATA_DIR = os.path.join(SAQ_HOME, CONFIG['global']['data_dir'])
    TEMP_DIR = os.path.join(DATA_DIR, CONFIG['global']['tmp_dir'])
    COMPANY_NAME = CONFIG['global']['company_name']
    COMPANY_ID = CONFIG['global'].getint('company_id')

    # lock_timeout is specified as MM:SS in the configuration
    minutes, seconds = map(int, CONFIG['global']['lock_timeout'].split(':'))
    LOCK_TIMEOUT_SECONDS = (minutes * 60) + seconds
    EXECUTION_THREAD_LONG_TIMEOUT = CONFIG['global'].getint(
        'execution_thread_long_timeout')

    # user specified log level
    LOG_LEVEL = logging.INFO
    if args:
        if args.log_level:
            LOG_LEVEL = args.log_level

    # make sure the logs directory exists
    LOG_DIRECTORY = os.path.join(DATA_DIR, 'logs')
    if not os.path.exists(LOG_DIRECTORY):
        try:
            os.mkdir(LOG_DIRECTORY)
        except Exception as e:
            sys.stderr.write("unable to mkdir {}: {}\n".format(
                LOG_DIRECTORY, e))
            sys.exit(1)

    # by default we log to the console
    if logging_config_path is None:
        logging_config_path = os.path.join(SAQ_HOME, 'etc',
                                           'console_logging.ini')

    # we can override this on the command line
    # this is what we use for production engine settings
    if args:
        if args.logging_config_path:
            logging_config_path = args.logging_config_path

    # we can re-initialize later if we have to
    try:
        initialize_logging(
            logging_config_path
        )  # this log file just gets some startup information
    except Exception as e:
        sys.exit(1)

    GUI_WHITELIST_EXCLUDED_OBSERVABLE_TYPES = [
        _.strip() for _ in CONFIG['gui']
        ['whitelist_excluded_observable_types'].split(',')
    ]

    for o_type in GUI_WHITELIST_EXCLUDED_OBSERVABLE_TYPES:
        if o_type not in VALID_OBSERVABLE_TYPES:
            logging.error(
                f"invalid observable type {o_type} specified in [gui] whitelist_excluded_observable_types"
            )

    # make this a faster lookup
    GUI_WHITELIST_EXCLUDED_OBSERVABLE_TYPES = set(
        GUI_WHITELIST_EXCLUDED_OBSERVABLE_TYPES)

    # load global SLA settings
    GLOBAL_SLA_SETTINGS = SLA(None, CONFIG['SLA'].getboolean('enabled'),
                              CONFIG['SLA'].getint('time_to_dispo'),
                              CONFIG['SLA'].getint('approaching_warn'), None,
                              None)

    EXCLUDED_SLA_ALERT_TYPES = [
        x.strip() for x in CONFIG['SLA']['excluded_alert_types'].split(',')
    ]

    # load all the other SLA settings
    for section in [s for s in CONFIG.keys() if s.startswith('SLA_')]:
        logging.debug("loading {}".format(section))
        OTHER_SLA_SETTINGS.append(
            SLA(section[len('SLA_'):], CONFIG[section].getboolean('enabled'),
                CONFIG[section].getint('time_to_dispo'),
                CONFIG[section].getint('approaching_warn'),
                CONFIG[section]['property'], CONFIG[section]['value']))

    # what node is this?
    try:
        SAQ_NODE = CONFIG['global']['node']
        if SAQ_NODE == 'AUTO':
            SAQ_NODE = socket.getfqdn()
    except Exception as e:
        sys.stderr.write("unable to get hostname: {}\n".format(e))
        sys.exit(1)

    # what prefix do other systems use to communicate to the API server for this node?
    try:
        API_PREFIX = CONFIG['api']['prefix']
        if API_PREFIX == 'AUTO':
            API_PREFIX = socket.getfqdn()
        logging.debug("node {} has api prefix {}".format(SAQ_NODE, API_PREFIX))
    except Exception as e:
        sys.stderr.write("unable to get hostname: {}\n".format(e))
        sys.exit(1)

    # what type of instance is this?
    if 'instance_type' in CONFIG['global']:
        INSTANCE_TYPE = CONFIG['global']['instance_type']
        if INSTANCE_TYPE not in [
                INSTANCE_TYPE_PRODUCTION, INSTANCE_TYPE_QA, INSTANCE_TYPE_DEV
        ]:
            logging.warning(
                "invalid instance type {}: defaulting to {}".format(
                    INSTANCE_TYPE, INSTANCE_TYPE_PRODUCTION))
            INSTANCE_TYPE = INSTANCE_TYPE_PRODUCTION
    else:
        logging.warning(
            "missing configuration instance_type in global section (defaulting to instance type {})"
            .format(INSTANCE_TYPE_PRODUCTION))

    if FORCED_ALERTS:  # lol
        logging.warning(
            " ****************************************************************** "
        )
        logging.warning(
            " ****************************************************************** "
        )
        logging.warning(
            " **** WARNING **** ALL ANALYSIS RESULTS IN ALERTS **** WARNING **** "
        )
        logging.warning(
            " ****************************************************************** "
        )
        logging.warning(
            " ****************************************************************** "
        )

    # warn if timezone is not UTC
    #if time.strftime("%z") != "+0000":
    #logging.warning("Timezone is not UTC. All ACE systems in a cluster should be in UTC.")

    # we can globally disable semaphores with this flag
    SEMAPHORES_ENABLED = CONFIG.getboolean('global', 'enable_semaphores')

    # some settings can be set to PROMPT
    for section in CONFIG.sections():
        for (name, value) in CONFIG.items(section):
            if value == 'PROMPT':
                CONFIG.set(
                    section, name,
                    getpass("Enter the value for {0}:{1}: ".format(
                        section, name)))

    # make sure we've got the ca chain for SSL certs
    CA_CHAIN_PATH = os.path.join(SAQ_HOME, CONFIG['SSL']['ca_chain_path'])
    ace_api.set_default_ssl_ca_path(CA_CHAIN_PATH)

    # XXX this should probably move to the yara scanning module
    # set the location we'll be running yss out of
    YSS_BASE_DIR = os.path.join(SAQ_HOME, CONFIG['yara']['yss_base_dir'])
    if not os.path.exists(YSS_BASE_DIR):
        logging.critical(
            "[yara][yss_base_dir] is set to {} but does not exist".format(
                YSS_BASE_DIR))

    YSS_SOCKET_DIR = os.path.join(YSS_BASE_DIR,
                                  CONFIG['yara']['yss_socket_dir'])

    # initialize the database connection
    initialize_database()

    # initialize fallback semaphores
    initialize_fallback_semaphores()

    # XXX get rid of this
    try:
        maliciousdir = CONFIG.get("global", "malicious")
    except Exception:
        # narrow from a bare except: don't swallow SystemExit/KeyboardInterrupt
        maliciousdir = "malicious"

    STATS_DIR = os.path.join(DATA_DIR, 'stats')
    MODULE_STATS_DIR = os.path.join(STATS_DIR, 'modules')

    # make sure some key directories exists
    for dir_path in [
            # anaysis data
            os.path.join(DATA_DIR, CONFIG['global']['node']),
            #os.path.join(SAQ_HOME, 'var', 'locks'), # XXX remove
            os.path.join(DATA_DIR, 'review', 'rfc822'),
            os.path.join(DATA_DIR, 'review', 'misc'),
            os.path.join(DATA_DIR, CONFIG['global']['error_reporting_dir']),
            STATS_DIR,
            MODULE_STATS_DIR,
            os.path.join(STATS_DIR, 'brocess'),  # get rid of this
            os.path.join(STATS_DIR, 'metrics'),
            os.path.join(DATA_DIR, CONFIG['splunk_logging']['splunk_log_dir']),
            # NOTE: DATA_DIR was previously (mistakenly) joined twice here
            os.path.join(DATA_DIR, CONFIG['elk_logging']['elk_log_dir']),
            os.path.join(TEMP_DIR),
            os.path.join(SAQ_HOME, CONFIG['yara']['yss_base_dir'], 'logs'),
    ]:  # XXX this should be in YSS
        #os.path.join(SAQ_HOME, maliciousdir) ]: # XXX remove
        try:
            if not os.path.isdir(dir_path):
                os.makedirs(dir_path)
        except Exception as e:
            logging.error("unable to create required directory {}: {}".format(
                dir_path, str(e)))
            sys.exit(1)

    # clear out any proxy environment variables if they exist
    for proxy_key in ['http_proxy', 'https_proxy', 'ftp_proxy']:
        if proxy_key in os.environ:
            logging.debug(
                "removing proxy environment variable for {}".format(proxy_key))
            del os.environ[proxy_key]

    # set up the PROXY global dict (to be used with the requests library)
    for proxy_key in ['http', 'https']:
        if CONFIG['proxy']['host'] and CONFIG['proxy']['port'] and CONFIG[
                'proxy']['transport']:
            if CONFIG['proxy']['user'] and CONFIG['proxy']['password']:
                PROXIES[proxy_key] = '{}://{}:{}@{}:{}'.format(
                    CONFIG['proxy']['transport'], CONFIG['proxy']['user'],
                    CONFIG['proxy']['password'], CONFIG['proxy']['host'],
                    CONFIG['proxy']['port'])
            else:
                PROXIES[proxy_key] = '{}://{}:{}'.format(
                    CONFIG['proxy']['transport'], CONFIG['proxy']['host'],
                    CONFIG['proxy']['port'])
            logging.debug("proxy for {} set to {}".format(
                proxy_key, PROXIES[proxy_key]))

    # load any additional proxies specified in the config sections proxy_*
    for section in CONFIG.keys():
        if section.startswith('proxy_'):
            proxy_name = section[len('proxy_'):]
            OTHER_PROXIES[proxy_name] = {}
            for proxy_key in ['http', 'https']:
                if CONFIG[section]['host'] and CONFIG[section][
                        'port'] and CONFIG[section]['transport']:
                    if 'user' in CONFIG[section] and 'password' in CONFIG[section] \
                    and CONFIG[section]['user'] and CONFIG[section]['password']:
                        OTHER_PROXIES[proxy_name][
                            proxy_key] = '{}://{}:{}@{}:{}'.format(
                                CONFIG[section]['transport'],
                                CONFIG[section]['user'],
                                CONFIG[section]['password'],
                                CONFIG[section]['host'],
                                CONFIG[section]['port'])
                    else:
                        OTHER_PROXIES[proxy_name][
                            proxy_key] = '{}://{}:{}'.format(
                                CONFIG[section]['transport'],
                                CONFIG[section]['host'],
                                CONFIG[section]['port'])

    # load global constants
    import iptools

    MANAGED_NETWORKS = []
    for cidr in CONFIG['network_configuration']['managed_networks'].split(','):
        try:
            if cidr:
                MANAGED_NETWORKS.append(iptools.IpRange(cidr.strip()))
        except Exception as e:
            logging.error("invalid managed network {}: {}".format(
                cidr, str(e)))

    # are we running as a daemon?
    if args:
        DAEMON_MODE = args.daemon

    # initialize other systems
    initialize_remediation_system_manager()
    initialize_message_system()

    logging.debug("SAQ initialized")
Example #4
0
def initialize(saq_home=None, config_paths=None, logging_config_path=None, args=None, relative_dir=None):
    """Initialize the global ACE runtime state.

    Loads configuration, sets up logging, creates required directories,
    loads SLA and proxy settings, and initializes the database connection.
    Must be called once before any other saq functionality is used.
    Exits the process on unrecoverable errors.

    :param saq_home: root directory of the ACE install (defaults to the
        SAQ_HOME environment variable, then the current directory)
    :param config_paths: optional list of configuration file paths to load
    :param logging_config_path: path to the logging configuration file
        (defaults to etc/console_logging.ini)
    :param args: optional parsed command line arguments; may override log
        level, config paths, logging config path, daemon mode, forced
        alerts and single threaded mode
    :param relative_dir: optional override for SAQ_RELATIVE_DIR
    """

    from saq.database import initialize_database

    global SAQ_HOME
    global SAQ_NODE
    global SAQ_RELATIVE_DIR
    global CONFIG
    global CONFIG_PATHS
    global SINGLE_THREADED
    global DEFAULT_ENCODING
    global SEMAPHORES_ENABLED
    global MANAGED_NETWORKS
    global FORCED_ALERTS
    global LOG_LEVEL
    global DAEMON_MODE
    global CA_CHAIN_PATH
    global INSTANCE_TYPE
    global GLOBAL_SLA_SETTINGS
    global EXCLUDED_SLA_ALERT_TYPES
    global STATS_DIR
    global MODULE_STATS_DIR
    global YSS_BASE_DIR
    global YSS_SOCKET_DIR

    # go ahead and try to figure out what text encoding we're using
    DEFAULT_ENCODING = locale.getpreferredencoding()

    # do we want to force alerts?
    if args:
        FORCED_ALERTS = args.force_alerts

    # do we want to run in single threaded mode?
    if args:
        SINGLE_THREADED = args.single_threaded

    # what is the root directory of the entire system?
    if saq_home is not None:
        SAQ_HOME = saq_home
    elif 'SAQ_HOME' in os.environ:
        SAQ_HOME = os.environ['SAQ_HOME']
    else:
        SAQ_HOME = '.'

    if not os.path.isdir(SAQ_HOME):
        sys.stderr.write("invalid root SAQ directory {0}\n".format(SAQ_HOME))
        sys.exit(1)

    # XXX not sure we need this SAQ_RELATIVE_DIR anymore -- check it out
    # this system was originally designed to run out of /opt/saq
    # later we modified to run out of anywhere for command line correlation
    # when running the GUI in apache you have no control over the current working directory
    # so we specify what directory we'd *want* to be running out of here (even if we're not actually)
    # this only matters when loading alerts
    # this defaults to the current working directory
    SAQ_RELATIVE_DIR = os.getcwd()
    if relative_dir:
        SAQ_RELATIVE_DIR = relative_dir

    # load configuration file
    # defaults to $SAQ_HOME/etc/saq.ini
    if args:
        if args.config_paths:
            config_paths = args.config_paths

    if config_paths is None:
        config_paths = []

    # make each relative config path absolute to SAQ_HOME
    CONFIG_PATHS = [os.path.join(SAQ_HOME, p) if not os.path.isabs(p) else p for p in config_paths]

    # add any config files specified in SAQ_CONFIG_PATHS env var (command separated)
    #sys.stderr.write("SAQ_CONFIG_PATHS = {}\n".format(os.environ['SAQ_CONFIG_PATHS']))
    if 'SAQ_CONFIG_PATHS' in os.environ:
        for config_path in os.environ['SAQ_CONFIG_PATHS'].split(','):
            config_path = config_path.strip()
            if not os.path.isabs(config_path):
                config_path = os.path.join(SAQ_HOME, config_path)
            if not os.path.exists(config_path):
                sys.stderr.write("WARNING: config path {} specified in SAQ_CONFIG_PATHS env var does not exist\n".format(config_path))
            else:
                if config_path not in CONFIG_PATHS:
                    CONFIG_PATHS.append(config_path)

    # if $SAQ_HOME/etc/saq.ini exists then we use that as the last config if it's not already specified
    default_config_path = os.path.join(SAQ_HOME, 'etc', 'saq.ini')
    if os.path.exists(default_config_path):
        if default_config_path not in CONFIG_PATHS:
            CONFIG_PATHS.append(default_config_path)

    try:
        load_configuration()
    except Exception as e:
        sys.stderr.write("ERROR: unable to load configuration: {0}".format(str(e)))
        sys.exit(1)

    # user specified log level
    LOG_LEVEL = logging.INFO
    if args:
        if args.log_level:
            LOG_LEVEL = args.log_level

    # make sure the logs directory exists
    logs_dir = os.path.join(SAQ_HOME, LOG_DIRECTORY)
    if not os.path.exists(logs_dir):
        try:
            os.mkdir(logs_dir)
        except Exception as e:
            sys.stderr.write("unable to mkdir {}: {}\n".format(logs_dir, e))
            sys.exit(1)

    # by default we log to the console
    if logging_config_path is None:
        logging_config_path = os.path.join(SAQ_HOME, 'etc', 'console_logging.ini')

    # we can override this on the command line
    # this is what we use for production engine settings
    if args:
        if args.logging_config_path:
            logging_config_path = args.logging_config_path

    # we can re-initialize later if we have to
    try:
        initialize_logging(logging_config_path) # this log file just gets some startup information
    except Exception as e:
        sys.exit(1)

    # load global SLA settings
    GLOBAL_SLA_SETTINGS = SLA(None,
                              CONFIG['SLA'].getboolean('enabled'),
                              CONFIG['SLA'].getint('time_to_dispo'),
                              CONFIG['SLA'].getint('approaching_warn'),
                              None, None)

    EXCLUDED_SLA_ALERT_TYPES = [x.strip() for x in CONFIG['SLA']['excluded_alert_types'].split(',')]

    # load all the other SLA settings
    for section in [s for s in CONFIG.keys() if s.startswith('SLA_')]:
        logging.debug("loading {}".format(section))
        OTHER_SLA_SETTINGS.append(SLA(section[len('SLA_'):],
                                      CONFIG[section].getboolean('enabled'),
                                      CONFIG[section].getint('time_to_dispo'),
                                      CONFIG[section].getint('approaching_warn'),
                                      CONFIG[section]['property'],
                                      CONFIG[section]['value']))

    # what node is this?
    try:
        SAQ_NODE = CONFIG['global']['node']
        logging.debug("node {}".format(SAQ_NODE))
    except Exception as e:
        sys.stderr.write("unable to get hostname: {}\n".format(e))
        sys.exit(1)

    # what type of instance is this?
    if 'instance_type' in CONFIG['global']:
        INSTANCE_TYPE = CONFIG['global']['instance_type']
        if INSTANCE_TYPE not in [ INSTANCE_TYPE_PRODUCTION, INSTANCE_TYPE_QA, INSTANCE_TYPE_DEV ]:
            logging.warning("invalid instance type {}: defaulting to {}".format(INSTANCE_TYPE, INSTANCE_TYPE_PRODUCTION))
            INSTANCE_TYPE = INSTANCE_TYPE_PRODUCTION
    else:
        logging.warning("missing configuration instance_type in global section (defaulting to instance type {})".format(INSTANCE_TYPE_PRODUCTION))

    if FORCED_ALERTS: # lol
        logging.warning(" ****************************************************************** ")
        logging.warning(" ****************************************************************** ")
        logging.warning(" **** WARNING **** ALL ANALYSIS RESULTS IN ALERTS **** WARNING **** ")
        logging.warning(" ****************************************************************** ")
        logging.warning(" ****************************************************************** ")

    # we can globally disable semaphores with this flag
    SEMAPHORES_ENABLED = CONFIG.getboolean('global', 'enable_semaphores')

    # log all SQL commands if we are running in debug mode
    if CONFIG['global'].getboolean('log_sql'):
        logging.getLogger('sqlalchemy.engine').setLevel(logging.INFO)

    # some settings can be set to PROMPT
    for section in CONFIG.sections():
        for (name, value) in CONFIG.items(section):
            if value == 'PROMPT':
                CONFIG.set(section, name, getpass("Enter the value for {0}:{1}: ".format(section, name)))

    # make sure we've got the ca chain for SSL certs
    CA_CHAIN_PATH = os.path.join(SAQ_HOME, CONFIG['SSL']['ca_chain_path'])

    # set the location we'll be running yss out of
    YSS_BASE_DIR = os.path.join(SAQ_HOME, CONFIG['yara']['yss_base_dir'])
    if not os.path.exists(YSS_BASE_DIR):
        logging.critical("[yara][yss_base_dir] is set to {} but does not exist".format(YSS_BASE_DIR))

    YSS_SOCKET_DIR = os.path.join(YSS_BASE_DIR, CONFIG['yara']['yss_socket_dir'])

    # initialize the database connection
    initialize_database()

    # initialize fallback semaphores
    initialize_fallback_semaphores()

    try:
        maliciousdir = CONFIG.get("global", "malicious")
    except Exception:
        # narrow from a bare except: don't swallow SystemExit/KeyboardInterrupt
        maliciousdir = "malicious"

    STATS_DIR = os.path.join(SAQ_HOME, 'stats')
    MODULE_STATS_DIR = os.path.join(STATS_DIR, 'modules')

    # make sure some key directories exists
    for dir_path in [
        os.path.join(SAQ_HOME, CONFIG['global']['data_dir'], CONFIG['global']['node']),
        os.path.join(SAQ_HOME, 'var', 'locks'),
        os.path.join(SAQ_HOME, 'var', 'incoming'),
        os.path.join(SAQ_HOME, 'review', 'rfc822'),
        os.path.join(SAQ_HOME, 'review', 'misc'),
        STATS_DIR,
        MODULE_STATS_DIR,
        os.path.join(SAQ_HOME, 'stats', 'brocess'),
        os.path.join(SAQ_HOME, 'stats', 'metrics'),
        os.path.join(SAQ_HOME, CONFIG['splunk_logging']['splunk_log_dir']),
        os.path.join(SAQ_HOME, CONFIG['elk_logging']['elk_log_dir']),
        os.path.join(SAQ_HOME, CONFIG['global']['tmp_dir']),
        os.path.join(SAQ_HOME, CONFIG['yara']['yss_base_dir'], 'logs'),
        os.path.join(SAQ_HOME, maliciousdir) ]:
        try:
            if not os.path.isdir(dir_path):
                os.makedirs(dir_path)
        except Exception as e:
            logging.error("unable to create required directory {}: {}".format(dir_path, str(e)))
            sys.exit(1)

    # make sure the collection directory for each enabled engine exists
    for section in CONFIG.keys():
        if section.startswith('engine_'):
            engine_config = CONFIG[section]
            if 'collection_dir' in engine_config:
                collection_dir = os.path.join(SAQ_HOME, engine_config['collection_dir'])
                if not os.path.isdir(collection_dir):
                    logging.info("creating collection directory {} for {}".format(collection_dir, section))
                    try:
                        os.makedirs(collection_dir)
                    except Exception as e:
                        logging.error("unable to create directory {}: {}".format(collection_dir, e))
                        sys.exit(1)

    # clear out any proxy environment variables if they exist
    for proxy_key in [ 'http_proxy', 'https_proxy', 'ftp_proxy' ]:
        if proxy_key in os.environ:
            logging.debug("removing proxy environment variable for {}".format(proxy_key))
            del os.environ[proxy_key]

    # set up the PROXY global dict (to be used with the requests library)
    for proxy_key in [ 'http', 'https' ]:
        if CONFIG['proxy']['host'] and CONFIG['proxy']['port'] and CONFIG['proxy']['transport']:
            if CONFIG['proxy']['user'] and CONFIG['proxy']['password']:
                PROXIES[proxy_key] = '{}://{}:{}@{}:{}'.format(CONFIG['proxy']['transport'], CONFIG['proxy']['user'],
                CONFIG['proxy']['password'], CONFIG['proxy']['host'], CONFIG['proxy']['port'])
            else:
                PROXIES[proxy_key] = '{}://{}:{}'.format(CONFIG['proxy']['transport'], CONFIG['proxy']['host'], CONFIG['proxy']['port'])
            logging.debug("proxy for {} set to {}".format(proxy_key, PROXIES[proxy_key]))

    # load any additional proxies specified in the config sections proxy_*
    for section in CONFIG.keys():
        if section.startswith('proxy_'):
            proxy_name = section[len('proxy_'):]
            OTHER_PROXIES[proxy_name] = {}
            for proxy_key in [ 'http', 'https' ]:
                if CONFIG[section]['host'] and CONFIG[section]['port'] and CONFIG[section]['transport']:
                    if 'user' in CONFIG[section] and 'password' in CONFIG[section] \
                    and CONFIG[section]['user'] and CONFIG[section]['password']:
                        OTHER_PROXIES[proxy_name][proxy_key] = '{}://{}:{}@{}:{}'.format(
                        CONFIG[section]['transport'], CONFIG[section]['user'], CONFIG[section]['password'],
                        CONFIG[section]['host'], CONFIG[section]['port'])
                    else:
                        OTHER_PROXIES[proxy_name][proxy_key] = '{}://{}:{}'.format(
                        CONFIG[section]['transport'], CONFIG[section]['host'], CONFIG[section]['port'])

    # load global constants
    import iptools

    MANAGED_NETWORKS = []
    for cidr in CONFIG['network_configuration']['managed_networks'].split(','):
        try:
            if cidr:
                MANAGED_NETWORKS.append(iptools.IpRange(cidr.strip()))
        except Exception as e:
            logging.error("invalid managed network {}: {}".format(cidr, str(e)))

    # are we running as a daemon?
    if args:
        DAEMON_MODE = args.daemon

    logging.debug("SAQ initialized")