Example #1
def set_encryption_password(password, old_password=None, key=None):
    """Sets the encryption password for the system. If a password has already been set, then
       old_password can be provided to change the password. Otherwise, the old password is
       over-written by the new password.
       If the key parameter is None then the PRIMARY AES KEY is random. Otherwise, the given key is used.
       The default of a random key is fine."""
    assert isinstance(password, str)
    assert old_password is None or isinstance(old_password, str)
    assert key is None or (isinstance(key, bytes) and len(key) == 32)

    # has the encryption password been set yet?
    if encryption_key_set():
        # did we provide a password for it?
        if old_password is not None:
            # get the existing encryption password
            saq.ENCRYPTION_PASSWORD = get_aes_key(old_password)

    if saq.ENCRYPTION_PASSWORD is None:
        # otherwise we just make a new one
        if key is None:
            saq.ENCRYPTION_PASSWORD = Crypto.Random.OSRNG.posix.new().read(32)
        else:
            saq.ENCRYPTION_PASSWORD = key

    # now we compute the key to use to encrypt the encryption key using the user-supplied password
    salt = Crypto.Random.OSRNG.posix.new().read(
        saq.CONFIG['encryption'].getint('salt_size', fallback=32))
    iterations = saq.CONFIG['encryption'].getint('iterations', fallback=8192)
    result = PBKDF2(password, salt, 64, iterations)
    user_encryption_key = result[:32]  # the first 32 bytes is the user encryption key
    verification_key = result[32:]  # and the second 32 bytes is used for password verification

    create_directory(get_encryption_store_path())

    with open(os.path.join(get_encryption_store_path(), 'verification'),
              'wb') as fp:
        fp.write(verification_key)

    encrypted_encryption_key = encrypt_chunk(saq.ENCRYPTION_PASSWORD,
                                             password=user_encryption_key)
    with open(os.path.join(get_encryption_store_path(), 'key'), 'wb') as fp:
        fp.write(encrypted_encryption_key)

    with open(os.path.join(get_encryption_store_path(), 'salt'), 'wb') as fp:
        fp.write(salt)

    with open(os.path.join(get_encryption_store_path(), 'iterations'),
              'w') as fp:
        fp.write(str(iterations))
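
A minimal sketch of the key-derivation scheme above, using only the standard library (hashlib.pbkdf2_hmac standing in for PyCrypto's PBKDF2, which defaults to HMAC-SHA1; SHA-256 here is an assumption for illustration). The 64-byte result splits exactly as in the function: a 32-byte key that encrypts the primary AES key at rest, and a 32-byte value stored for password verification.

import hashlib
import os

password = 'hunter2'   # hypothetical user-supplied password
salt = os.urandom(32)  # matches the default salt_size of 32
iterations = 8192      # matches the default iteration count

# derive 64 bytes and split, mirroring set_encryption_password()
result = hashlib.pbkdf2_hmac('sha256', password.encode(), salt, iterations, dklen=64)
user_encryption_key = result[:32]  # encrypts saq.ENCRYPTION_PASSWORD at rest
verification_key = result[32:]     # written to the 'verification' file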
Example #2
    def initialize_service_environment(self):

        # make sure these directories exist
        for dir_path in [self.incoming_dir, self.persistence_dir]:
            create_directory(dir_path)

        # load the remote node groups if we haven't already
        if not self.remote_node_groups:
            self.load_groups()

        # make sure at least one is loaded
        if not self.remote_node_groups:
            raise RuntimeError(
                "no RemoteNodeGroup objects have been added to {}".format(
                    self))

        # call any subclass-defined initialization routines
        self.initialize_collector()
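
A hypothetical subclass sketch of the hook this method drives: by the time initialize_collector() runs, the required directories exist and at least one RemoteNodeGroup has been loaded. The class name and cache directory below are illustrative.

class MyCollector(Collector):
    def initialize_collector(self):
        # subclass-specific setup; runs last, after initialize_service_environment()
        # has verified directories and remote node groups
        create_directory(os.path.join(self.persistence_dir, 'cache'))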
Example #3
File: solera.py Project: unixfreak0037/ACE
    def execute_analysis(self, observable):
        analysis = self.create_analysis(observable)

        # where are we putting the pcap?
        pcap_dir = os.path.join(self.root.storage_dir, 'pcap', observable.id)
        create_directory(pcap_dir)
        pcap_zip_path = os.path.join(pcap_dir, f'{observable.id}.zip')

        bpf_filter = None

        #
        # NOTE the bpf filter doesn't seem to have any effect
        #

        # figure out what our filter should be based on the type of observable passed in
        if observable.type == F_IPV4:
            src = observable.value
            src_port = dst = dst_port = None
            bpf_filter = f'(host {src})'
            query = [f'ipv4_address="{src}"']
        elif observable.type == F_IPV4_CONVERSATION:
            src, dst = parse_ipv4_conversation(observable.value)
            src_port = dst_port = None
            bpf_filter = f'(host {src} and host {dst})'
            query = [f'ipv4_initiator="{src}"', f'ipv4_responder="{dst}"']
        elif observable.type == F_IPV4_FULL_CONVERSATION:
            src, src_port, dst, dst_port = parse_ipv4_full_conversation(
                observable.value)
            bpf_filter = f'((host {src} and port {src_port}) and (host {dst} and port {dst_port}))'
            query = [
                f'ipv4_initiator="{src}"', f'port_initiator="{src_port}"',
                f'ipv4_responder="{dst}"', f'port_responder="{dst_port}"'
            ]

        # ace stores everything in UTC -- solera either always uses some globally defined timezone
        # or it uses a timezone specified for the user (not sure)
        # in either case, translate the time to the timezone specified in the config
        extraction_time = observable.time if observable.time is not None else self.root.event_time
        start_time = extraction_time - create_timedelta(
            self.config['relative_time_before'])
        end_time = extraction_time + create_timedelta(
            self.config['relative_time_after'])

        start_time = start_time.astimezone(
            pytz.timezone(self.config['timezone']))
        end_time = end_time.astimezone(pytz.timezone(self.config['timezone']))

        start_time = start_time.strftime('%Y-%m-%dT%H:%M:%S')
        end_time = end_time.strftime('%Y-%m-%dT%H:%M:%S')

        logging.debug(
            f"collecting pcap from {observable} into {pcap_dir} "
            f"start time {start_time} end time {end_time} query {query} bpf_filter {bpf_filter}"
        )

        try:
            from SoleraConnector import SoleraConnector
            c = SoleraConnector(self.config['username'],
                                self.config['api_key'], self.config['ipv4'])

            # NOTE the appliances={} in the query part of the URL is not documented but seems to be required
            result = c.callAPI('GET', '/cmc_settings/appliances')
            appliance_ids = ','.join(
                [str(_['Appliance']['id']) for _ in result['result']])

            result = c.callAPI(
                'GET',
                '/pcap/download/query?appliances={}'.format(appliance_ids),
                {
                    'timespan': {
                        'start': start_time,
                        'end': end_time
                    },
                    'query': query,
                    'name': '{}.pcap'.format(str(uuid.uuid4())),
                    #'download': {
                    #'type': 3 },
                    #'filter': bpf_filter,
                },
                pcap_zip_path)

            # the result comes back as a zip file of pcaps (one per sensor)
            with zipfile.ZipFile(pcap_zip_path) as fp:
                fp.extractall(path=pcap_dir)

            try:
                # remove the zip file once we've extracted
                os.remove(pcap_zip_path)
            except Exception as e:
                logging.error(f"unable to delete {pcap_zip_path}: {e}")
                report_exception()

            # only attempt the merge if we actually extracted any pcap files
            if len(os.listdir(pcap_dir)) > 0:
                # build command with correct pcap-ng files
                pcap_path = os.path.join(pcap_dir, 'merged.pcap')
                command = ['mergecap', '-w', pcap_path]
                command.extend(
                    os.path.join(pcap_dir, i) for i in os.listdir(pcap_dir))

                # merge all pcaps in pcap_dir into merged.pcap
                p = Popen(command, stdout=PIPE, stderr=PIPE)
                _stdout, _stderr = p.communicate()

                if os.path.getsize(pcap_path) in [92, 0]:
                    # for pcap-ng (the default), a size of 72 bytes means the pcap is empty of content
                    # also, a file of 0 means the pcap data was missing entirely
                    # merging 2 or more empty (either 0 or 72 bytes) pcap-ng files gives a pcap of size 92 bytes
                    # so we remove those
                    logging.debug(f"removing empty pcap file {pcap_path}")
                    try:
                        os.remove(pcap_path)
                    except Exception as e:
                        logging.error(
                            f"unable to remove empty pcap file {pcap_path}: {e}"
                        )
                        report_exception()
                else:
                    # add it as an observable to the analysis
                    pcap_file = analysis.add_observable(
                        F_FILE,
                        os.path.relpath(pcap_path,
                                        start=self.root.storage_dir))
                    pcap_file.add_tag('pcap')
                    analysis.pcap_paths.append(pcap_file.value)

            return True

        except Exception as e:
            logging.error(f"unable to extract pcap from {observable}: {e}")
            report_exception()
            analysis.error = str(e)
            return True
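
A minimal, runnable sketch of the time-window arithmetic above, with a plain timedelta standing in for create_timedelta and US/Eastern as an assumed configured timezone:

from datetime import datetime, timedelta
import pytz

# ACE stores times in UTC; Solera expects the configured timezone
extraction_time = datetime(2020, 1, 1, 12, 0, 0, tzinfo=pytz.utc)
start_time = extraction_time - timedelta(minutes=15)  # relative_time_before
end_time = extraction_time + timedelta(minutes=15)    # relative_time_after

tz = pytz.timezone('US/Eastern')
print(start_time.astimezone(tz).strftime('%Y-%m-%dT%H:%M:%S'))  # 2020-01-01T06:45:00
print(end_time.astimezone(tz).strftime('%Y-%m-%dT%H:%M:%S'))    # 2020-01-01T07:15:00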
Example #4
File: hunter.py Project: unixfreak0037/ACE
    def __init__(self, collector, hunt_type, rule_dirs, hunt_cls,
                 concurrency_limit, persistence_dir):
        assert isinstance(collector, Collector)
        assert isinstance(hunt_type, str)
        assert isinstance(rule_dirs, list)
        assert issubclass(hunt_cls, Hunt)
        assert concurrency_limit is None or isinstance(concurrency_limit, (int, str))
        assert isinstance(persistence_dir, str)

        # reference to the collector (used to send the Submission objects)
        self.collector = collector

        # primary execution thread
        self.manager_thread = None

        # shutdown valve
        self.manager_control_event = threading.Event()
        self.wait_control_event = threading.Event()

        # control signal to reload the hunts (set by SIGHUP indirectly)
        self.reload_hunts_flag = False

        # the type of hunting this manager manages
        self.hunt_type = hunt_type

        # the list of directories that contain the hunt configuration ini files for this type of hunt
        self.rule_dirs = rule_dirs

        # the class used to instantiate the rules in the given rules directories
        self.hunt_cls = hunt_cls

        # sqlite3 database used to keep track of hunt persistence data
        create_directory(os.path.dirname(get_hunt_db_path(self.hunt_type)))
        if not os.path.exists(get_hunt_db_path(self.hunt_type)):
            with open_hunt_db(self.hunt_type) as db:
                c = db.cursor()
                # XXX have to support all future schemas here -- not a great design
                c.execute("""
CREATE TABLE hunt ( 
    hunt_name TEXT NOT NULL,
    last_executed_time timestamp,
    last_end_time timestamp )""")
                c.execute("""
CREATE UNIQUE INDEX idx_name ON hunt(hunt_name)""")
                db.commit()

        # the list of Hunt objects that are being managed
        self._hunts = []

        # the type of concurrency constraint this type of hunt uses (can be None)
        # use the set_concurrency_limit() function to change it
        self.concurrency_type = None

        # the local threading.Semaphore if the type is CONCURRENCY_TYPE_LOCAL_SEMAPHORE
        # or the string name of the network semaphore if the type is CONCURRENCY_TYPE_NETWORK_SEMAPHORE
        self.concurrency_semaphore = None

        if concurrency_limit is not None:
            self.set_concurrency_limit(concurrency_limit)

        # this is set to True if load_hunts_from_config() is called
        # and used when reload_hunts_flag is set
        self.hunts_loaded_from_config = False
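
A hypothetical sketch of reading back the persistence row tracked by the schema this constructor creates; open_hunt_db and the hunt table come from the example, while the hunt type and name are illustrative.

with open_hunt_db('splunk') as db:
    c = db.cursor()
    c.execute("SELECT last_executed_time, last_end_time FROM hunt WHERE hunt_name = ?",
              ('my_hunt',))
    row = c.fetchone()  # None if the hunt has never been executed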
Example #5
def initialize(saq_home=None, 
               config_paths=None, 
               logging_config_path=None, 
               args=None, 
               relative_dir=None):

    from saq.database import initialize_database, initialize_node, initialize_automation_user

    global API_PREFIX
    global AUTOMATION_USER_ID
    global CA_CHAIN_PATH
    global COMPANY_ID
    global COMPANY_NAME
    global CONFIG
    global CONFIG_PATHS
    global DAEMON_DIR
    global DAEMON_MODE
    global DATA_DIR
    global DEFAULT_ENCODING
    global DUMP_TRACEBACKS
    global ECS_SOCKET_PATH
    global ENCRYPTION_INITIALIZED
    global ENCRYPTION_PASSWORD
    global ENCRYPTION_PASSWORD_PLAINTEXT
    global EXCLUDED_SLA_ALERT_TYPES
    global EXECUTION_THREAD_LONG_TIMEOUT
    global FORCED_ALERTS
    global GLOBAL_SLA_SETTINGS
    global GUI_WHITELIST_EXCLUDED_OBSERVABLE_TYPES
    global INSTANCE_TYPE
    global LOCK_TIMEOUT_SECONDS
    global LOG_DIRECTORY
    global LOG_LEVEL
    global MANAGED_NETWORKS
    global MODULE_STATS_DIR
    global OTHER_PROXIES 
    global OTHER_SLA_SETTINGS
    global PROXIES
    global SAQ_HOME
    global SAQ_NODE
    global SAQ_NODE_ID
    global SAQ_RELATIVE_DIR
    global SEMAPHORES_ENABLED
    global SERVICES_DIR
    global STATS_DIR
    global TEMP_DIR
    global TOR_PROXY

    SAQ_HOME = None
    SAQ_NODE = None
    SAQ_NODE_ID = None
    API_PREFIX = None
    SAQ_RELATIVE_DIR = None
    CONFIG = None
    CONFIG_PATHS = []
    DATA_DIR = None
    TEMP_DIR = None
    DEFAULT_ENCODING = None
    SEMAPHORES_ENABLED = False
    PROXIES = {}
    OTHER_PROXIES = {}
    TOR_PROXY = None
    # list of iptools.IpRange objects defined in [network_configuration]
    MANAGED_NETWORKS = None
    # set this to True to force all analysis to result in an alert being generated
    FORCED_ALERTS = False
    # the private key password for encrypting/decrypting archive files
    # NOTE this is the decrypted random string of bytes that is used to encrypt/decrypt using AES
    # NOTE both of these can stay None if encryption is not being used
    ENCRYPTION_PASSWORD = None
    # *this* is the password that is used to encrypt/decrypt the ENCRYPTION_PASSWORD at rest
    ENCRYPTION_PASSWORD_PLAINTEXT = None
    # set to True after we've initialized encryption
    ENCRYPTION_INITIALIZED = False

    # the global log level setting
    LOG_LEVEL = logging.INFO
    # global logging directory (relative to DATA_DIR)
    LOG_DIRECTORY = None

    # directory containing statistical runtime info
    STATS_DIR = None 
    MODULE_STATS_DIR = None

    # are we running as a daemon in the background?
    DAEMON_MODE = False

    # directory where pid files are stored for daemons
    DAEMON_DIR = None

    # directory where files are stored for running services
    SERVICES_DIR = None

    # path to the certificate chain used by all SSL certs
    CA_CHAIN_PATH = None

    # what type of instance is this?
    INSTANCE_TYPE = INSTANCE_TYPE_PRODUCTION

    # SLA settings
    GLOBAL_SLA_SETTINGS = None
    OTHER_SLA_SETTINGS = []
    EXCLUDED_SLA_ALERT_TYPES = []

    # set to True to cause tracebacks to be dumped to standard output
    # useful when debugging or testing
    DUMP_TRACEBACKS = False

    # the amount of time (in seconds) that a lock in the locks table is valid
    LOCK_TIMEOUT_SECONDS = None

    # amount of time (in seconds) before a process blows up because a threaded module won't stop
    EXECUTION_THREAD_LONG_TIMEOUT = None

    # the company/customer this node belongs to
    COMPANY_NAME = None
    COMPANY_ID = None

    # go ahead and try to figure out what text encoding we're using
    DEFAULT_ENCODING = locale.getpreferredencoding()

    # list of observable types we want to exclude from whitelisting (via the GUI)
    GUI_WHITELIST_EXCLUDED_OBSERVABLE_TYPES = []

    # do we want to force alerts?
    if args:
        FORCED_ALERTS = args.force_alerts

    # what is the root directory of the entire system?
    if saq_home is not None:
        SAQ_HOME = saq_home
    elif 'SAQ_HOME' in os.environ:
        SAQ_HOME = os.environ['SAQ_HOME']
    else:
        SAQ_HOME = '.'

    if not os.path.isdir(SAQ_HOME):
        sys.stderr.write("invalid root SAQ directory {0}\n".format(SAQ_HOME)) 
        sys.exit(1)

    # path to the unix socket for the encryption cache service
    ECS_SOCKET_PATH = os.path.join(SAQ_HOME, '.ecs')

    # XXX not sure we need this SAQ_RELATIVE_DIR anymore -- check it out
    # this system was originally designed to run out of /opt/saq
    # later we modified to run out of anywhere for command line correlation
    # when running the GUI in apache you have no control over the current working directory
    # so we specify what directory we'd *want* to be running out of here (even if we're not actually)
    # this only matters when loading alerts
    # this defaults to the current working directory
    SAQ_RELATIVE_DIR = os.path.relpath(os.getcwd(), start=SAQ_HOME)
    if relative_dir:
        SAQ_RELATIVE_DIR = relative_dir

    # load configuration file
    # defaults to $SAQ_HOME/etc/saq.ini
    if args:
        if args.config_paths:
            config_paths = args.config_paths

    if config_paths is None:
        config_paths = []
    
    # make each relative config path absolute to SAQ_HOME
    CONFIG_PATHS = [os.path.join(SAQ_HOME, p) if not os.path.isabs(p) else p for p in config_paths]

    # add any config files specified in SAQ_CONFIG_PATHS env var (comma-separated)
    if 'SAQ_CONFIG_PATHS' in os.environ:
        for config_path in os.environ['SAQ_CONFIG_PATHS'].split(','):
            config_path = config_path.strip()
            if not os.path.isabs(config_path):
                config_path = os.path.join(SAQ_HOME, config_path)
            if not os.path.exists(config_path):
                sys.stderr.write("WARNING: config path {} specified in SAQ_CONFIG_PATHS env var does not exist\n".format(config_path))
            else:
                if config_path not in CONFIG_PATHS:
                    CONFIG_PATHS.append(config_path)

    if UNIT_TESTING:
        # unit testing loads different configurations
        CONFIG_PATHS.append(os.path.join(SAQ_HOME, 'etc', 'saq.unittest.default.ini'))
        CONFIG_PATHS.append(os.path.join(SAQ_HOME, 'etc', 'saq.unittest.ini'))
    else:
        CONFIG_PATHS.append(os.path.join(SAQ_HOME, 'etc', 'saq.ini'))

    try:
        load_configuration()
    except Exception as e:
        sys.stderr.write("ERROR: unable to load configuration: {0}".format(str(e)))
        sys.exit(1)

    DATA_DIR = os.path.join(SAQ_HOME, CONFIG['global']['data_dir'])
    TEMP_DIR = os.path.join(DATA_DIR, CONFIG['global']['tmp_dir'])
    DAEMON_DIR = os.path.join(DATA_DIR, 'var', 'daemon')
    SERVICES_DIR = os.path.join(DATA_DIR, 'var', 'services')
    COMPANY_NAME = CONFIG['global']['company_name']
    COMPANY_ID = CONFIG['global'].getint('company_id')

    minutes, seconds = map(int, CONFIG['global']['lock_timeout'].split(':'))
    LOCK_TIMEOUT_SECONDS = (minutes * 60) + seconds
    EXECUTION_THREAD_LONG_TIMEOUT = CONFIG['global'].getint('execution_thread_long_timeout')

    # user specified log level
    LOG_LEVEL = logging.INFO
    if args:
        if args.log_level:
            LOG_LEVEL = args.log_level

    # make sure the logs directory exists
    LOG_DIRECTORY = os.path.join(DATA_DIR, 'logs')
    if not os.path.exists(LOG_DIRECTORY):
        try:
            os.mkdir(LOG_DIRECTORY)
        except Exception as e:
            sys.stderr.write("unable to mkdir {}: {}\n".format(LOG_DIRECTORY, e))
            sys.exit(1)

    # by default we log to the console
    if logging_config_path is None:
        logging_config_path = os.path.join(SAQ_HOME, 'etc', 'console_logging.ini')

    # we can override this on the command line
    # this is what we use for production engine settings
    if args:
        if args.logging_config_path:
            logging_config_path = args.logging_config_path
    
    # we can re-initialize later if we have to
    try:
        initialize_logging(logging_config_path) # this log file just gets some startup information
    except Exception as e:
        sys.stderr.write("unable to initialize logging: {}\n".format(e))
        sys.exit(1)

    # has the encryption password been set yet?
    import saq.crypto
    from saq.crypto import get_aes_key, InvalidPasswordError

    if not saq.UNIT_TESTING:
        # are we prompting for the decryption password?
        if args and args.provide_decryption_password:
            while True:
                ENCRYPTION_PASSWORD_PLAINTEXT = getpass("Enter the decryption password: ")
                try:
                    ENCRYPTION_PASSWORD = get_aes_key(ENCRYPTION_PASSWORD_PLAINTEXT)
                except InvalidPasswordError:
                    logging.error("invalid encryption password")
                    continue

                break

        elif saq.crypto.encryption_key_set():
            # if we're not prompting for it, are we running the encryption cache service yet?
            logging.debug("reading encryption password from ecs")
            ENCRYPTION_PASSWORD_PLAINTEXT = saq.crypto.read_ecs()
            if ENCRYPTION_PASSWORD_PLAINTEXT is not None:
                try:
                    ENCRYPTION_PASSWORD = get_aes_key(ENCRYPTION_PASSWORD_PLAINTEXT)
                except InvalidPasswordError:
                    logging.error("read password from ecs but the password is wrong")
                    ENCRYPTION_PASSWORD_PLAINTEXT = None

    ENCRYPTION_INITIALIZED = True

    GUI_WHITELIST_EXCLUDED_OBSERVABLE_TYPES = [_.strip() for _ in 
                                               CONFIG['gui']['whitelist_excluded_observable_types'].split(',')]

    for o_type in GUI_WHITELIST_EXCLUDED_OBSERVABLE_TYPES:
        if o_type not in VALID_OBSERVABLE_TYPES:
            logging.error(f"invalid observable type {o_type} specified in [gui] whitelist_excluded_observable_types")

    # make this a faster lookup
    GUI_WHITELIST_EXCLUDED_OBSERVABLE_TYPES = set(GUI_WHITELIST_EXCLUDED_OBSERVABLE_TYPES)

    # load global SLA settings
    GLOBAL_SLA_SETTINGS = SLA(None, 
                              CONFIG['SLA'].getboolean('enabled'),
                              CONFIG['SLA'].getint('time_to_dispo'),
                              CONFIG['SLA'].getint('approaching_warn'),
                              None, None)

    EXCLUDED_SLA_ALERT_TYPES = [x.strip() for x in CONFIG['SLA']['excluded_alert_types'].split(',')]

    # load all the other SLA settings
    for section in [s for s in CONFIG.keys() if s.startswith('SLA_')]:
        logging.debug("loading {}".format(section))
        OTHER_SLA_SETTINGS.append(SLA(section[len('SLA_'):],
                                      CONFIG[section].getboolean('enabled'),
                                      CONFIG[section].getint('time_to_dispo'),
                                      CONFIG[section].getint('approaching_warn'),
                                      CONFIG[section]['property'],
                                      CONFIG[section]['value']))

    # what node is this?
    try:
        SAQ_NODE = CONFIG['global']['node']
        if SAQ_NODE == 'AUTO':
            SAQ_NODE = socket.getfqdn()
    except Exception as e:
        sys.stderr.write("unable to get hostname: {}\n".format(e))
        sys.exit(1)

    # what prefix do other systems use to communicate to the API server for this node?
    try:
        API_PREFIX = CONFIG['api']['prefix']
        if API_PREFIX == 'AUTO':
            API_PREFIX = socket.getfqdn()
        logging.debug("node {} has api prefix {}".format(SAQ_NODE, API_PREFIX))
    except Exception as e:
        sys.stderr.write("unable to get hostname: {}\n".format(e))
        sys.exit(1)

    # what type of instance is this?
    if 'instance_type' in CONFIG['global']:
        INSTANCE_TYPE = CONFIG['global']['instance_type']
        if INSTANCE_TYPE not in [ INSTANCE_TYPE_PRODUCTION, INSTANCE_TYPE_QA, INSTANCE_TYPE_DEV ]:
            logging.warning("invalid instance type {}: defaulting to {}".format(INSTANCE_TYPE, INSTANCE_TYPE_PRODUCTION))
            INSTANCE_TYPE = INSTANCE_TYPE_PRODUCTION
    else:
        logging.warning("missing configuration instance_type in global section (defaulting to instance type {})".format(INSTANCE_TYPE_PRODUCTION))

    if FORCED_ALERTS: # lol
        logging.warning(" ****************************************************************** ")
        logging.warning(" ****************************************************************** ")
        logging.warning(" **** WARNING **** ALL ANALYSIS RESULTS IN ALERTS **** WARNING **** ")
        logging.warning(" ****************************************************************** ")
        logging.warning(" ****************************************************************** ")

    # warn if timezone is not UTC
    #if time.strftime("%z") != "+0000":
        #logging.warning("Timezone is not UTC. All ACE systems in a cluster should be in UTC.")

    # we can globally disable semaphores with this flag
    SEMAPHORES_ENABLED = CONFIG.getboolean('global', 'enable_semaphores')

    # some settings can be set to PROMPT
    for section in CONFIG.sections():
        for (name, value) in CONFIG.items(section):
            if value == 'PROMPT':
                CONFIG.set(section, name, getpass("Enter the value for {0}:{1}: ".format(section, name)))

    # make sure we've got the ca chain for SSL certs
    CA_CHAIN_PATH = os.path.join(SAQ_HOME, CONFIG['SSL']['ca_chain_path'])
    ace_api.set_default_ssl_ca_path(CA_CHAIN_PATH)

    # initialize the database connection
    initialize_database()

    # initialize fallback semaphores
    initialize_fallback_semaphores()

    # XXX get rid of this
    try:
        maliciousdir = CONFIG.get("global", "malicious")
    except:
        maliciousdir = "malicious"

    STATS_DIR = os.path.join(DATA_DIR, 'stats')
    MODULE_STATS_DIR = os.path.join(STATS_DIR, 'modules')

    # make sure some key directories exists
    for dir_path in [ 
        os.path.join(DATA_DIR, CONFIG['global']['node']),
        os.path.join(DATA_DIR, 'review', 'rfc822'),
        os.path.join(DATA_DIR, 'review', 'misc'),
        os.path.join(DATA_DIR, CONFIG['global']['error_reporting_dir']),
        STATS_DIR,
        MODULE_STATS_DIR,
        os.path.join(STATS_DIR, 'brocess'), # get rid of this
        os.path.join(STATS_DIR, 'metrics'),
        os.path.join(DATA_DIR, CONFIG['splunk_logging']['splunk_log_dir']),
        os.path.join(DATA_DIR, CONFIG['elk_logging']['elk_log_dir']),
        os.path.join(TEMP_DIR),
        SERVICES_DIR,
        DAEMON_DIR, ]: 
        #os.path.join(SAQ_HOME, maliciousdir) ]: # XXX remove
        try:
            create_directory(dir_path)
        except Exception as e:
            logging.error("unable to create required directory {}: {}".format(dir_path, str(e)))
            sys.exit(1)

    # clear out any proxy environment variables if they exist
    for proxy_key in [ 'http_proxy', 'https_proxy', 'ftp_proxy' ]:
        if proxy_key in os.environ:
            logging.debug("removing proxy environment variable for {}".format(proxy_key))
            del os.environ[proxy_key]

    # set up the PROXY global dict (to be used with the requests library)
    for proxy_key in [ 'http', 'https' ]:
        if CONFIG['proxy']['host'] and CONFIG['proxy']['port'] and CONFIG['proxy']['transport']:
            if CONFIG['proxy']['user'] and CONFIG['proxy']['password']:
                PROXIES[proxy_key] = '{}://{}:{}@{}:{}'.format(
                    CONFIG['proxy']['transport'],
                    urllib.parse.quote_plus(CONFIG['proxy']['user']),
                    urllib.parse.quote_plus(CONFIG['proxy']['password']),
                    CONFIG['proxy']['host'],
                    CONFIG['proxy']['port'])
            else:
                PROXIES[proxy_key] = '{}://{}:{}'.format(CONFIG['proxy']['transport'], CONFIG['proxy']['host'], CONFIG['proxy']['port'])

            logging.debug("proxy for {} set to {}".format(proxy_key, PROXIES[proxy_key]))

    # load any additional proxies specified in the config sections proxy_*
    for section in CONFIG.keys():
        if section.startswith('proxy_'):
            proxy_name = section[len('proxy_'):]
            OTHER_PROXIES[proxy_name] = {}
            for proxy_key in [ 'http', 'https' ]:
                if CONFIG[section]['host'] and CONFIG[section]['port'] and CONFIG[section]['transport']:
                    if 'user' in CONFIG[section] and 'password' in CONFIG[section] \
                    and CONFIG[section]['user'] and CONFIG[section]['password']:
                        OTHER_PROXIES[proxy_name][proxy_key] = '{}://{}:{}@{}:{}'.format(
                            CONFIG[section]['transport'],
                            urllib.parse.quote_plus(CONFIG[section]['user']),
                            urllib.parse.quote_plus(CONFIG[section]['password']),
                            CONFIG[section]['host'],
                            CONFIG[section]['port'])
                    else:
                        OTHER_PROXIES[proxy_name][proxy_key] = '{}://{}:{}'.format(
                            CONFIG[section]['transport'], CONFIG[section]['host'], CONFIG[section]['port'])

    # load global constants
    import iptools
    
    MANAGED_NETWORKS = []
    for cidr in CONFIG['network_configuration']['managed_networks'].split(','):
        try:
            if cidr:
                MANAGED_NETWORKS.append(iptools.IpRange(cidr.strip()))
        except Exception as e:
            logging.error("invalid managed network {}: {}".format(cidr, str(e)))

    # are we running as a daemon?
    if args:
        DAEMON_MODE = args.daemon

    # make sure we've got the automation user set up
    initialize_automation_user()

    # initialize other systems
    initialize_message_system()

    logging.debug("SAQ initialized")