Example #1
    def __init__(self, app, config):
        super(FileEventWriter, self).__init__(app, config)
        self._logger_name = config.get('logger', 'default') + '_metric_events'
        self._logger = log.Logs().get_logger(self._logger_name)
        self._log_level = config.get('loglevel', 'INFO')
        log.Logs().set_level(self._log_level, self._logger_name)
        # reset the formatter of the log handlers to message-only output
        for handler in self._logger.handlers:
            handler.setFormatter(logging.Formatter('%(message)s'))
Example #2
    def test_set_level(self, monkeypatch):
        logger = log.Logs().get_logger("set_level")

        logger.debug("this is a test log")

        log.Logs().set_level(log.logging.DEBUG)
        logger.warning("this is a test log that can show")

        log.Logs().set_level(log.logging.ERROR, name="set_level")
        logger.warning("this is a test log that can not show")
Example #3
    def update_config(self, config):
        super(FileEventWriter, self).update_config(config)
        l_name = config.get('logger', 'default') + '_metric_events'
        if l_name != self._logger_name:
            self._logger_name = l_name
            self._logger = log.Logs().get_logger(self._logger_name)
        l_level = config.get('loglevel', 'INFO')
        if self._log_level != l_level:
            self._log_level = l_level
            log.Logs().set_level(self._log_level, self._logger_name)
Example #4
def test_log_enter_exit(monkeypatch):
    logger1 = log.Logs().get_logger("enter_exit1")
    logger2 = log.Logs().get_logger("enter_exit2")

    @log.log_enter_exit(logger1)
    def test1():
        pass

    @log.log_enter_exit(logger2)
    def test2():
        pass

    test1()
    test2()
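solnlib provides log_enter_exit as a tracing decorator; the sketch below is a hypothetical re-implementation meant only to show the shape of what the test exercises, not solnlib's actual code:

import functools

def log_enter_exit_sketch(logger):
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            logger.debug("entering %s", func.__name__)
            try:
                return func(*args, **kwargs)
            finally:
                logger.debug("exiting %s", func.__name__)
        return wrapper
    return decorator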
Example #5
        def worker(logger_ref):
            native_logger = log.Logs().get_logger("test_multi_process")

            for i in range(100):
                logger_ref.debug("Log info from child process")
                native_logger.debug(
                    "Log info from child process on native logger")
Example #6
def get_logger(loglevel=None):
    if loglevel:
        set_log_level(loglevel)
    global global_logger
    if global_logger is None:
        global_logger = logs.Logs().get_logger(LOGGER_NAME)
        global_logger.setLevel(global_loglevel)
    return global_logger
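The snippet above assumes module-level state that the excerpt omits; a minimal sketch of those globals, with LOGGER_NAME hypothetical (Example #12 below shows the companion set_log_level that mutates them):

import logging
from solnlib import log as logs  # matches the snippet's logs.Logs() call

LOGGER_NAME = "my_app"           # hypothetical name
global_logger = None
global_loglevel = logging.INFO   # default until set_log_level() runs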
Example #7
    def __init__(self, ta_name, alert_name):
        self._alert_name = alert_name
        self._logger_name = alert_name + "_modalert"
        self._logger = log.Logs().get_logger(self._logger_name)
        super().__init__(sys.stdin.read(), self._logger, alert_name)
        self.setup_util_module = None
        self.setup_util = None
        self.result_handle = None
        self.ta_name = ta_name
        self.splunk_uri = self.settings.get("server_uri")
        self.setup_util = Setup_Util(self.splunk_uri, self.session_key, self._logger)

        self.rest_helper = TARestHelper(self._logger)
Example #8
def update_log_level(level):
    log_level = logging.INFO
    level = level.lower()
    if level == "debug":
        log_level = logging.DEBUG
    elif level == "info":
        log_level = logging.INFO
    elif level == "error":
        log_level = logging.ERROR
    else:
        log_level = logging.INFO
    _LOGGER.info("set the loglevel to %s", level)
    log.Logs().set_level(log_level, "validation_mi")
    return log_level
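The if/elif ladder above can be expressed as a table lookup with the same INFO fallback; an equivalent sketch using the same _LOGGER and log objects as the original:

_LEVELS = {"debug": logging.DEBUG, "info": logging.INFO, "error": logging.ERROR}

def update_log_level(level):
    log_level = _LEVELS.get(level.lower(), logging.INFO)
    _LOGGER.info("set the loglevel to %s", level)
    log.Logs().set_level(log_level, "validation_mi")
    return log_level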
Example #9
def _get_logger(name, level=logging.INFO):
    with g_logger_lock:
        l = None
        if name in g_loggers:
            l = g_loggers[name]
        else:
            l = log.Logs().get_logger(name)
            g_loggers[name] = l
        lv = g_log_levels.get(name, 10000)
        if level < lv:
            # logging.DEBUG < INFO < WARN < ERROR
            l.setLevel(level)
            g_log_levels[name] = level
        return l
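A usage sketch for the helper above ("my_input" is illustrative): the cache only ever lowers a logger's level, and the 10000 sentinel exceeds every standard logging level, so the first call always applies:

first = _get_logger("my_input", logging.DEBUG)   # caches the logger, sets DEBUG (10)
second = _get_logger("my_input", logging.ERROR)  # 40 < 10 is False, level unchanged
assert first is second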
Example #10
    def test_multi_thread(self, monkeypatch):
        log.Logs.set_context(directory="/tmp/", namespace="unittest")
        logger = log.Logs().get_logger("test_multi_thread")

        logger.debug("Log info from main thread")

        def worker(logger_ref):
            native_logger = log.Logs().get_logger("test_multi_thread")

            for i in range(100):
                logger_ref.debug("Log info from child thread")
                native_logger.debug(
                    "Log info from child thread on native logger")

        for i in range(20):
            t = threading.Thread(target=worker, args=(logger, ))
            t.start()

        time.sleep(1)
Example #11
    def test_multi_process(self, monkeypatch):
        log.Logs.set_context(directory="/tmp/", namespace="unittest")
        logger = log.Logs().get_logger("test_multi_process")

        logger.debug("Log info from main process")

        def worker(logger_ref):
            native_logger = log.Logs().get_logger("test_multi_process")

            for i in range(100):
                logger_ref.debug("Log info from child process")
                native_logger.debug(
                    "Log info from child process on native logger")

        for _ in range(20):
            p = multiprocessing.Process(target=worker, args=(logger, ))
            p.start()

        time.sleep(1)
Example #12
def set_log_level(loglevel):
    if loglevel:
        global global_loglevel
        global_loglevel = loglevel
        if global_logger:
            logs.Logs().set_level(global_loglevel, LOGGER_NAME)
"""

import urllib
try:
    from urllib import urlencode
except ImportError:
    from urllib.parse import urlencode
from httplib2 import Http, ProxyInfo, socks
import splunk.admin as admin
from solnlib import log
from solnlib import conf_manager
from solnlib.utils import is_true
import json

log.Logs.set_context()
logger = log.Logs().get_logger('splunk_ta_uccexample_rh_oauth2_token')

# Map for available proxy type
_PROXY_TYPE_MAP = {
    'http': socks.PROXY_TYPE_HTTP,
    'http_no_tunnel': socks.PROXY_TYPE_HTTP_NO_TUNNEL,
    'socks4': socks.PROXY_TYPE_SOCKS4,
    'socks5': socks.PROXY_TYPE_SOCKS5,
}
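# Hypothetical use of the map above (host and port are illustrative only):
#   proxy_info = ProxyInfo(proxy_type=_PROXY_TYPE_MAP['http'],
#                          proxy_host='proxy.example.com', proxy_port=8080)
#   http = Http(proxy_info=proxy_info)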
"""
REST Endpoint of getting token by OAuth2 in Splunk Add-on UI Framework.
"""


class splunk_ta_uccexample_rh_oauth2_token(admin.MConfigHandler):
    """
Example #14
    handleEdit method: controls the parameters and saves the values
    corresponds to handleractions = edit in restmap.conf
"""
import ta_sigsci_blacklist_alert_declare

import json
import splunk.clilib.cli_common as scc
import splunk.admin as admin

import solnlib.utils as utils
import solnlib.log as log
import solnlib.conf_manager as conf
import ta_sigsci_blacklist_alert_consts as setup_const

log.Logs.set_context(namespace="ta_sigsci_blacklist_alert")
logger = log.Logs().get_logger("setup")


def get_or_create_conf_file(conf_mgr, file_name):
    try:
        conf_file = conf_mgr.get_conf(file_name)
        return conf_file
    except conf.ConfManagerException as cme:
        conf_mgr._confs.create(file_name)
        return conf_mgr.get_conf(file_name, refresh=True)


def filter_eai_property(stanza):
    if isinstance(stanza, dict):
        for k in list(stanza.keys()):
            if k.startswith('eai:'):
Example #15
# encoding = utf-8
from builtins import object
from aob.aob_common.metric_collector import event_writer
from aob.aob_common.metric_collector import memory_event_writer
from aob.aob_common.metric_collector.metric_exception import MetricException
from aob.aob_common.metric_collector.number_metric_collector import NumberMetricCollector

from solnlib import pattern
from solnlib import log

import threading
from future.utils import with_metaclass

__all__ = ['Monitor']

logger = log.Logs().get_logger('metric_collector')

class Monitor(with_metaclass(pattern.Singleton, object)):
    EVENT_WRITERS = {
        'memory': memory_event_writer.MemoryEventWriter,
        'file': event_writer.FileEventWriter,
        'hec': event_writer.FileEventWriter  # TODO: implement a HEC writer
    }

    def __init__(self):
        self._app = None
        self._ewriter = None
        self._ewriter_type = None
        self.number_metric_collector = None
        self.worker_start_lock = threading.Lock()
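Because Monitor names solnlib's pattern.Singleton as its metaclass, repeated construction yields one shared instance; a quick illustration under that assumption:

m1 = Monitor()
m2 = Monitor()
assert m1 is m2  # pattern.Singleton returns the cached first instance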
Example #16
    def stream_events(self, inputs, ew):
        input_items = {}
        input_name = list(inputs.inputs.keys())[0]
        input_items = inputs.inputs[input_name]

        # Create UTC timezone for conversion
        utc = pytz.utc
        params = {}
        start = 0

        zenoss_server = input_items.get("zenoss_server")
        username = input_items.get("zenoss_username")
        zenoss_realm = input_items.get("zenoss_realm")
        no_ssl_cert_check = int(input_items.get("no_ssl_cert_check"))
        # Since Disable=1 and Enable=0, negate bool() to keep alignment
        ssl_cert_check = not bool(no_ssl_cert_check)
        cafile = input_items.get("cafile")
        interval = int(input_items.get("interval", HOUR))
        start_date = input_items.get("start_date")
        index_closed = int(input_items.get("index_closed"))
        index_cleared = int(input_items.get("index_cleared"))
        index_archived = int(input_items.get("index_archived"))
        index_suppressed = int(input_items.get("index_suppressed"))
        index_repeats = int(input_items.get("index_repeats"))
        archive_threshold = int(input_items.get("archive_threshold"))
        checkpoint_delete_threshold = int(
            input_items.get("checkpoint_delete_threshold"))
        tzone = input_items.get("tzone")
        proxy_uri = input_items.get("proxy_uri")
        proxy_username = input_items.get("proxy_username")
        proxy_realm = input_items.get("proxy_realm")
        proxy_password = None

        meta_configs = self._input_definition.metadata

        # Generate logger with input name
        _, input_name = input_name.split('//', 1)
        self.logger = log.Logs().get_logger('{}_input'.format(APP_NAME))

        # Log level configuration
        self.logger.setLevel('INFO')

        if index_closed: params = dict(index_closed=True)
        if index_cleared: params = dict(index_cleared=True)
        if index_suppressed: params = dict(index_suppressed=True)
        if index_repeats: params = dict(index_repeats=True)

        try:
            if tzone:
                zenoss_tz = pytz.timezone(tzone)
            else:
                zenoss_tz = pytz.timezone(str(get_localzone()))
        except pytz.UnknownTimeZoneError as e:
            self.logger.warning(
                "Unknown Timezone {} - Using default UTC".format(e))
            zenoss_tz = pytz.timezone("utc")

        # Get UTC timestamp
        utc_now = datetime.utcnow().replace(tzinfo=utc)
        # Convert to Zenoss server timezone
        now_local = utc_now.astimezone(zenoss_tz)
        # Create local time string
        now_str = now_local.strftime(DATE_FORMAT)

        # Load checkpoint file
        self.chk = checkpointer.FileCheckpointer(
            meta_configs['checkpoint_dir'])

        if self.chk.get("run_from") is None:
            # Initializing keys in checkpoint
            self.chk.update("run_from", start_date)
            self.chk.update("last_run", None)
            self.chk.update("last_cleaned", now_str)

        try:
            device = input_items.get("device")
        except Exception:
            device = None

        # Get password from storage password
        try:
            password = self.get_password(zenoss_realm, username,
                                         meta_configs['session_key'])
        except Exception as e:
            self.logger.error(
                "Failed to get password for user %s, realm %s. Verify credential account exists. User who scheduled alert must have Admin privileges. - %s"
                % (username, zenoss_realm, e))
            sys.exit(1)

        if proxy_username is not None:
            try:
                proxy_password = self.get_password(proxy_realm, proxy_username,
                                                   meta_configs['session_key'])
            except Exception as e:
                self.logger.error(
                    "Failed to get password for user %s, realm %s. Verify credential account exists. User who scheduled alert must have Admin privileges. - %s"
                    % (proxy_username, proxy_realm, e))
                sys.exit(1)

        while True:
            run_from = self.chk.get("run_from")
            # When None, get ALL events; otherwise start from the user's specified date

            # Work with datetimes in UTC and then convert to timezone of Zenoss server
            utc_dt = utc.localize(datetime.utcnow())
            now_local = zenoss_tz.normalize(utc_dt.astimezone(zenoss_tz))
            now_epoch = calendar.timegm(now_local.utctimetuple())
            cur_time = now_local.strftime(DATE_FORMAT)

            # Connect to Zenoss web interface and get events
            try:
                z = ZenossAPI(zenoss_server, username, password, proxy_uri,
                              proxy_username, proxy_password, ssl_cert_check,
                              cafile)
            except Exception as e:
                log_message = "Zenoss Events: Failed to connect to server %s as user %s - Error: %s" % (
                    zenoss_server, username, e)
                self.logger.error("{}. Exiting.".format(log_message))
                sys.exit(1)

            # Initializing data
            events_dict = {
                "run_from": self.chk.get("run_from"),
                "last_run": self.chk.get("last_run"),
                "last_cleaned": self.chk.get("last_cleaned")
            }

            # Get Events
            events_dict = self.get_and_process_events(z, events_dict, ew,
                                                      params, device, start,
                                                      run_from, index_closed,
                                                      index_cleared,
                                                      index_suppressed)

            # Update last run timestamp
            events_dict['last_run'] = cur_time

            # Process archived events
            if index_archived:
                # Get last archive read, convert and create epoch timestamp
                try:
                    last_archive_read = self.chk.get('last_archive_read')
                    if last_archive_read is None:
                        # Key does not exist in checkpoint
                        raise Exception
                    archive_delta = self.calc_epoch_delta(
                        last_archive_read, DATE_FORMAT, now_epoch, zenoss_tz,
                        HOUR)
                except Exception:
                    last_archive_read = None
                    archive_delta = 0

                # Read the archived events table if it hasn't been read or
                # last read exceeds archive threshold
                if archive_delta >= archive_threshold or \
                   not last_archive_read:
                    log_message = "Zenoss Events: Processing Archived Events\n" % params
                    self.logger.info(log_message)
                    self.get_and_process_events(z,
                                                events_dict,
                                                ew,
                                                params,
                                                device,
                                                start,
                                                run_from,
                                                index_closed,
                                                index_cleared,
                                                index_suppressed,
                                                archive=True)
                    events_dict['last_archive_read'] = cur_time

            # Clean checkpoint file
            try:
                last_cleaned = events_dict['last_cleaned']
                if last_cleaned is None:
                    # Key does not exist in checkpoint
                    raise Exception
            except Exception:
                last_cleaned = cur_time

            # Check to see if we need to clean the checkpoint file based on the
            # checkpoint delta threshold
            last_cleaned_delta = self.calc_epoch_delta(last_cleaned,
                                                       DATE_FORMAT, now_epoch,
                                                       zenoss_tz, DAY)
            keys_toclean = []

            # Clean checkpoint file of old archive records
            if last_cleaned_delta >= CHECKPOINT_CLEAN_FREQUENCY:
                for k in events_dict.keys():
                    if isinstance(events_dict[k],
                                  dict) and 'last_time' in events_dict[k]:
                        last_time = events_dict[k]['last_time']
                        epoch_delta = self.calc_epoch_delta(
                            last_time, "%Y-%m-%d %H:%M:%S", now_epoch,
                            zenoss_tz, DAY)
                        if epoch_delta >= int(checkpoint_delete_threshold):
                            keys_toclean.append(k)
                            self.chk.delete(k)

            # Update checkpoint file
            for key in events_dict.keys():
                if key in keys_toclean:
                    continue
                # dict2str to save among checkpoints
                value = events_dict[key]
                if isinstance(value, dict):
                    value = json.dumps(value)
                self.chk.update(key, value)

            time.sleep(float(interval))
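The loop above leans on a calc_epoch_delta helper that the excerpt does not show; a hypothetical sketch consistent with its call sites (timestamp string in, delta from now_epoch out, expressed in the given unit such as HOUR or DAY in seconds):

def calc_epoch_delta(self, time_str, fmt, now_epoch, tz, unit):
    # Hypothetical reconstruction, not the add-on's actual method.
    local_dt = tz.localize(datetime.strptime(time_str, fmt))
    then_epoch = calendar.timegm(local_dt.utctimetuple())
    return (now_epoch - then_epoch) / unit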
Example #17
    def stream_events(self, inputs, ew):
        meta_configs = self._input_definition.metadata
        session_key = meta_configs['session_key']

        input_items = {}
        input_name = list(inputs.inputs.keys())[0]
        input_items = inputs.inputs[input_name]

        # Generate logger with input name
        _, input_name = input_name.split('//', 1)
        logger = log.Logs().get_logger('{}_input'.format(APP_NAME))

        # Log level configuration
        log_level = get_log_level(session_key, logger)
        logger.setLevel(log_level)

        logger.debug("Modular input invoked.")

        try:
            account_name = input_items.get('global_account')
            account_details = get_account_details(session_key, account_name,
                                                  logger)

            username = account_details.get('username')
            password = account_details.get('password')
            if not username:
                logger.error(
                    "Username is required for account {} to resume updates".format(
                        account_name))
                sys.exit(1)
            if not password:
                logger.error(
                    "Password is required for account {} to resume updates".format(
                        account_name))
                sys.exit(1)

            logger.debug(f"username is {username}")
            # Proxy configuration
            proxy_settings = get_proxy_settings(session_key, logger)

            edition_ids = input_items.get('edition_ids')

            with tempfile.NamedTemporaryFile(mode='w',
                                             suffix=".conf",
                                             prefix="GeoIP") as file:
                file.write("\nAccountID " + username)
                file.write("\nLicenseKey " + password)
                file.write("\nEditionIDs " + edition_ids + "\n")

                if proxy_settings == {}:
                    logger.debug("no proxy")
                else:
                    file.write("\nProxy " + proxy_settings["proxy_url"] + ":" +
                               proxy_settings["proxy_port"])
                    if proxy_settings["proxy_username"] is not None:
                        file.write("\nProxyUserPassword " +
                                   proxy_settings["proxy_username"] + ":" +
                                   proxy_settings["proxy_password"])

                file.flush()
                guargs = str(
                    os.path.expandvars(
                        "-v -d $SPLUNK_HOME/etc/apps/SecKit_SA_geolocation/data/ -f "
                        + file.name))

                try:
                    subprocess.check_output([
                        "$SPLUNK_HOME/etc/apps/SecKit_SA_geolocation/bin/geoipupdate/linux_amd64/geoipupdate "
                        + guargs
                    ],
                                            shell=True,
                                            stderr=subprocess.STDOUT)
                except subprocess.CalledProcessError as e:
                    logger.exception(e)
                    logger.error("command args:\n")
                    logger.error(e.cmd)
                    logger.error("output:\n")
                    logger.error(e.output.decode("utf-8"))
                    sys.exit(1)

                mmdb_dir = os.path.expandvars(
                    '$SPLUNK_HOME/etc/apps/SecKit_SA_geolocation/data/')
                files = os.listdir(mmdb_dir)
                for name in files:
                    if name.endswith('.mmdb'):
                        inode = os.stat(os.path.join(mmdb_dir, name))
                        logger.info('mmdb=' + name + ' size=' +
                                    str(inode.st_size) + ' mtime=' +
                                    str(inode.st_mtime))
        except Exception as e:
            logger.exception(e)
            sys.exit(1)

        logger.debug("Modular input completed")
Example #18
import urllib
try:
    from urllib import urlencode
except ImportError:
    from urllib.parse import urlencode
import requests
from httplib2 import Http, ProxyInfo, socks
import splunk.admin as admin
from solnlib import log
from solnlib import conf_manager
from solnlib.utils import is_true
import json


log.Logs.set_context()
logger = log.Logs().get_logger('ta_lansweeper_rh_oauth')

# Map for available proxy type
_PROXY_TYPE_MAP = {
    'http': socks.PROXY_TYPE_HTTP,
    'http_no_tunnel': socks.PROXY_TYPE_HTTP_NO_TUNNEL,
    'socks4': socks.PROXY_TYPE_SOCKS4,
    'socks5': socks.PROXY_TYPE_SOCKS5,
}

"""
REST Endpoint of getting token by OAuth2 in Splunk Add-on UI Framework.
"""
class ta_lansweeper_add_on_for_splunk_rh_oauth2_token(admin.MConfigHandler):
    """
    This method checks which action is getting called and what parameters are required for the request.
Example #19
    def test_get_logger(self, monkeypatch):
        logger = log.Logs().get_logger("logging")

        logger.debug("this is a test log")
        logger.warning("this is a test log that can show")
Example #20
    def stream_events(self, inputs, ew):
        meta_configs = self._input_definition.metadata
        session_key = meta_configs['session_key']

        input_items = {}
        input_name = list(inputs.inputs.keys())[0]
        input_items = inputs.inputs[input_name]

        # Generate logger with input name
        _, input_name = input_name.split('//', 1)
        logger = log.Logs().get_logger(
            'ta_lansweeper_input_{}'.format(input_name))
        log_level = get_log_level(session_key, logger)
        logger.setLevel(log_level)

        logger.info("Modular input invoked.")

        account_name = input_items.get('account_name')
        account_details = get_account_details(session_key, account_name,
                                              logger)
        proxy_settings = get_proxy_settings(session_key, logger)
        client_id = account_details.get('client_id')
        client_secret = account_details.get('client_secret')
        access_token = account_details.get('access_token')
        refresh_token = account_details.get('refresh_token')
        site_name = input_items.get('site').split(',')
        logger.info("Site names: " + str(site_name))
        index = input_items.get('index')
        sites = []

        # Note - Do not uncomment below line in the production
        # logger.debug('Access token={}, ||| Refresh token={}'.format(access_token, refresh_token))

        lansweeper = Lansweeper(client_id=client_id,
                                client_secret=client_secret,
                                access_token=access_token,
                                refresh_token=refresh_token,
                                proxy_settings=proxy_settings,
                                logger=logger)
        try:
            # Get site id
            status_code, response = lansweeper.get_site_id(site_name)
            if status_code != 200:
                is_expired_response = lansweeper.is_token_expired(
                    status_code, response.text)
                if is_expired_response:
                    lansweeper.access_token = is_expired_response[
                        'access_token']
                    # Updating the access token and refresh token in the conf files
                    try:
                        update_access_token(
                            access_token=is_expired_response['access_token'],
                            refresh_token=refresh_token,
                            client_secret=client_secret,
                            session_key=session_key,
                            stanza_name=account_name)
                        logger.info(
                            'Successfully updated the new access token and refresh token in the conf file'
                        )
                    except Exception as exception:
                        logger.warning(
                            'Error while updating the access token and refresh token in the conf file, error={}'
                            .format(exception))

                    logger.debug("Retrying for site-id with new tokens.")
                    status_code, response = lansweeper.get_site_id(site_name)
                    if status_code != 200:
                        logger.error(
                            'Error while fetching the site id for site={}, status code={} response={}'
                            .format(site_name, status_code, response.text))
                        sys.exit(1)
                    else:
                        sites = response
                else:
                    logger.error(
                        'Error while fetching the site id for site={}, status code={} response={}'
                        .format(site_name, status_code, response.text))
                    sys.exit(1)
            else:
                logger.info(
                    'Successfully fetched the site id for site={}'.format(
                        site_name))
                sites = response
        except Exception as exception:
            logger.exception(
                'Error while fetching the site id for site={}, exception={}'.
                format(site_name, exception))
            sys.exit(1)

        logger.info("Site id: {}".format(sites))

        # Get asset information for all the sites
        logger.info('Starting to fetch assets data')
        for site in sites:
            try:
                cursor = ''
                page = 'FIRST'
                is_data = True
                site_name = list(site.keys())[0]
                site_id = site[site_name]
                while is_data:
                    status, response_code, response = lansweeper.get_asset_info(
                        site_id, cursor, page)
                    if not status:
                        is_expired_response = lansweeper.is_token_expired(
                            response_code, response.text)
                        if is_expired_response:
                            lansweeper.access_token = is_expired_response[
                                'access_token']
                            try:
                                # Updating the access token and refresh token in the conf files
                                update_access_token(
                                    access_token=is_expired_response[
                                        'access_token'],
                                    refresh_token=refresh_token,
                                    client_secret=client_secret,
                                    session_key=session_key,
                                    stanza_name=account_name)
                                logger.info(
                                    'Successfully updated the new access token and refresh token in the conf file'
                                )
                            except Exception as exception:
                                logger.warning(
                                    'Error while updating the access token and refresh token in the conf file, error={}'
                                    .format(exception))

                            continue

                        else:
                            logger.error(
                                'Error while fetching the assets for site={}, status code={} response={}'
                                .format(site_name, response_code,
                                        response.text))
                            break
                    else:
                        logger.info(
                            'Successfully fetched the assets for site={} page={}'
                            .format(site_name, page))
                        cursor, asset_data = response_code, response
                    write_event(asset_data, site_name, ew, index, logger)
                    is_data = cursor
                    page = 'NEXT'
                    logger.info(
                        'Fetching the next group of assets for site={} cursor={}'
                        .format(site_name, cursor))
            except Exception as exception:
                logger.exception(
                    'Error while fetching the assets for site={}'.format(
                        site_name))

        logger.info('Completed the data collection for the input')