コード例 #1
0
def run():
    """
    Entry point for the CloudWatch TA.

    Loads task configuration, wires up conf-file monitoring and an
    orphan-process check, then blocks inside the data loader until it
    is stopped.
    """

    from aws_cloudwatch_conf import AWSCloudWatchConf, create_conf_monitor
    from splunk_ta_aws.common.aws_concurrent_data_loader import AwsDataLoaderManager

    logger.info("Start Cloudwatch TA")
    _metas, _stanzas, tasks = tacommon.get_configs(
        AWSCloudWatchConf, "aws_cloudwatch", logger)

    # Nothing configured: exit immediately.
    if not tasks:
        return

    data_loader = AwsDataLoaderManager(tasks, 'splunk_ta_aws', 'cloudwatch')

    # Restart on configuration file changes, polled every 10 seconds.
    change_handler = tacommon.get_file_change_handler(data_loader, logger)
    data_loader.add_timer(create_conf_monitor(change_handler), time.time(), 10)

    # Stop once the parent process disappears, checked every second.
    parent_watchdog = opm.OrphanProcessChecker(data_loader.stop)
    data_loader.add_timer(parent_watchdog.check_orphan, time.time(), 1)

    data_loader.start()
    logger.info("End CloudWatch TA")
コード例 #2
0
def run():
    """
    Entry point for the Kafka TA.

    Loads task configuration, installs signal handlers and monitors
    (conf-file changes, orphan-process detection), optionally schedules
    topic-change checking on the dispatcher node, then runs the data
    loader until it is stopped.
    """

    # Sleep 5 seconds here for KV store ready
    time.sleep(5)
    kc.setup_signal_handler(None, None)
    kconfig = kc.create_kafka_config()
    task_configs = kconfig.get_task_configs()
    if not task_configs and not kconfig.is_dispatcher():
        return

    loader = kcdl.KafkaDataLoaderManager(task_configs)
    # Re-register signal handlers now that the loader exists.
    kc.setup_signal_handler(loader, kconfig)

    # Restart on configuration file changes, polled every 10 seconds.
    monitor = fm.FileMonitor(_handle_file_changes(loader), _get_conf_files())
    loader.add_timer(monitor.check_changes, time.time(), 10)

    # Stop once the parent process disappears, checked every second.
    orphan_checker = opm.OrphanProcessChecker(loader.stop)
    loader.add_timer(orphan_checker.check_orphan, time.time(), 1)

    # Wait up to 15 seconds before starting, bailing out early if a stop
    # signal arrives in the meantime. (Fix: loop index was unused, use _.)
    for _ in range(15):
        if loader.received_stop_signal():
            return
        time.sleep(1)

    # NOTE(review): "internval" is a typo, but the env var name is kept
    # as-is for backward compatibility with existing deployments.
    topic_interval = int(os.environ.get("kafka_topic_check_internval", 3600))
    if kconfig.is_dispatcher():
        topic_handler = _handle_topic_changes(loader, kconfig)
        loader.add_timer(topic_handler, time.time(), topic_interval)
    loader.start()
コード例 #3
0
def run():
    """
    Entry point for the Kinesis TA.

    Loads task configuration, wires up conf-file monitoring and an
    orphan-process check, then blocks inside the data loader until it
    is stopped.
    """
    from splunk_ta_aws.modinputs.kinesis.aws_kinesis_conf import AWSKinesisConf, create_conf_monitor
    from splunk_ta_aws.common.aws_concurrent_data_loader import AwsDataLoaderManager

    logger.info("Start Kinesis TA")
    _metas, _, tasks = tacommon.get_configs(
        AWSKinesisConf, "aws_kinesis", logger)

    # Nothing configured: exit immediately.
    if not tasks:
        return

    data_loader = AwsDataLoaderManager(tasks, 'splunk_ta_aws', 'kinesis')

    # Restart on configuration file changes, polled every 10 seconds.
    conf_monitor = create_conf_monitor(
        tacommon.get_file_change_handler(data_loader, logger))
    data_loader.add_timer(conf_monitor, time.time(), 10)

    # Stop once the parent process disappears, checked every second.
    parent_watchdog = opm.OrphanProcessChecker(data_loader.stop)
    data_loader.add_timer(parent_watchdog.check_orphan, time.time(), 1)

    # Shard-change monitoring intentionally disabled (see ADDON-9537).

    data_loader.start()
    logger.info("End Kinesis TA")
コード例 #4
0
def run(collector_cls,
        settings,
        checkpoint_cls=None,
        config_cls=None,
        log_suffix=None):
    """
    Main loop. Run this TA forever.

    Builds one data collector per configured task and hands them to the
    data loader, after wiring up signal handling, optional conf-file
    monitoring and orphan-process detection.

    :param collector_cls: class used to collect data for each task.
    :param settings: dict-like settings; ``settings["basic"]`` supplies
        the title and the optional ``monitor_file`` entry.
    :param checkpoint_cls: checkpoint manager class; defaults to
        ``cpmgr.TACheckPointMgr``.
    :param config_cls: TA config class; defaults to ``tc.TaConfig``.
    :param log_suffix: optional suffix used when creating the TA config.
    """
    # Make stdout unbuffered so output is flushed promptly.
    utils.disable_stdout_buffer()

    # Call strptime once up front (see http://bugs.python.org/issue7980).
    time.strptime("2016-01-01", "%Y-%m-%d")

    ta_config = tc.create_ta_config(
        settings, config_cls or tc.TaConfig, log_suffix)
    stulog.set_log_level(ta_config.get_log_level())

    task_configs = ta_config.get_task_configs()
    if not task_configs:
        stulog.logger.debug("No task and exiting...")
        return

    meta_config = ta_config.get_meta_config()

    if ta_config.is_shc_but_not_captain():
        # In SHC env, only captain is able to collect data
        stulog.logger.debug("This search header is not captain, will exit.")
        return

    loader = dl.create_data_loader(meta_config)

    # One collector per task; all share the same loader and meta config.
    ckpt_cls = checkpoint_cls or cpmgr.TACheckPointMgr
    jobs = [
        tdc.create_data_collector(
            loader, ta_config, meta_config, task_config,
            collector_cls, checkpoint_cls=ckpt_cls)
        for task_config in task_configs
    ]

    # handle signal
    _setup_signal_handler(loader, settings["basic"]["title"])

    # Restart on monitored file changes, if any files are configured.
    monitored = settings["basic"].get("monitor_file")
    if monitored:
        file_monitor = fm.FileMonitor(
            _handle_file_changes(loader),
            _get_conf_files(monitored),
        )
        loader.add_timer(file_monitor.check_changes, time.time(), 10)

    # Tear down once the parent process disappears, checked every second.
    parent_watchdog = opm.OrphanProcessChecker(loader.tear_down)
    loader.add_timer(parent_watchdog.check_orphan, time.time(), 1)

    loader.run(jobs)
コード例 #5
0
    def __init__(self):
        """Initialize the script with all collection state reset."""
        super(MyScript, self).__init__()

        # Internal runtime state.
        self._orphan_checker = opm.OrphanProcessChecker()
        self._canceled = False
        # Event writer; None until assigned later — presumably during
        # event streaming (assignment site not visible in this chunk).
        self._ew = None

        # Filled in from the modular-input configuration at run time.
        self.enable_additional_notifications = False
        self.input_name = None
        self.input_items = None
コード例 #6
0
def _wait_for_tear_down(tear_down_q, loader):
    """
    Block until tear-down is due, then stop *loader*.

    Exits the wait loop when either a truthy value is read from
    *tear_down_q* or this process has become an orphan.
    """
    orphan_checker = opm.OrphanProcessChecker()
    while True:
        try:
            should_exit = tear_down_q.get(block=True, timeout=2)
        except Queue.Empty:
            # No explicit signal within 2 seconds: fall back to the
            # orphan-process check.
            should_exit = orphan_checker.is_orphan()
            if should_exit:
                logger.info("%s becomes orphan, going to exit", os.getpid())
        if should_exit:
            break

    if loader is not None:
        loader.stop()
    logger.info("End of waiting for tear down signal")
def _wait_for_tear_down(tear_down_q, loader):
    """
    Build and return a one-shot tear-down poller.

    The returned callable waits up to 2 seconds for a value on
    *tear_down_q*, falling back to an orphan-process check on timeout.
    When tear-down is due it stops *loader* (if given) and returns a
    truthy value; otherwise it returns a falsy value.
    """
    orphan_checker = opm.OrphanProcessChecker()

    def _poll_once():
        try:
            should_exit = tear_down_q.get(block=True, timeout=2)
        except Queue.Empty:
            should_exit = orphan_checker.is_orphan()
            if should_exit:
                logger.info("%s becomes orphan, going to exit", os.getpid())

        if should_exit and loader is not None:
            loader.stop()
        return should_exit

    return _poll_once
コード例 #8
0
    def __init__(self, config, job_scheduler, event_writer):
        """
        @config: dict-like object. Each element shall implement
        dict.get/[] like interfaces to get the value for a key.
        @job_scheduler: schedules the jobs; shall implement
        get_ready_jobs.
        @event_writer: shall implement write_events.
        """
        # Settings must be read before the executor, which consumes them.
        self._settings = self._read_default_settings()

        # Collaborators injected by the caller.
        self._config = config
        self._event_writer = event_writer
        self._scheduler = job_scheduler

        # Internal machinery and state.
        self._timer_queue = tq.TimerQueue()
        self._executor = ce.ConcurrentExecutor(self._settings)
        self._orphan_checker = opm.OrphanProcessChecker(None)
        self._started = False
コード例 #9
0
def run(collector_cls,
        settings,
        checkpoint_cls=None,
        config_cls=None,
        log_suffix=None,
        single_instance=True,
        cc_json_file=None):
    """
    Main loop. Run this TA forever.

    Builds one data collector per configured task and hands them to the
    data loader, after wiring up signal handling, conf-file monitoring
    and orphan-process detection.

    :param collector_cls: class used to collect data for each task.
    :param settings: dict-like settings; ``settings["meta"]["name"]``
        supplies the TA short name.
    :param checkpoint_cls: checkpoint manager class; defaults to
        ``cpmgr.TACheckPointMgr``.
    :param config_cls: TA config class; defaults to ``tc.TaConfig``.
    :param log_suffix: optional suffix used when creating the TA config.
    :param single_instance: passed through to ``tc.create_ta_config``.
    :param cc_json_file: stored in the meta config under
        ``"cc_json_file"`` for collectors to read.
    """
    ta_short_name = settings["meta"]["name"].lower()

    # This is for stdout flush
    utils.disable_stdout_buffer()

    # Call strptime once up front (see http://bugs.python.org/issue7980).
    time.strptime('2016-01-01', '%Y-%m-%d')

    loader = dl.create_data_loader()

    # handle signal
    _setup_signal_handler(loader, ta_short_name)

    # Monitor files to reboot; best effort only, failures are logged.
    try:
        monitor = fm.FileMonitor(_handle_file_changes(loader),
                                 _get_conf_files(settings))
        loader.add_timer(monitor.check_changes, time.time(), 10)
    except Exception:
        stulog.logger.exception("Fail to add files for monitoring")

    # add orphan process handling, which will check each 1 second
    orphan_checker = opm.OrphanProcessChecker(loader.tear_down)
    loader.add_timer(orphan_checker.check_orphan, time.time(), 1)

    tconfig = tc.create_ta_config(settings,
                                  config_cls or tc.TaConfig,
                                  log_suffix,
                                  single_instance=single_instance)
    task_configs = tconfig.get_task_configs()

    if not task_configs:
        stulog.logger.debug("No task and exiting...")
        return
    meta_config = tconfig.get_meta_config()
    meta_config["cc_json_file"] = cc_json_file

    if tconfig.is_shc_member():
        # Don't support SHC env
        stulog.logger.error(
            "This host is in search head cluster environment , "
            "will exit.")
        return

    # In this case, use file for checkpoint
    if _is_checkpoint_dir_length_exceed_limit(tconfig,
                                              meta_config["checkpoint_dir"]):
        # Fix: the message used "{}" (str.format) placeholders with
        # logging's lazy %-style arguments, so the values were never
        # interpolated; use %s placeholders instead.
        stulog.logger.error(
            "The length of the checkpoint directory path: '%s' "
            "is too long. The max length we support is %s",
            meta_config["checkpoint_dir"], __CHECKPOINT_DIR_MAX_LEN__)
        return

    jobs = [
        tdc.create_data_collector(loader,
                                  tconfig,
                                  meta_config,
                                  task_config,
                                  collector_cls,
                                  checkpoint_cls=checkpoint_cls
                                  or cpmgr.TACheckPointMgr)
        for task_config in task_configs
    ]

    loader.run(jobs)
コード例 #10
0
import splunktalib.orphan_process_monitor as opm
from splunk_ta_aws.common.s3util import create_s3_connection_from_keyname

# Module-level event writer handle; None until assigned elsewhere —
# the assignment site is not visible in this chunk (TODO confirm who sets it).
event_writer = None


def create_s3_connection(bucket_name, key_name, key_id, secret_key,
                         session_key):
    """
    Open an S3 connection suitable for fetching a CloudTrail object.

    The region is derived from *key_name*, which is matched against the
    CloudTrail object-name pattern captured in ``region_rex`` below.
    """
    # Capture group 1 is the region segment of the CloudTrail key name.
    region_rex = (r"\d+_CloudTrail_([\w-]+)_\d{4}\d{2}\d{2}T\d{2}\d{2}Z_.{16}"
                  r"\.json\.gz$")
    return create_s3_connection_from_keyname(
        key_id, secret_key, session_key, bucket_name, key_name, region_rex)


# Module-level orphan-process checker, consulted by orphan_check() below.
_orphan_checker = opm.OrphanProcessChecker()


def orphan_check():
    """
    Raise InputCancellationError if this process has become an orphan.

    Returns None (does nothing) while the parent process is still alive.
    """
    if not _orphan_checker.is_orphan():
        return
    raise InputCancellationError(
        'Input was stop. This is an orphan process.')


class InputCancellationError(Exception):
    """
    Input was stop. This is an orphan process.