    def get_tasks(self):
        conf_mgr = cm.ConfManager(self.metas[tac.server_uri],
                                  self.metas[tac.session_key])
        tasks = self._get_config_rule_tasks(conf_mgr)

        settings = conf_mgr.all_stanzas_as_dicts(self.conf_file,
                                                 do_reload=False)
        proxy_info = tpc.get_proxy_info(self.metas[tac.session_key])
        # set proxy here for validating credentials
        tacommon.set_proxy_env(proxy_info)

        set_log_level(settings[tac.log_stanza][tac.log_level])

        valid_tasks = []
        for task in tasks:
            try:
                # validate credentials
                tacommon.get_service_client(task, tac.config)
                task[tac.log_level] = settings[tac.log_stanza][tac.log_level]
                task.update(settings[tac.global_settings])
                task.update(proxy_info)
                valid_tasks.append(task)
            except Exception:
                input_name = scutil.extract_datainput_name(task[tac.name])
                logger.exception(
                    'Failed to load credentials; ignoring this input.',
                    datainput=input_name)
        return tacommon.handle_hec(valid_tasks, "aws_config_rule")

    def prepare(self, app, config):
        settings = config.load('aws_sqs')

        # Set Logging
        level = settings['logging']['log_level']
        set_log_level(level)

        inputs = config.load('aws_sqs_tasks')

        # If config is empty, do nothing and return.
        if not inputs:
            logger.info('No Task Configured')
            return

        logger.debug('AWS SQS Input Discover')

        # Set Proxy
        proxy = ProxySettings.load(config)
        proxy.hook_boto3_get_proxies()

        scheduler = app.create_task_scheduler(self.perform)

        # Generate Tasks
        for name, item in inputs.items():
            if scutil.is_true(item.get('disabled', '0')):
                continue
            item['datainput'] = name
            self.generate_tasks(name, item, scheduler)

        scheduler.run([app.is_aborted, config.has_expired])
        return 0
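
The prepare example above skips any input whose 'disabled' flag is truthy, using scutil.is_true. A minimal sketch of such a truthy-string helper, assuming it only needs to normalize the boolean-like strings Splunk writes to conf files (the accepted values here are an assumption, not the actual scutil API):

def is_true(value):
    """Interpret common conf-file spellings of boolean true ('1', 'true', 'yes', ...)."""
    if value is None:
        return False
    return str(value).strip().lower() in ('1', 'true', 'yes', 'y', 'on')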
Example #3
    def _get_tasks(self):
        if not self.stanza_configs:
            return None

        conf_mgr = cm.ConfManager(self.metas[tac.server_uri],
                                  self.metas[tac.session_key])
        logging_settings = conf_mgr.get_stanza(self.log_info,
                                               asc.log_stanza,
                                               do_reload=False)
        # set the log level read from conf for our logger
        set_log_level(logging_settings[asc.log_level])

        # entry point for this stanza task, setup root logger here
        # Generic S3 can be configured to be single-instance
        # or multiple instance
        # through env variable
        stanza_name = ''
        try:
            if len(self.stanza_configs) == 1:
                # only one stanza exists
                stanza_name = self.stanza_configs[0].get('name', '')
        except Exception:
            logger.exception('Failed to get stanza name!')

        stanza_name = extract_datainput_name(stanza_name)
        logging.setup_root_logger(app_name=tac.splunk_ta_aws,
                                  modular_name=asc.mod_name,
                                  stanza_name=stanza_name)

        proxy_info = tpc.get_proxy_info(self.metas[tac.session_key])
        tasks, creds = [], {}
        for stanza in self.stanza_configs:
            task = {}
            task.update(stanza)
            task.update(self.metas)
            task.update(proxy_info)
            task[tac.log_level] = logging_settings[asc.log_level]
            task[tac.interval] = tacommon.get_interval(task, 3600)
            task[tac.polling_interval] = task[tac.interval]
            task[asc.max_retries] = int(task.get(asc.max_retries, 3))
            task[asc.prefix] = task.get(asc.key_name)
            task[asc.last_modified] = self._get_last_modified_time(
                task[asc.initial_scan_datetime])
            task[asc.terminal_scan_datetime] = \
                self._convert_terminal_scan_datetime(
                    task.get(asc.terminal_scan_datetime))
            input_name = scutil.extract_datainput_name(task[tac.name])
            task[asc.data_input] = input_name
            task[tac.sourcetype] = task.get(tac.sourcetype, "aws:s3")
            task[asc.bucket_name] = str(task[asc.bucket_name])
            if not task.get(asc.whitelist):
                task[asc.whitelist] = s3common.sourcetype_to_keyname_regex.get(
                    task[tac.sourcetype])
            tasks.append(task)
            logger.info("Done with configuration read from conf.")
        s3ckpt.handle_ckpts(tasks)
        return tasks
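
Several of these examples derive the data input name from a stanza name with extract_datainput_name. A minimal sketch, assuming stanza names follow the usual Splunk modular-input form "scheme://input_name" (this implementation is an assumption, not the actual helper):

def extract_datainput_name(stanza_name):
    """Return the part after 'scheme://' in a modular input stanza name.

    Assumes names such as 'aws_s3://my_input'; falls back to the raw name.
    """
    _, sep, name = stanza_name.partition('://')
    return name if sep else stanza_name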
Example #4
    def get_tasks(self):
        conf_mgr = cm.ConfManager(self.metas[tac.server_uri],
                                  self.metas[tac.session_key])
        tasks = self._get_cloudwatch_logs_tasks(conf_mgr)

        logging_settings = conf_mgr.get_stanza(self.conf_file,
                                               tac.log_stanza,
                                               do_reload=False)

        set_log_level(logging_settings[tac.log_level])

        proxy_info = tpc.get_proxy_info(self.metas[tac.session_key])

        for task in tasks:
            task[tac.log_level] = logging_settings[tac.log_level]
            task.update(proxy_info)
        return tasks
Example #5
    def get_tasks(self):
        conf_mgr = cm.ConfManager(self.metas[tac.server_uri],
                                  self.metas[tac.session_key])
        all_tasks = self._get_kinesis_tasks(conf_mgr)

        settings = conf_mgr.all_stanzas_as_dicts(self.conf_file,
                                                 do_reload=False)

        # set logging level for our logger
        set_log_level(settings[tac.log_stanza][tac.log_level])

        for task in all_tasks:
            task[tac.log_level] = settings[tac.log_stanza][tac.log_level]
            task.update(settings[tac.global_settings])

        ackpt.clean_up_ckpt_for_deleted_data_input(all_tasks)

        return tacommon.handle_hec(all_tasks, "aws_kinesis")
Example #6
    def _prepare(self):
        if self.input_item['aws_account'] \
                not in self.configs[Configs.ACCOUNTS]:
            raise Exception('AWS account not found for datainput')

        # Set Logging
        set_log_level(
            self.configs[Configs.SETTINGS_LOGGING]['logging']['level'])
        logger.debug('Running Started', datainput=self.input_name)

        # Set Proxy
        tacommon.set_proxy_env(self.configs[Configs.SETTINGS_PROXY])

        aws_account_name = self.input_item['aws_account']
        self.aws_account = self.configs[Configs.ACCOUNTS][aws_account_name]
        self.sqs_client = get_sqs_client(self.input_item['aws_region'],
                                         self.aws_account.get('key_id'),
                                         self.aws_account.get('secret_key'),
                                         self.aws_account.get('token'))

    def get_tasks(self):
        conf_mgr = cm.ConfManager(self.metas[tac.server_uri],
                                  self.metas[tac.session_key])
        tasks = self._get_description_tasks(conf_mgr)

        logging_settings = conf_mgr.get_stanza(self.conf_file,
                                               tac.log_stanza,
                                               do_reload=False)

        # set logging level for our logger
        set_log_level(logging_settings[tac.log_level])

        proxy_info = tpc.get_proxy_info(self.metas[tac.session_key])

        # Set proxy for loading credentials by boto3
        tacommon.set_proxy_env(proxy_info)

        for task in tasks:
            task[tac.log_level] = logging_settings[tac.log_level]
            task.update(proxy_info)

        self._assign_source(tasks)
        return tasks
Example #8
    def get_tasks(self):
        conf_mgr = cm.ConfManager(self.metas[tac.server_uri],
                                  self.metas[tac.session_key])
        stanzas = conf_mgr.all_stanzas(self.task_file, do_reload=False)
        settings = conf_mgr.all_stanzas_as_dicts(self.conf_file,
                                                 do_reload=False)
        proxy_info = tpc.get_proxy_info(self.metas[tac.session_key])
        # set proxy here for validating credentials
        tacommon.set_proxy_env(proxy_info)

        level = settings[tac.log_stanza][tac.log_level]
        set_log_level(level)

        tasks = self._get_inspector_tasks(stanzas, settings, proxy_info)

        config = dict()
        config.update(self.metas)
        config.update(settings[tac.global_settings])
        _cleanup_checkpoints(tasks, config)
        tasks = [
            task for task in tasks if not scutil.is_true(task.get('disabled'))
        ]
        return tacommon.handle_hec(tasks, "aws_inspector")

    def _stream_events(self, inputs, ew):
        """helper function"""
        loglevel = get_level('aws_config', self.service.token, appName=APPNAME)

        set_log_level(loglevel)

        logger.log(
            logging.INFO,
            "STARTED: {}".format(len(sys.argv) > 1 and sys.argv[1] or ''))
        logger.log(logging.DEBUG, "Start streaming.")
        self._ew = ew

        if os.name == 'nt':
            import win32api
            win32api.SetConsoleCtrlHandler(self._exit_handler, True)
        else:
            import signal
            signal.signal(signal.SIGTERM, self._exit_handler)
            signal.signal(signal.SIGINT, self._exit_handler)

        # because we only support one stanza...
        self.input_name, self.input_items = inputs.inputs.popitem()

        self.enable_additional_notifications = (
            self.input_items.get('enable_additional_notifications')
            or 'false').lower() in ('1', 'true', 'yes', 'y', 'on')
        # self.configure_blacklist()

        base_sourcetype = self.input_items.get("sourcetype") or "aws:config"
        session_key = self.service.token
        key_id, secret_key = tac.get_aws_creds(self.input_items,
                                               inputs.metadata, {})

        # Try S3 Connection
        s3_conns = {}

        # Create SQS Connection
        sqs_conn = s3util.connect_sqs(self.input_items['aws_region'], key_id,
                                      secret_key, self.service.token)

        if sqs_conn is None:
            # No recovering from this...
            logger.log(
                logging.FATAL, "Invalid SQS Queue Region: {}".format(
                    self.input_items['aws_region']))
            raise Exception("Invalid SQS Queue Region: {}".format(
                self.input_items['aws_region']))
        else:
            logger.log(logging.DEBUG, "Connected to SQS successfully")

        try:

            while not self._canceled:
                sqs_queue = s3util.get_queue(sqs_conn,
                                             self.input_items['sqs_queue'])

                if sqs_queue is None:
                    try:
                        # verify it isn't an auth issue
                        sqs_queues = sqs_conn.get_all_queues()
                    except boto.exception.SQSError as e:
                        logger.log(
                            logging.FATAL,
                            "sqs_conn.get_all_queues(): {} {}: {} - {}".format(
                                e.status, e.reason, e.error_code,
                                e.error_message))
                        raise
                    else:
                        logger.log(
                            logging.FATAL,
                            "sqs_conn.get_queue(): Invalid SQS Queue Name: {}".
                            format(self.input_items['sqs_queue']))
                        break

                sqs_queue.set_message_class(boto.sqs.message.RawMessage)

                # num_messages=10 was chosen based on aws pricing faq.
                # see request batch pricing: http://aws.amazon.com/sqs/pricing/
                notifications = sqs_queue.get_messages(num_messages=10,
                                                       visibility_timeout=20,
                                                       wait_time_seconds=20)
                logger.log(
                    logging.DEBUG,
                    "Length of notifications in sqs=%s for region=%s is: %s" %
                    (self.input_items['sqs_queue'],
                     self.input_items['aws_region'], len(notifications)))

                start_time = time.time()
                completed = []
                failed = []

                stats = {'written': 0}

                # if not notifications or self._canceled:
                #     continue

                # Exit if SQS returns nothing; wake up on the interval specified in inputs.conf.
                if len(notifications) == 0:
                    self._canceled = True
                    break

                for notification in notifications:
                    if self._canceled or self._check_orphan():
                        break

                    try:
                        envelope = json.loads(notification.get_body())
                    # What do we do with non-JSON data? Leave it in the queue, but recommend the customer use an SQS queue only for AWS Config?
                    except Exception as e:
                        failed.append(notification)
                        logger.log(
                            logging.ERROR,
                            "problems decoding notification JSON string: {} {}"
                            .format(type(e).__name__, e))
                        continue

                    if not isinstance(envelope, dict):
                        failed.append(notification)
                        logger.log(
                            logging.ERROR,
                            "This doesn't look like a valid Config message. Please check SQS settings."
                        )
                        continue

                    if (all(key in envelope for key in
                            ("Type", "MessageId", "TopicArn", "Message"))
                            and isinstance(envelope['Message'], basestring)):
                        logger.log(
                            logging.DEBUG,
                            "This is considered a Config notification.")
                        try:
                            envelope = json.loads(envelope['Message'])
                            if not isinstance(envelope, dict):
                                failed.append(notification)
                                logger.log(
                                    logging.ERROR,
                                    "This doesn't look like a valid Config message. Please check SQS settings."
                                )
                                continue
                        except Exception as e:
                            failed.append(notification)
                            logger.log(
                                logging.ERROR,
                                "problems decoding message JSON string: {} {}".
                                format(type(e).__name__, e))
                            continue

                    if 'messageType' in envelope:
                        logger.log(
                            logging.DEBUG,
                            "This is considered a Config message. 'Raw Message Delivery' may be 'True'."
                        )
                        message = envelope
                    else:
                        failed.append(notification)
                        logger.log(
                            logging.ERROR,
                            "This doesn't look like a valid Config message. Please check SQS settings."
                        )
                        continue

                    ## Process: config notifications, history and snapshot notifications (additional)

                    # Process notifications with payload, check ConfigurationItemChangeNotification
                    msg_type = message.get('messageType', '')
                    if msg_type == 'ConfigurationItemChangeNotification':
                        logger.log(
                            logging.DEBUG,
                            "Consuming configuration change data in SQS payload."
                        )
                        # determine _time for the event
                        configurationItem = message.get(
                            'configurationItem', '')
                        configurationItemCaptureTime = configurationItem.get(
                            'configurationItemCaptureTime', '')
                        event_time = int(
                            calendar.timegm(
                                time.strptime(
                                    configurationItemCaptureTime.replace(
                                        "Z", "GMT"),
                                    "%Y-%m-%dT%H:%M:%S.%f%Z")))
                        # write the event
                        event = smi.Event(data=json.dumps(message),
                                          time=event_time,
                                          sourcetype=base_sourcetype +
                                          ":notification")
                        ew.write_event(event)
                        stats['written'] += 1
                        completed.append(notification)

                    # Process ConfigurationHistoryDeliveryCompleted notifications by fetching data from S3 buckets
                    elif (msg_type == 'ConfigurationHistoryDeliveryCompleted'
                          and message.get('s3ObjectKey', '') != ''
                          and message.get('s3Bucket', '') != ''):
                        logger.log(
                            logging.DEBUG,
                            "Consuming configuration history change data in S3 bucket."
                        )

                        bucket_name = message.get('s3Bucket', '')
                        key = message.get('s3ObjectKey', '')
                        logger.log(
                            logging.INFO,
                            "Consume config history from s3 with s3Bucket '{0}' s3ObjectKey '{1}'"
                            .format(bucket_name, key))

                        completed_buf, failed_buf = self.process_confighistory(
                            s3_conns, key_id, secret_key, session_key,
                            notification, bucket_name, key)
                        completed.extend(completed_buf)
                        failed.extend(failed_buf)
                        logger.log(
                            logging.DEBUG,
                            "Length of completed after reaching into s3bucket: {0}"
                            .format(len(completed)))

                    # Process ConfigurationSnapshotDeliveryCompleted notifications by fetching data from S3 buckets
                    elif (msg_type == 'ConfigurationSnapshotDeliveryCompleted'
                          and message.get('s3ObjectKey', '') != ''
                          and message.get('s3Bucket', '') != ''):
                        logger.log(
                            logging.DEBUG,
                            "Consuming configuration snapshot data in S3 bucket."
                        )

                        bucket_name = message.get('s3Bucket', '')
                        key = message.get('s3ObjectKey', '')
                        logger.log(
                            logging.INFO,
                            "Consume config snapshot from s3 with s3Bucket '{0}' s3ObjectKey '{1}'"
                            .format(bucket_name, key))

                        completed_buf, failed_buf = self.process_confighistory(
                            s3_conns, key_id, secret_key, session_key,
                            notification, bucket_name, key)
                        completed.extend(completed_buf)
                        failed.extend(failed_buf)
                        logger.log(
                            logging.DEBUG,
                            "Length of completed after reaching into s3bucket: {0}"
                            .format(len(completed)))

                    # # Ingest all other notification of types: ConfigurationSnapshot*etc. but only when enable_additional_notifications is true.
                    # elif self.enable_additional_notifications and msg_type.startswith("ConfigurationSnapshot"):
                    #     logger.log(logging.DEBUG, "Consuming additional notifications enabled")
                    #     notificationCreationTime = message.get('notificationCreationTime', '')
                    #     event_time = int(calendar.timegm(time.strptime(notificationCreationTime.replace("Z", "GMT"), "%Y-%m-%dT%H:%M:%S.%f%Z")))
                    #     # write the event
                    #     event = smi.Event(data=json.dumps(message),
                    #                   time=event_time,
                    #                   sourcetype=base_sourcetype+":additional")
                    #     ew.write_event(event)
                    #     stats['written'] += 1
                    #     completed.append(notification)

                    elif msg_type in [
                            'ComplianceChangeNotification',
                            'ConfigurationSnapshotDeliveryStarted',
                            'ConfigRulesEvaluationStarted'
                    ]:
                        logger.log(
                            logging.INFO,
                            'Ignore this message and delete the sqs messages.')
                        completed.append(notification)

                    else:
                        failed.append(notification)
                        logger.log(
                            logging.ERROR,
                            "This doesn't look like a Config notification or message. Please check SQS settings."
                        )
                        continue

                notification_delete_errors = 0
                # Delete ingested notifications
                if completed:
                    logger.log(
                        logging.INFO,
                        "Delete {0} completed messages from SQS".format(
                            len(completed)))
                    br = sqs_queue.delete_message_batch(completed)
                    if br.errors:
                        notification_delete_errors = len(br.errors)

                if failed:
                    logger.log(logging.DEBUG,
                               "sqs_queue.delete_message_batch(failed)")
                    logger.log(
                        logging.INFO,
                        "Delete {0} failed messages from SQS".format(
                            len(failed)))
                    br = sqs_queue.delete_message_batch(failed)
                    logger.log(logging.DEBUG,
                               "sqs_queue.delete_message_batch done")
                    if br.errors:
                        notification_delete_errors = len(br.errors)
                    failed_messages = ','.join([m.get_body() for m in failed])
                    logger.log(
                        logging.WARN,
                        "Invalid notifications have been removed from SQS : %s",
                        failed_messages)

                else:
                    logger.log(logging.INFO, (
                        "{} completed, {} failed while processing a notification batch of {}"
                        " [{} errors deleting {} notifications]"
                        "  Elapsed: {:.3f}s").format(
                            len(completed), len(failed), len(notifications),
                            notification_delete_errors, len(completed),
                            time.time() - start_time))

        except Exception as e:
            logger.log(logging.FATAL, "Outer catchall: %s: %s",
                       type(e).__name__, e)
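
The _stream_events example converts the Config notification's configurationItemCaptureTime (e.g. '2016-09-19T03:07:25.192Z') into an epoch timestamp inline. The same conversion as a standalone sketch (the function name is illustrative only):

import calendar
import time

def capture_time_to_epoch(capture_time):
    """Convert a UTC timestamp such as '2016-09-19T03:07:25.192Z' to Unix time."""
    return int(calendar.timegm(
        time.strptime(capture_time.replace("Z", "GMT"),
                      "%Y-%m-%dT%H:%M:%S.%f%Z")))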
Example #10
    def setup_log_level(self):
        set_log_level(self._settings.log_level)
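
Every example above ultimately calls set_log_level. A minimal sketch of such a helper, assuming it simply applies a configured level name to the add-on's logger via the standard logging module (the logger name and fallback level are assumptions, not the actual implementation):

import logging

logger = logging.getLogger('splunk_ta_aws')

def set_log_level(level):
    """Apply a level name (e.g. 'DEBUG') or a numeric level to our logger."""
    if not isinstance(level, int):
        level = getattr(logging, str(level).upper(), logging.INFO)
    logger.setLevel(level)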
Example #11
    def get_tasks(self):
        if not self.stanza_configs:
            return None

        conf_mgr = cm.ConfManager(self.server_uri, self.session_key)

        settings = conf_mgr.all_stanzas_as_dicts(self.conf_file,
                                                 do_reload=False)

        # set logging level for our logger
        set_log_level(settings[tac.log_stanza][tac.log_level])

        proxy_info = tpc.get_proxy_info(self.session_key)
        tasks, creds = {}, {}
        for stanza in self.stanza_configs:
            input_name = scutil.extract_datainput_name(stanza[tac.name])
            with logging.LogContext(datainput=input_name):
                stanza[tac.interval] = tacommon.get_interval(stanza, 60)
                stanza[tac.polling_interval] = stanza[tac.interval]
                stanza[acc.period] = int(stanza[acc.period])

                if stanza[acc.period] > 86400 or stanza[acc.period] < 60:
                    logger.error(
                        "Granularity (period) is not in range [60, 86400]; ignoring this input.",
                        Period=stanza[acc.period],
                        ErrorCode="ConfigurationError",
                        ErrorDetail="Invalid granularity (period); it is out of range [60, 86400].")
                    continue

                if stanza[tac.polling_interval] % stanza[acc.period]:
                    logger.error(
                        "Polling interval is not a multiple of period; ignoring this input.",
                        Period=stanza[acc.period],
                        ErrorCode="ConfigurationError",
                        ErrorDetail="Polling interval should be a multiple of the granularity (period).")
                    continue

                stanza[tac.datainput] = input_name
                stanza[tac.sourcetype] = stanza.get(tac.sourcetype,
                                                    "aws:cloudwatch")
                metric_names = stanza[acc.metric_names].strip()
                if metric_names != ".*":
                    metric_names = json.loads(metric_names)
                else:
                    metric_names = None
                stanza[acc.metric_names] = metric_names

                stanza[acc.metric_dimensions] = json.loads(
                    stanza[acc.metric_dimensions])
                stanza[acc.statistics] = json.loads(stanza[acc.statistics])

                stanza[tac.log_level] = settings[tac.log_stanza][tac.log_level]

                stanza[tac.aws_account] = stanza.get('aws_account')
                stanza[tac.aws_iam_role] = stanza.get('aws_iam_role')

                stanza[acc.use_metric_format] = scutil.is_true(
                    stanza.get(acc.use_metric_format, False))

                stanza.update(self.metas)
                stanza.update(proxy_info)
                stanza.update(settings[tac.global_settings])
                stanza[tac.use_hec] = scutil.is_true(
                    stanza.get(tac.use_hec, False))
                stanza[acc.max_api_saver_time] = \
                    int(stanza.get(acc.max_api_saver_time, 7200))

                region_tasks = {}
                tasks[stanza[tac.datainput]] = region_tasks
                for region in stanza[tac.aws_region].split(","):
                    region = region.strip()
                    if not region:
                        continue

                    task = {}
                    task.update(stanza)
                    task[tac.aws_region] = region
                    task[tac.region] = region
                    num, rtasks = self._expand_task(task)
                    if rtasks:
                        region_tasks[region] = rtasks
                    stanza[region] = num

                if not region_tasks:
                    logger.warning("No metric/dimension has been found.")
        all_tasks = []
        for region_tasks in tasks.itervalues():
            for rtasks in region_tasks.itervalues():
                all_tasks.extend(rtasks)
        tacommon.handle_hec(all_tasks, "aws_cloudwatch")

        return all_tasks
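
The CloudWatch example above rejects a stanza when the granularity (period) falls outside [60, 86400] seconds or when the polling interval is not a multiple of it. The same checks as a standalone sketch (the function name and return convention are illustrative only):

def validate_cloudwatch_schedule(period, polling_interval):
    """Return None if the schedule is valid, otherwise an error message."""
    if not 60 <= period <= 86400:
        return "Granularity (period) must be in the range [60, 86400] seconds."
    if polling_interval % period:
        return "Polling interval must be a multiple of the granularity (period)."
    return None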