Example #1
class EventHandler:

    def __init__(self, env: str, inventory_collection: str):
        super().__init__()
        self.inv = InventoryMgr()
        self.inv.set_collections(inventory_collection)
        self.env = env
        self.log = FullLogger(env=env)
        self.handlers = {}

    def discover_handlers(self, handlers_package: str, event_handlers: dict):
        if not event_handlers:
            raise TypeError("Event handlers list is empty")

        for event_name, handler_name in event_handlers.items():
            handler = ClassResolver.get_instance_of_class(handler_name, handlers_package)
            if not isinstance(handler, EventBase):
                raise TypeError("Event handler '{}' is not a subclass of EventBase"
                                .format(handler_name))
            if event_name in self.handlers:
                self.log.warning("A handler is already registered for event type '{}'. Overwriting"
                                 .format(event_name))
            self.handlers[event_name] = handler

    def handle(self, event_name: str, notification: dict) -> EventResult:
        if event_name not in self.handlers:
            self.log.info("No handler is able to process event of type '{}'"
                          .format(event_name))
            return EventResult(result=False, retry=False)
        return self.handlers[event_name].handle(self.env, notification)
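
A minimal usage sketch for EventHandler, assuming the project classes are importable as in the code above; the handlers package path and the event/handler names below are placeholders, not names confirmed by the source:

# A hedged usage sketch; "events.handlers" and "EventInstanceAdd"
# are hypothetical placeholders.
handler = EventHandler(env="Mirantis-Liberty",
                       inventory_collection="inventory")
handler.discover_handlers(
    handlers_package="events.handlers",
    event_handlers={"compute.instance.create.end": "EventInstanceAdd"})
result = handler.handle(event_name="compute.instance.create.end",
                        notification={"event_type":
                                      "compute.instance.create.end"})
if not result.result:
    print("event was not handled, retry={}".format(result.retry))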
Example #2
class TestScan(unittest.TestCase):
    def configure_environment(self):
        self.env = ENV_CONFIG
        self.inventory_collection = COLLECTION_CONFIG
        # mock the mongo access
        MongoAccess.mongo_connect = MagicMock()
        MongoAccess.db = MagicMock()
        # mock log
        FullLogger.info = MagicMock()

        self.conf = Configuration()
        self.conf.use_env = MagicMock()
        self.conf.environment = CONFIGURATIONS
        self.conf.configuration = CONFIGURATIONS["configuration"]

        self.inv = InventoryMgr()
        self.inv.clear = MagicMock()
        self.inv.set_collections(self.inventory_collection)

        MonitoringSetupManager.server_setup = MagicMock()

        DbAccess.get_neutron_db_name = MagicMock()
        DbAccess.get_neutron_db_name.return_value = "neutron"

    def setUp(self):
        self.configure_environment()
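
With the mocks above in place, a test method can exercise code that touches Mongo, logging, and monitoring setup without any live services; a minimal sketch of such a method (the assertions are illustrative, not from the source):

    # A hedged sketch of a test method for the TestScan class above.
    def test_environment_is_configured(self):
        self.assertEqual(ENV_CONFIG, self.env)
        # Mongo access is fully mocked, so no real connection exists
        self.assertIsInstance(MongoAccess.db, MagicMock)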
Example #3
class TestFetch(unittest.TestCase):
    def setUp(self):
        self._mongo_connect = MongoAccess.mongo_connect
        self._mongo_db = MongoAccess.db
        self._db_access_conn = DbAccess.conn
        self._ssh_connect = SshConnection.connect
        self._ssh_conn_check_defs = SshConnection.check_definitions
        self._ssh_check_defs = SshConn.check_definitions

        self.req_patcher = patch("discover.fetchers.api.api_access.requests")
        self.requests = self.req_patcher.start()
        self.response = MagicMock()
        self.requests.codes.ok = 200
        self.response.json = Mock(return_value=CORRECT_AUTH_CONTENT)
        self.response.status_code = self.requests.codes.ok
        self.requests.get.return_value = self.response
        self.requests.post.return_value = self.response

        self.ssh_patcher = patch("discover.fetchers.cli.cli_access.SshConn")
        self.ssh_conn = self.ssh_patcher.start().return_value

    def configure_environment(self):
        self.env = ENV_CONFIG
        self.inventory_collection = COLLECTION_CONFIG
        # mock the Mongo Access
        MongoAccess.mongo_connect = MagicMock()
        MongoAccess.db = MagicMock()

        self.conf = Configuration()
        self.conf.use_env = MagicMock()
        self.conf.environment = CONFIGURATIONS
        self.conf.configuration = CONFIGURATIONS["configuration"]

        self.inv = InventoryMgr()
        self.inv.set_collections(self.inventory_collection)
        DbAccess.conn = MagicMock()
        DbAccess.get_neutron_db_name = MagicMock()
        DbAccess.get_neutron_db_name.return_value = "neutron"
        SshConnection.connect = MagicMock()
        SshConnection.check_definitions = MagicMock()
        SshConn.check_definitions = MagicMock()

    def set_regions_for_fetcher(self, fetcher):
        self._regions = fetcher.regions
        fetcher.regions = REGIONS

    def reset_regions_for_fetcher(self, fetcher):
        fetcher.regions = self._regions

    def tearDown(self):
        MongoAccess.mongo_connect = self._mongo_connect
        MongoAccess.db = self._mongo_db
        DbAccess.conn = self._db_access_conn
        SshConnection.connect = self._ssh_connect
        SshConnection.check_definitions = self._ssh_conn_check_defs
        SshConn.check_definitions = self._ssh_check_defs
        self.req_patcher.stop()
        self.ssh_patcher.stop()
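
The set_regions_for_fetcher/reset_regions_for_fetcher helpers above are meant to bracket a single fetch; a minimal sketch of the intended pattern, where SomeApiFetcher and its get() signature are placeholders, not names from the source:

    # A hedged sketch of a test method for the TestFetch class above.
    def test_fetch_with_mocked_regions(self):
        self.configure_environment()
        fetcher = SomeApiFetcher()  # hypothetical fetcher class
        self.set_regions_for_fetcher(fetcher)
        try:
            result = fetcher.get("some-object-id")
            self.assertIsNotNone(result)
        finally:
            self.reset_regions_for_fetcher(fetcher)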
Example #4
class App:

    ROUTE_DECLARATIONS = {
        "/inventory": "resource.inventory.Inventory",
        "/links": "resource.links.Links",
        "/messages": "resource.messages.Messages",
        "/cliques": "resource.cliques.Cliques",
        "/clique_types": "resource.clique_types.CliqueTypes",
        "/clique_constraints": "resource.clique_constraints.CliqueConstraints",
        "/scans": "resource.scans.Scans",
        "/scheduled_scans": "resource.scheduled_scans.ScheduledScans",
        "/constants": "resource.constants.Constants",
        "/monitoring_config_templates":
            "resource.monitoring_config_templates.MonitoringConfigTemplates",
        "/aggregates": "resource.aggregates.Aggregates",
        "/environment_configs":
            "resource.environment_configs.EnvironmentConfigs",
        "/connection_tests": "resource.connection_tests.ConnectionTests",
        "/auth/tokens": "auth.tokens.Tokens"
    }

    responders_path = "api.responders"

    def __init__(self, mongo_config="", ldap_config="",
                 log_level="", inventory="", token_lifetime=86400):
        MongoAccess.set_config_file(mongo_config)
        self.inv = InventoryMgr()
        self.inv.set_collections(inventory)
        self.log = FullLogger()
        self.log.set_loglevel(log_level)
        self.ldap_access = LDAPAccess(ldap_config)
        Token.set_token_lifetime(token_lifetime)
        self.middleware = AuthenticationMiddleware()
        self.app = falcon.API(middleware=[self.middleware])
        self.app.add_error_handler(CalipsoApiException)
        self.set_routes(self.app)

    def get_app(self):
        return self.app

    def set_routes(self, app):
        for url, class_path in self.ROUTE_DECLARATIONS.items():
            module_name = "{}.{}".format(
                self.responders_path,
                class_path[:class_path.rindex(".")])
            class_name = class_path.split('.')[-1]
            module = importlib.import_module(module_name)
            class_ = getattr(module, class_name)
            resource = class_()
            app.add_route(url, resource)
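
Since falcon.API yields a standard WSGI application, the App class can be served by any WSGI server; a minimal sketch using wsgiref, with placeholder config paths:

# A hedged serving sketch; the config file paths are placeholders.
from wsgiref.simple_server import make_server

application = App(mongo_config="/local_dir/calipso_mongo_access.conf",
                  ldap_config="/local_dir/ldap.conf",
                  log_level="INFO",
                  inventory="inventory").get_app()

if __name__ == "__main__":
    with make_server("", 8000, application) as httpd:
        httpd.serve_forever()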
Example #5
class ScanController(Fetcher):
    DEFAULTS = {
        "env": "",
        "mongo_config": "",
        "type": "",
        "inventory": "inventory",
        "scan_self": False,
        "parent_id": "",
        "parent_type": "",
        "id_field": "id",
        "loglevel": "INFO",
        "inventory_only": False,
        "links_only": False,
        "cliques_only": False,
        "monitoring_setup_only": False,
        "clear": False,
        "clear_all": False
    }

    def __init__(self):
        super().__init__()
        self.conf = None
        self.inv = None

    def get_args(self):
        # try to read scan plan from command line parameters
        parser = argparse.ArgumentParser()
        parser.add_argument("-m",
                            "--mongo_config",
                            nargs="?",
                            type=str,
                            default=self.DEFAULTS["mongo_config"],
                            help="name of config file " +
                            "with MongoDB server access details")
        parser.add_argument("-e",
                            "--env",
                            nargs="?",
                            type=str,
                            default=self.DEFAULTS["env"],
                            help="name of environment to scan \n"
                            "(default: " + self.DEFAULTS["env"] + ")")
        parser.add_argument("-t",
                            "--type",
                            nargs="?",
                            type=str,
                            default=self.DEFAULTS["type"],
                            help="type of object to scan \n"
                            "(default: environment)")
        parser.add_argument("-y",
                            "--inventory",
                            nargs="?",
                            type=str,
                            default=self.DEFAULTS["inventory"],
                            help="name of inventory collection \n"
                            "(default: 'inventory')")
        parser.add_argument("-s",
                            "--scan_self",
                            action="store_true",
                            help="scan changes to a specific object \n"
                            "(default: False)")
        parser.add_argument("-i",
                            "--id",
                            nargs="?",
                            type=str,
                            default=self.DEFAULTS["env"],
                            help="ID of object to scan (when scan_self=true)")
        parser.add_argument("-p",
                            "--parent_id",
                            nargs="?",
                            type=str,
                            default=self.DEFAULTS["parent_id"],
                            help="ID of parent object (when scan_self=true)")
        parser.add_argument("-a",
                            "--parent_type",
                            nargs="?",
                            type=str,
                            default=self.DEFAULTS["parent_type"],
                            help="type of parent object (when scan_self=true)")
        parser.add_argument("-f",
                            "--id_field",
                            nargs="?",
                            type=str,
                            default=self.DEFAULTS["id_field"],
                            help="name of ID field (when scan_self=true) \n"
                            "(default: 'id', use 'name' for projects)")
        parser.add_argument("-l",
                            "--loglevel",
                            nargs="?",
                            type=str,
                            default=self.DEFAULTS["loglevel"],
                            help="logging level \n(default: '{}')".format(
                                self.DEFAULTS["loglevel"]))
        parser.add_argument("--clear",
                            action="store_true",
                            help="clear all data related to "
                            "the specified environment prior to scanning\n"
                            "(default: False)")
        parser.add_argument("--clear_all",
                            action="store_true",
                            help="clear all data prior to scanning\n"
                            "(default: False)")
        parser.add_argument("--monitoring_setup_only",
                            action="store_true",
                            help="do only monitoring setup deployment \n"
                            "(default: False)")

        # At most one of these arguments may be present
        scan_only_group = parser.add_mutually_exclusive_group()
        scan_only_group.add_argument("--inventory_only",
                                     action="store_true",
                                     help="do only scan to inventory\n" +
                                     "(default: False)")
        scan_only_group.add_argument("--links_only",
                                     action="store_true",
                                     help="do only links creation \n" +
                                     "(default: False)")
        scan_only_group.add_argument("--cliques_only",
                                     action="store_true",
                                     help="do only cliques creation \n" +
                                     "(default: False)")

        return parser.parse_args()

    def get_scan_plan(self, args):
        # PyCharm's type checker can't reliably check the document types
        # noinspection PyTypeChecker
        return self.prepare_scan_plan(ScanPlan(args))

    def prepare_scan_plan(self, plan):
        # Find out object type if not specified in arguments
        if not plan.object_type:
            if not plan.object_id:
                plan.object_type = "environment"
            else:
                # If we scan a specific object, it has to exist in db
                scanned_object = self.inv.get_by_id(plan.env, plan.object_id)
                if not scanned_object:
                    exc_msg = "No object found with specified id: '{}'" \
                        .format(plan.object_id)
                    raise ScanArgumentsError(exc_msg)
                plan.object_type = scanned_object["type"]
                plan.parent_id = scanned_object["parent_id"]
                plan.type_to_scan = scanned_object["parent_type"]

        class_module = plan.object_type
        if not plan.scan_self:
            plan.scan_self = plan.object_type != "environment"

        plan.object_type = plan.object_type.title().replace("_", "")

        if not plan.scan_self:
            plan.child_type = None
        else:
            plan.child_id = plan.object_id
            plan.object_id = plan.parent_id
            if plan.type_to_scan.endswith("_folder"):
                class_module = plan.child_type + "s_root"
            else:
                class_module = plan.type_to_scan
            plan.object_type = class_module.title().replace("_", "")

        if class_module == "environment":
            plan.obj = {"id": plan.env}
        else:
            # fetch object from inventory
            obj = self.inv.get_by_id(plan.env, plan.object_id)
            if not obj:
                raise ValueError("No match for object ID: {}".format(
                    plan.object_id))
            plan.obj = obj

        plan.scanner_type = "Scan" + plan.object_type
        return plan

    def run(self, args: dict = None):
        args = setup_args(args, self.DEFAULTS, self.get_args)
        # After this setup we assume args dictionary has all keys
        # defined in self.DEFAULTS
        self.log.set_loglevel(args['loglevel'])

        try:
            MongoAccess.set_config_file(args['mongo_config'])
            self.inv = InventoryMgr()
            self.inv.log.set_loglevel(args['loglevel'])
            self.inv.set_collections(args['inventory'])
            self.conf = Configuration()
        except FileNotFoundError as e:
            return False, 'Mongo configuration file not found: {}'\
                .format(str(e))

        scan_plan = self.get_scan_plan(args)
        if scan_plan.clear or scan_plan.clear_all:
            self.inv.clear(scan_plan)
        self.conf.log.set_loglevel(scan_plan.loglevel)

        env_name = scan_plan.env
        self.conf.use_env(env_name)

        # create the Scanner instance for this environment
        scanner = Scanner()
        scanner.log.set_loglevel(args['loglevel'])
        scanner.set_env(env_name)
        scanner.found_errors[env_name] = False

        # decide what scanning operations to do
        inventory_only = scan_plan.inventory_only
        links_only = scan_plan.links_only
        cliques_only = scan_plan.cliques_only
        monitoring_setup_only = scan_plan.monitoring_setup_only
        run_all = not (inventory_only or links_only or cliques_only
                       or monitoring_setup_only)

        # setup monitoring server
        monitoring = \
            self.inv.is_feature_supported(env_name,
                                          EnvironmentFeatures.MONITORING)
        if monitoring:
            self.inv.monitoring_setup_manager = \
                MonitoringSetupManager(env_name)
            self.inv.monitoring_setup_manager.server_setup()

        # do the actual scanning
        try:
            if inventory_only or run_all:
                scanner.run_scan(scan_plan.scanner_type, scan_plan.obj,
                                 scan_plan.id_field, scan_plan.child_id,
                                 scan_plan.child_type)
            if links_only or run_all:
                scanner.scan_links()
            if cliques_only or run_all:
                scanner.scan_cliques()
            if monitoring:
                if monitoring_setup_only:
                    self.inv.monitoring_setup_manager.simulate_track_changes()
                if not (inventory_only or links_only or cliques_only):
                    scanner.deploy_monitoring_setup()
        except ScanError as e:
            return False, "scan error: " + str(e)
        SshConnection.disconnect_all()
        status = 'ok' if not scanner.found_errors.get(env_name, False) \
            else 'errors detected'
        if status == 'ok' and scan_plan.object_type == "environment":
            self.mark_env_scanned(scan_plan.env)
        self.log.info('Scan completed, status: {}'.format(status))
        return True, status

    def mark_env_scanned(self, env):
        environments_collection = self.inv.collection['environments_config']
        environments_collection \
            .update_one(filter={'name': env},
                        update={'$set': {'scanned': True}})
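
Because run() routes its args through setup_args() against DEFAULTS, ScanController can be driven programmatically as well as from the CLI; a minimal sketch with placeholder values:

# A hedged sketch of a programmatic scan; values are placeholders.
controller = ScanController()
success, status = controller.run({
    "env": "Mirantis-Liberty",
    "mongo_config": "",
    "inventory": "inventory",
    "loglevel": "INFO",
    "clear": True
})
if not success:
    print("scan failed: {}".format(status))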
Example #6
class ScanManager(Manager):

    DEFAULTS = {
        "mongo_config": "",
        "scans": "scans",
        "scheduled_scans": "scheduled_scans",
        "environments": "environments_config",
        "interval": 1,
        "loglevel": "INFO"
    }

    def __init__(self):
        self.args = self.get_args()
        super().__init__(log_directory=self.args.log_directory,
                         mongo_config_file=self.args.mongo_config)
        self.db_client = None
        self.environments_collection = None
        self.scans_collection = None
        self.scheduled_scans_collection = None

    @staticmethod
    def get_args():
        parser = argparse.ArgumentParser()
        parser.add_argument("-m",
                            "--mongo_config",
                            nargs="?",
                            type=str,
                            default=ScanManager.DEFAULTS["mongo_config"],
                            help="Name of config file " +
                            "with MongoDB server access details")
        parser.add_argument("-c",
                            "--scans_collection",
                            nargs="?",
                            type=str,
                            default=ScanManager.DEFAULTS["scans"],
                            help="Scans collection to read from")
        parser.add_argument("-s",
                            "--scheduled_scans_collection",
                            nargs="?",
                            type=str,
                            default=ScanManager.DEFAULTS["scheduled_scans"],
                            help="Scans collection to read from")
        parser.add_argument("-e",
                            "--environments_collection",
                            nargs="?",
                            type=str,
                            default=ScanManager.DEFAULTS["environments"],
                            help="Environments collection to update "
                            "after scans")
        parser.add_argument("-i",
                            "--interval",
                            nargs="?",
                            type=float,
                            default=ScanManager.DEFAULTS["interval"],
                            help="Interval between collection polls"
                            "(must be more than {} seconds)".format(
                                ScanManager.MIN_INTERVAL))
        parser.add_argument("-l",
                            "--loglevel",
                            nargs="?",
                            type=str,
                            default=ScanManager.DEFAULTS["loglevel"],
                            help="Logging level \n(default: '{}')".format(
                                ScanManager.DEFAULTS["loglevel"]))
        parser.add_argument(
            "-d",
            "--log_directory",
            nargs="?",
            type=str,
            default=FileLogger.LOG_DIRECTORY,
            help="File logger directory \n(default: '{}')".format(
                FileLogger.LOG_DIRECTORY))
        args = parser.parse_args()
        return args

    def configure(self):
        self.db_client = MongoAccess()
        self.inv = InventoryMgr()
        self.inv.set_collections()
        self.scans_collection = self.db_client.db[self.args.scans_collection]
        self.scheduled_scans_collection = \
            self.db_client.db[self.args.scheduled_scans_collection]
        self.environments_collection = \
            self.db_client.db[self.args.environments_collection]
        self._update_document = \
            partial(MongoAccess.update_document, self.scans_collection)
        self.interval = max(self.MIN_INTERVAL, self.args.interval)
        self.log.set_loglevel(self.args.loglevel)

        self.log.info("Started ScanManager with following configuration:\n"
                      "Mongo config file path: {0.args.mongo_config}\n"
                      "Scans collection: {0.scans_collection.name}\n"
                      "Environments collection: "
                      "{0.environments_collection.name}\n"
                      "Polling interval: {0.interval} second(s)".format(self))

    def _build_scan_args(self, scan_request: dict):
        args = {
            'mongo_config': self.args.mongo_config,
            'scheduled': bool(scan_request.get('interval'))
        }

        def set_arg(name_from: str, name_to: str = None):
            if name_to is None:
                name_to = name_from
            val = scan_request.get(name_from)
            if val:
                args[name_to] = val

        set_arg("_id")
        set_arg("object_id", "id")
        set_arg("log_level", "loglevel")
        set_arg("environment", "env")
        set_arg("scan_only_inventory", "inventory_only")
        set_arg("scan_only_links", "links_only")
        set_arg("scan_only_cliques", "cliques_only")
        set_arg("inventory")
        set_arg("clear")
        set_arg("clear_all")

        return args

    def _finalize_scan(self, scan_request: dict, status: ScanStatus,
                       scanned: bool):
        scan_request['status'] = status.value
        self._update_document(scan_request)
        # If no object id is present, it's a full env scan.
        # We need to update environments collection
        # to reflect the scan results.
        if not scan_request.get('id'):
            self.environments_collection\
                .update_one(filter={'name': scan_request.get('environment')},
                            update={'$set': {'scanned': scanned}})

    def _fail_scan(self, scan_request: dict):
        self._finalize_scan(scan_request, ScanStatus.FAILED, False)

    def _complete_scan(self, scan_request: dict, result_message: str):
        status = ScanStatus.COMPLETED if result_message == 'ok' \
            else ScanStatus.COMPLETED_WITH_ERRORS
        self._finalize_scan(scan_request, status, True)

    # PyCharm's type checker can't reliably check the document types
    # noinspection PyTypeChecker
    def _clean_up(self):
        # Find and fail all running scans
        running_scans = list(
            self.scans_collection.find(
                filter={'status': ScanStatus.RUNNING.value}))
        self.scans_collection \
            .update_many(filter={'_id': {'$in': [scan['_id']
                                                 for scan
                                                 in running_scans]}},
                         update={'$set': {'status': ScanStatus.FAILED.value}})

        # Find all environments connected to failed full env scans
        env_scans = [
            scan['environment'] for scan in running_scans
            if not scan.get('object_id') and scan.get('environment')
        ]

        # Set 'scanned' flag in those envs to false
        if env_scans:
            self.environments_collection\
                .update_many(filter={'name': {'$in': env_scans}},
                             update={'$set': {'scanned': False}})

    def _submit_scan_request_for_schedule(self, scheduled_scan, interval, ts):
        scans = self.scans_collection
        new_scan = {
            'status': 'submitted',
            'log_level': scheduled_scan['log_level'],
            'clear': scheduled_scan['clear'],
            'scan_only_inventory': scheduled_scan['scan_only_inventory'],
            'scan_only_links': scheduled_scan['scan_only_links'],
            'scan_only_cliques': scheduled_scan['scan_only_cliques'],
            'submit_timestamp': ts,
            'interval': interval,
            'environment': scheduled_scan['environment'],
            'inventory': 'inventory'
        }
        scans.insert_one(new_scan)

    def _set_scheduled_requests_next_run(self, scheduled_scan, interval, ts):
        scheduled_scan['scheduled_timestamp'] = ts + self.INTERVALS[interval]
        doc_id = scheduled_scan.pop('_id')
        self.scheduled_scans_collection.replace_one({'_id': doc_id},
                                                    scheduled_scan)

    def _prepare_scheduled_requests_for_interval(self, interval):
        now = datetime.datetime.utcnow()

        # first, submit a scan request where the scheduled time has come
        condition = {
            '$and': [{
                'freq': interval
            }, {
                'scheduled_timestamp': {
                    '$lte': now
                }
            }]
        }
        matches = self.scheduled_scans_collection.find(condition) \
            .sort('scheduled_timestamp', pymongo.ASCENDING)
        for match in matches:
            self._submit_scan_request_for_schedule(match, interval, now)
            self._set_scheduled_requests_next_run(match, interval, now)

        # now set scheduled time where it was not set yet (new scheduled scans)
        condition = {
            '$and': [{
                'freq': interval
            }, {
                'scheduled_timestamp': {
                    '$exists': False
                }
            }]
        }
        matches = self.scheduled_scans_collection.find(condition)
        for match in matches:
            self._set_scheduled_requests_next_run(match, interval, now)

    def _prepare_scheduled_requests(self):
        # see if any scheduled request is waiting to be submitted
        for interval in self.INTERVALS.keys():
            self._prepare_scheduled_requests_for_interval(interval)

    def handle_scans(self):
        self._prepare_scheduled_requests()

        # Find the pending request that has been waiting the longest
        results = self.scans_collection \
            .find({'status': ScanStatus.PENDING.value,
                   'submit_timestamp': {'$ne': None}}) \
            .sort("submit_timestamp", pymongo.ASCENDING) \
            .limit(1)

        # If no scans are pending, sleep for some time
        pending_requests = list(results)
        if not pending_requests:
            time.sleep(self.interval)
        else:
            scan_request = pending_requests[0]
            env = scan_request.get('environment')
            scan_feature = EnvironmentFeatures.SCANNING
            if not self.inv.is_feature_supported(env, scan_feature):
                self.log.error("Scanning is not supported for env '{}'".format(
                    scan_request.get('environment')))
                self._fail_scan(scan_request)
                return

            scan_request['start_timestamp'] = datetime.datetime.utcnow()
            scan_request['status'] = ScanStatus.RUNNING.value
            self._update_document(scan_request)

            # Prepare scan arguments and run the scan with them
            try:
                scan_args = self._build_scan_args(scan_request)

                self.log.info("Starting scan for '{}' environment".format(
                    scan_args.get('env')))
                self.log.debug("Scan arguments: {}".format(scan_args))
                result, message = ScanController().run(scan_args)
            except ScanArgumentsError as e:
                self.log.error("Scan request '{id}' "
                               "has invalid arguments. "
                               "Errors:\n{errors}".format(
                                   id=scan_request['_id'], errors=e))
                self._fail_scan(scan_request)
            except Exception as e:
                self.log.exception(e)
                self.log.error("Scan request '{}' has failed.".format(
                    scan_request['_id']))
                self._fail_scan(scan_request)
            else:
                # Check if the scan returned success
                if not result:
                    self.log.error(message)
                    self.log.error("Scan request '{}' has failed.".format(
                        scan_request['_id']))
                    self._fail_scan(scan_request)
                    return

                # update the status and timestamps.
                self.log.info("Request '{}' has been scanned. ({})".format(
                    scan_request['_id'], message))
                end_time = datetime.datetime.utcnow()
                scan_request['end_timestamp'] = end_time
                self._complete_scan(scan_request, message)

    def do_action(self):
        self._clean_up()
        try:
            while True:
                self.handle_scans()
        finally:
            self._clean_up()
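
To make the request-to-args mapping of _build_scan_args() above concrete, here is a sample scan request and the dictionary it yields (field values are placeholders):

# Derived from _build_scan_args(); the values are placeholders.
scan_request = {
    "_id": "some-id",
    "environment": "Mirantis-Liberty",
    "object_id": "node-3",
    "log_level": "DEBUG",
    "interval": "daily"
}
# _build_scan_args(scan_request) then returns:
# {
#     "mongo_config": <self.args.mongo_config>,
#     "scheduled": True,           # because "interval" is set
#     "_id": "some-id",
#     "id": "node-3",              # "object_id" is renamed to "id"
#     "loglevel": "DEBUG",         # "log_level" is renamed to "loglevel"
#     "env": "Mirantis-Liberty"    # "environment" is renamed to "env"
# }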
Example #7
class DefaultListener(ListenerBase, ConsumerMixin):

    SOURCE_SYSTEM = "OpenStack"
    COMMON_METADATA_FILE = "events.json"

    LOG_FILENAME = "default_listener.log"
    LOG_LEVEL = Logger.INFO

    DEFAULTS = {
        "env": "Mirantis-Liberty",
        "mongo_config": "",
        "metadata_file": "",
        "inventory": "inventory",
        "loglevel": "INFO",
        "environments_collection": "environments_config",
        "retry_limit": 10,
        "consume_all": False
    }

    def __init__(self, connection: Connection,
                 event_handler: EventHandler,
                 event_queues: List,
                 env_name: str = DEFAULTS["env"],
                 inventory_collection: str = DEFAULTS["inventory"],
                 retry_limit: int = DEFAULTS["retry_limit"],
                 consume_all: bool = DEFAULTS["consume_all"]):
        super().__init__()

        self.connection = connection
        self.retry_limit = retry_limit
        self.env_name = env_name
        self.consume_all = consume_all
        self.handler = event_handler
        self.event_queues = event_queues
        self.failing_messages = defaultdict(int)

        self.inv = InventoryMgr()
        self.inv.set_collections(inventory_collection)
        if self.inv.is_feature_supported(self.env_name, EnvironmentFeatures.MONITORING):
            self.inv.monitoring_setup_manager = \
                MonitoringSetupManager(self.env_name)

    def get_consumers(self, consumer, channel):
        return [consumer(queues=self.event_queues,
                         accept=['json'],
                         callbacks=[self.process_task])]

    # Determines whether a message should be processed by a handler
    # and extracts the message body if so.
    @staticmethod
    def _extract_event_data(body):
        if "event_type" in body:
            return True, body
        elif "event_type" in body.get("oslo.message", ""):
            return True, json.loads(body["oslo.message"])
        else:
            return False, None

    def process_task(self, body, message):
        received_timestamp = datetime.datetime.now()
        processable, event_data = self._extract_event_data(body)
        # If the env listener can't process the message,
        # or the message is not intended for it,
        # leave the message in the queue unless "consume_all" is set
        if processable and event_data["event_type"] in self.handler.handlers:
            event_result = self.handle_event(event_data["event_type"],
                                             event_data)
            finished_timestamp = datetime.datetime.now()
            self.save_message(message_body=event_data,
                              result=event_result,
                              started=received_timestamp,
                              finished=finished_timestamp)

            # Check whether the event was fully handled
            # and, if not, whether it should be retried later
            if event_result.result:
                message.ack()
            elif event_result.retry:
                if 'message_id' not in event_data:
                    message.reject()
                else:
                    # Track message retry count
                    message_id = event_data['message_id']
                    self.failing_messages[message_id] += 1

                    # Retry handling the message
                    if self.failing_messages[message_id] <= self.retry_limit:
                        self.inv.log.info("Retrying handling message " +
                                          "with id '{}'".format(message_id))
                        message.requeue()
                    # Discard the message if it's not accepted
                    # after specified number of trials
                    else:
                        self.inv.log.warn("Discarding message with id '{}' ".
                                          format(message_id) +
                                          "as it's exceeded the retry limit")
                        message.reject()
                        del self.failing_messages[message_id]
            else:
                message.reject()
        elif self.consume_all:
            message.reject()

    # This method passes the event to its handler.
    # Returns an EventResult:
    # 'result' flag is True if the handler finished successfully,
    #               False otherwise
    # 'retry' flag specifies whether the error is recoverable
    # 'retry' flag is checked only if 'result' is False
    def handle_event(self, event_type: str, notification: dict) -> EventResult:
        self.log.error("Got notification.\nEvent_type: {}\nNotification:\n{}".
                       format(event_type, notification))
        try:
            result = self.handler.handle(event_name=event_type,
                                         notification=notification)
            return result if result else EventResult(result=False, retry=False)
        except Exception as e:
            self.inv.log.exception(e)
            return EventResult(result=False, retry=False)

    def save_message(self, message_body: dict, result: EventResult,
                     started: datetime.datetime,
                     finished: datetime.datetime):
        try:
            message = Message(
                msg_id=message_body.get('message_id'),
                env=self.env_name,
                source=self.SOURCE_SYSTEM,
                object_id=result.related_object,
                display_context=result.display_context,
                level=message_body.get('priority'),
                msg=message_body,
                ts=message_body.get('timestamp'),
                received_ts=started,
                finished_ts=finished
            )
            self.inv.collections['messages'].insert_one(message.get())
            return True
        except Exception as e:
            self.inv.log.error("Failed to save message")
            self.inv.log.exception(e)
            return False

    @staticmethod
    def listen(args: dict = None):

        args = setup_args(args, DefaultListener.DEFAULTS, get_args)
        if 'process_vars' not in args:
            args['process_vars'] = {}

        env_name = args["env"]
        inventory_collection = args["inventory"]

        MongoAccess.set_config_file(args["mongo_config"])
        inv = InventoryMgr()
        inv.set_collections(inventory_collection)
        conf = Configuration(args["environments_collection"])
        conf.use_env(env_name)

        event_handler = EventHandler(env_name, inventory_collection)
        event_queues = []

        env_config = conf.get_env_config()
        common_metadata_file = os.path.join(env_config.get('app_path', '/etc/calipso'),
                                            'config',
                                            DefaultListener.COMMON_METADATA_FILE)

        # import common metadata
        import_metadata(event_handler, event_queues, common_metadata_file)

        # import custom metadata if supplied
        if args["metadata_file"]:
            import_metadata(event_handler, event_queues, args["metadata_file"])

        logger = FullLogger()
        logger.set_loglevel(args["loglevel"])

        amqp_config = conf.get("AMQP")
        connect_url = 'amqp://{user}:{pwd}@{host}:{port}//' \
            .format(user=amqp_config["user"],
                    pwd=amqp_config["pwd"],
                    host=amqp_config["host"],
                    port=amqp_config["port"])

        with Connection(connect_url) as conn:
            try:
                print(conn)
                conn.connect()
                args['process_vars']['operational'] = OperationalStatus.RUNNING
                terminator = SignalHandler()
                worker = \
                    DefaultListener(connection=conn,
                                    event_handler=event_handler,
                                    event_queues=event_queues,
                                    retry_limit=args["retry_limit"],
                                    consume_all=args["consume_all"],
                                    inventory_collection=inventory_collection,
                                    env_name=env_name)
                worker.run()
                if terminator.terminated:
                    args['process_vars']['operational'] = \
                        OperationalStatus.STOPPED
            except KeyboardInterrupt:
                print('Stopped')
                args['process_vars']['operational'] = OperationalStatus.STOPPED
            except Exception as e:
                logger.log.exception(e)
                args['process_vars']['operational'] = OperationalStatus.ERROR
            finally:
                # This should enable safe saving of shared variables
                time.sleep(0.1)
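
The listen() entry point can be invoked directly with an args dict; setup_args() fills in DEFAULTS for anything omitted. A minimal sketch with placeholder values:

# A hedged invocation sketch; the argument values are placeholders.
DefaultListener.listen({
    "env": "Mirantis-Liberty",
    "mongo_config": "",
    "inventory": "inventory",
    "loglevel": "INFO",
    "environments_collection": "environments_config",
    "metadata_file": "",
    "retry_limit": 10,
    "consume_all": False
})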
Example #9
class EventManager(Manager):

    # After EventManager receives a SIGTERM,
    # it will try to terminate all listeners.
    # After this delay, a SIGKILL will be sent
    # to each listener that is still alive.
    SIGKILL_DELAY = 5  # in seconds

    DEFAULTS = {
        "mongo_config": "",
        "collection": "environments_config",
        "inventory": "inventory",
        "interval": 5,
        "loglevel": "INFO"
    }

    LISTENERS = {
        'Mirantis': {
            '6.0': DefaultListener,
            '7.0': DefaultListener,
            '8.0': DefaultListener,
            '9.0': DefaultListener
        },
        'RDO': {
            'Mitaka': DefaultListener,
            'Liberty': DefaultListener,
        },
        'Apex': {
            'Euphrates': DefaultListener,
        },
    }

    def __init__(self):
        self.args = self.get_args()
        super().__init__(log_directory=self.args.log_directory,
                         mongo_config_file=self.args.mongo_config)
        self.db_client = None
        self.interval = None
        self.processes = []

    @staticmethod
    def get_args():
        parser = argparse.ArgumentParser()
        parser.add_argument(
            "-m",
            "--mongo_config",
            nargs="?",
            type=str,
            default=EventManager.DEFAULTS["mongo_config"],
            help="Name of config file with MongoDB server access details")
        parser.add_argument("-c",
                            "--collection",
                            nargs="?",
                            type=str,
                            default=EventManager.DEFAULTS["collection"],
                            help="Environments collection to read from "
                            "(default: '{}')".format(
                                EventManager.DEFAULTS["collection"]))
        parser.add_argument("-y",
                            "--inventory",
                            nargs="?",
                            type=str,
                            default=EventManager.DEFAULTS["inventory"],
                            help="name of inventory collection "
                            "(default: '{}')".format(
                                EventManager.DEFAULTS["inventory"]))
        parser.add_argument(
            "-i",
            "--interval",
            nargs="?",
            type=float,
            default=EventManager.DEFAULTS["interval"],
            help="Interval between collection polls "
            "(must be more than {} seconds. Default: {})".format(
                EventManager.MIN_INTERVAL, EventManager.DEFAULTS["interval"]))
        parser.add_argument("-l",
                            "--loglevel",
                            nargs="?",
                            type=str,
                            default=EventManager.DEFAULTS["loglevel"],
                            help="Logging level \n(default: '{}')".format(
                                EventManager.DEFAULTS["loglevel"]))
        parser.add_argument(
            "-d",
            "--log_directory",
            nargs="?",
            type=str,
            default=FileLogger.LOG_DIRECTORY,
            help="File logger directory \n(default: '{}')".format(
                FileLogger.LOG_DIRECTORY))
        args = parser.parse_args()
        return args

    def configure(self):
        self.db_client = MongoAccess()
        self.inv = InventoryMgr()
        self.inv.set_collections(self.args.inventory)
        self.collection = self.db_client.db[self.args.collection]
        self.interval = max(self.MIN_INTERVAL, self.args.interval)
        self.log.set_loglevel(self.args.loglevel)

        self.log.info("Started EventManager with following configuration:\n"
                      "Mongo config file path: {0}\n"
                      "Collection: {1}\n"
                      "Polling interval: {2} second(s)".format(
                          self.args.mongo_config, self.collection.name,
                          self.interval))

    def get_listener(self, env: str):
        env_config = self.inv.get_env_config(env)
        return (self.LISTENERS.get(env_config.get('distribution'), {}).get(
            env_config.get('distribution_version'), DefaultListener))

    def listen_to_events(self, listener: ListenerBase, env_name: str,
                         process_vars: dict):
        listener.listen({
            'env': env_name,
            'mongo_config': self.args.mongo_config,
            'inventory': self.args.inventory,
            'loglevel': self.args.loglevel,
            'environments_collection': self.args.collection,
            'process_vars': process_vars
        })

    def _get_alive_processes(self):
        return [p for p in self.processes if p['process'].is_alive()]

    # Get all processes that should be terminated
    def _get_stuck_processes(self, stopped_processes: list):
        stopped_names = [p.get("name") for p in stopped_processes]
        return [p for p in self._get_alive_processes()
                if p.get("name") in stopped_names]

    # Give processes time to finish and kill them if they are stuck
    def _kill_stuck_processes(self, process_list: list):
        if self._get_stuck_processes(process_list):
            time.sleep(self.SIGKILL_DELAY)
        for process in self._get_stuck_processes(process_list):
            self.log.info("Killing event listener '{0}'".format(
                process.get("name")))
            os.kill(process.get("process").pid, signal.SIGKILL)

    def _get_operational(self, process: dict) -> OperationalStatus:
        try:
            return process.get("vars", {})\
                          .get("operational")
        except Exception:
            self.log.error("Event listener '{0}' is unreachable".format(
                process.get("name")))
            return OperationalStatus.STOPPED

    def _update_operational_status(self, status: OperationalStatus):
        self.collection.update_many(
            {
                "name": {
                    "$in": [
                        process.get("name") for process in self.processes
                        if self._get_operational(process) == status
                    ]
                }
            }, {"$set": {
                "operational": status.value
            }})

    def update_operational_statuses(self):
        self._update_operational_status(OperationalStatus.RUNNING)
        self._update_operational_status(OperationalStatus.ERROR)
        self._update_operational_status(OperationalStatus.STOPPED)

    def cleanup_processes(self):
        # Query for envs that are no longer eligible for listening
        # (scanned == false and/or listen == false)
        dropped_envs = [
            env['name'] for env in self.collection.find(
                filter={'$or': [{
                    'scanned': False
                }, {
                    'listen': False
                }]},
                projection=['name'])
        ]

        live_processes = []
        stopped_processes = []
        # Drop already terminated processes
        # and for all others perform filtering
        for process in self._get_alive_processes():
            # If env no longer qualifies for listening,
            # stop the listener.
            # Otherwise, keep the process
            if process['name'] in dropped_envs:
                self.log.info("Stopping event listener '{0}'".format(
                    process.get("name")))
                process['process'].terminate()
                stopped_processes.append(process)
            else:
                live_processes.append(process)

        self._kill_stuck_processes(stopped_processes)

        # Update all 'operational' statuses
        # for processes stopped on the previous step
        self.collection.update_many(
            {
                "name": {
                    "$in":
                    [process.get("name") for process in stopped_processes]
                }
            }, {"$set": {
                "operational": OperationalStatus.STOPPED.value
            }})

        # Keep the living processes
        self.processes = live_processes

    def do_action(self):
        try:
            while True:
                # Update "operational" field in db before removing dead processes
                # so that we keep last statuses of env listeners before they were terminated
                self.update_operational_statuses()

                # Perform a cleanup that filters out all processes
                # that are no longer eligible for listening
                self.cleanup_processes()

                envs = self.collection.find({'scanned': True, 'listen': True})

                # Iterate over environments that don't have an event listener attached
                for env in filter(
                        lambda e: e['name'] not in map(
                            lambda process: process["name"], self.processes),
                        envs):
                    env_name = env['name']

                    if not self.inv.is_feature_supported(
                            env_name, EnvironmentFeatures.LISTENING):
                        self.log.error(
                            "Listening is not supported for env '{}'".format(
                                env_name))
                        self.collection.update({"name": env_name}, {
                            "$set": {
                                "operational": OperationalStatus.ERROR.value
                            }
                        })
                        continue

                    listener = self.get_listener(env_name)
                    if not listener:
                        self.log.error(
                            "No listener is defined for env '{}'".format(
                                env_name))
                        self.collection.update({"name": env_name}, {
                            "$set": {
                                "operational": OperationalStatus.ERROR.value
                            }
                        })
                        continue

                    # A dict that is shared between event manager and newly created env listener
                    process_vars = SharedManager().dict()
                    p = Process(target=self.listen_to_events,
                                args=(
                                    listener,
                                    env_name,
                                    process_vars,
                                ),
                                name=env_name)
                    self.processes.append({
                        "process": p,
                        "name": env_name,
                        "vars": process_vars
                    })
                    self.log.info(
                        "Starting event listener '{0}'".format(env_name))
                    p.start()

                # Make sure statuses are up-to-date before event manager goes to sleep
                self.update_operational_statuses()
                time.sleep(self.interval)
        finally:
            # Fetch operational statuses before terminating listeners.
            # Shared variables won't be available after termination.
            stopping_processes = [
                process.get("name") for process in self.processes
                if self._get_operational(process) != OperationalStatus.ERROR
            ]
            self._update_operational_status(OperationalStatus.ERROR)

            # Gracefully stop processes
            for process in self._get_alive_processes():
                self.log.info("Stopping event listener '{0}'".format(
                    process.get("name")))
                process.get("process").terminate()

            # Kill all remaining processes
            self._kill_stuck_processes(self.processes)

            # Updating operational statuses for stopped processes
            self.collection.update_many({"name": {
                "$in": stopping_processes
            }}, {"$set": {
                "operational": OperationalStatus.STOPPED.value
            }})
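
A minimal lifecycle sketch for EventManager; whether the Manager base class wraps configure() and do_action() in its own run loop is not shown above, so calling them directly here is an assumption:

# A hedged sketch; the explicit configure()/do_action() calls are an
# assumption about the Manager base class contract.
manager = EventManager()
manager.configure()
try:
    manager.do_action()  # blocks, polling every manager.interval seconds
except KeyboardInterrupt:
    pass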
Example #10
class Monitor:
    DEFAULTS = {
        'env': 'WebEX-Mirantis@Cisco',
        'inventory': 'inventory',
        'loglevel': 'WARNING'
    }

    def __init__(self):
        self.args = self.get_args()
        MongoAccess.set_config_file(self.args.mongo_config)
        self.inv = InventoryMgr()
        self.inv.set_collections(self.args.inventory)
        self.input_text = None

    def get_args(self):
        parser = argparse.ArgumentParser()
        parser.add_argument("-m",
                            "--mongo_config",
                            nargs="?",
                            type=str,
                            default="",
                            help="name of config file with MongoDB server " +
                            "access details")
        parser.add_argument("-e",
                            "--env",
                            nargs="?",
                            type=str,
                            default=self.DEFAULTS['env'],
                            help="name of environment to scan \n" +
                            "(default: {})".format(self.DEFAULTS['env']))
        parser.add_argument("-y",
                            "--inventory",
                            nargs="?",
                            type=str,
                            default=self.DEFAULTS['inventory'],
                            help="name of inventory collection \n" +
                            "(default: {}".format(self.DEFAULTS['inventory']))
        parser.add_argument('-i',
                            '--inputfile',
                            nargs='?',
                            type=str,
                            default='',
                            help="read input from the specifed file \n" +
                            "(default: from stdin)")
        parser.add_argument("-l",
                            "--loglevel",
                            nargs="?",
                            type=str,
                            default=self.DEFAULTS["loglevel"],
                            help="logging level \n(default: '{}')".format(
                                self.DEFAULTS["loglevel"]))
        args = parser.parse_args()
        return args

    def get_type_list(self, type_name) -> list:
        types_list = []
        docs = self.inv.find_items({'name': type_name}, collection='constants')
        for doc in docs:
            types_list = [t['value'] for t in doc['data']]
        if not types_list:
            raise ValueError('Unable to fetch {}'.format(
                type_name.replace('_', ' ')))
        return types_list

    def match_object_types(self, check_name: str) -> list:
        object_types = self.get_type_list('object_types')
        matches = [t for t in object_types if check_name.startswith(t + '_')]
        return matches

    def match_link_types(self, check_name: str) -> list:
        object_types = self.get_type_list('link_types')
        matches = [
            t for t in object_types if check_name.startswith('link_' + t + '_')
        ]
        return matches

    def find_object_type_and_id(self, check_name: str):
        # if we have multiple matching host types, then take the longest
        # of these. For example, if matches are ['host', 'host_pnic'],
        # then take 'host_pnic'.
        # To facilitate this, we sort the matches by reverse order.
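        # Worked examples (hypothetical type lists, for illustration):
        #   'host_pnic_eth0'     -> ('object', 'host_pnic', 'eth0'),
        #     assuming object_types includes 'host' and 'host_pnic'
        #   'link_vedge_otep_x1' -> ('link', 'vedge_otep', 'vedge_otep_x1'),
        #     assuming link_types includes 'vedge_otep'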
        is_link_check = check_name.startswith('link_')
        check_type = 'link' if is_link_check else 'object'
        if is_link_check:
            matching_types = sorted(self.match_link_types(check_name),
                                    reverse=True)
        else:
            matching_types = sorted(self.match_object_types(check_name),
                                    reverse=True)
        if not matching_types:
            raise ValueError(
                'Unable to match check name "{}" with {} type'.format(
                    check_name, check_type))
        obj_type = matching_types[0]
        prefix_len = len('link_') if is_link_check else 0
        obj_id = (obj_type + '_' if is_link_check else '') + \
            check_name[len(obj_type)+1+prefix_len:]
        return check_type, obj_type, obj_id

    def read_input(self):
        if self.args.inputfile:
            try:
                with open(self.args.inputfile, 'r') as input_file:
                    self.input_text = input_file.read()
            except Exception as e:
                raise FileNotFoundError(
                    "failed to open input file {}: {}".format(
                        self.args.inputfile, str(e)))
        else:
            self.input_text = sys.stdin.read()
            if not self.input_text:
                raise ValueError("No input provided on stdin")

    def get_handler_by_type(self, check_type, obj_type):
        module_name = 'handle_link' if check_type == 'link' \
                else 'handle_' + obj_type
        package = 'monitoring.handlers'
        handler = ClassResolver.get_instance_single_arg(
            self.args, module_name=module_name, package_name=package)
        return handler

    def get_handler(self, check_type, obj_type):
        basic_handling_types = ['vedge', 'vservice']
        if obj_type not in basic_handling_types:
            return self.get_handler_by_type(check_type, obj_type)
        from monitoring.handlers.basic_check_handler \
            import BasicCheckHandler
        return BasicCheckHandler(self.args)

    def process_input(self):
        check_result_full = json.loads(self.input_text)
        check_client = check_result_full['client']
        check_result = check_result_full['check']
        check_result['id'] = check_result_full['id']
        name = check_result['name']
        check_type, object_type, object_id = \
            self.find_object_type_and_id(name)
        if 'environment' in check_client:
            self.args.env = check_client['environment']

        check_handler = self.get_handler(check_type, object_type)
        if check_handler:
            check_handler.handle(object_id, check_result)

    def process_check_result(self):
        self.read_input()
        self.process_input()
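
A small entry point presumably drives this class when the module runs as a script; a minimal sketch (assumed here, not shown in the example):

    if __name__ == '__main__':
        monitor = Monitor()
        monitor.process_check_result()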
Example #11
0
class Monitor:
    DEFAULTS = {
        'env': 'WebEX-Mirantis@Cisco',
        'inventory': 'inventory',
        'loglevel': 'WARNING'
    }

    def __init__(self):
        self.args = self.get_args()
        MongoAccess.set_config_file(self.args.mongo_config)
        self.inv = InventoryMgr()
        self.inv.set_collections(self.args.inventory)
        self.configuration = Configuration()
        self.input_text = None
        self.converter = SpecialCharConverter()

    def get_args(self):
        parser = argparse.ArgumentParser()
        parser.add_argument("-m",
                            "--mongo_config",
                            nargs="?",
                            type=str,
                            default="",
                            help="name of config file with MongoDB server " +
                            "access details")
        parser.add_argument("-e",
                            "--env",
                            nargs="?",
                            type=str,
                            default=self.DEFAULTS['env'],
                            help="name of environment to scan \n" +
                            "(default: {})".format(self.DEFAULTS['env']))
        parser.add_argument("-y",
                            "--inventory",
                            nargs="?",
                            type=str,
                            default=self.DEFAULTS['inventory'],
                            help="name of inventory collection \n" +
                            "(default: {}".format(self.DEFAULTS['inventory']))
        parser.add_argument('-i',
                            '--inputfile',
                            nargs='?',
                            type=str,
                            default='',
                            help="read input from the specifed file \n" +
                            "(default: from stdin)")
        parser.add_argument("-l",
                            "--loglevel",
                            nargs="?",
                            type=str,
                            default=self.DEFAULTS["loglevel"],
                            help="logging level \n(default: '{}')".format(
                                self.DEFAULTS["loglevel"]))
        args = parser.parse_args()
        return args

    def get_type_list(self, type_name) -> list:
        types_list = []
        docs = self.inv.find_items({'name': type_name}, collection='constants')
        for doc in docs:
            types_list = [t['value'] for t in doc['data']]
        if not types_list:
            raise ValueError('Unable to fetch {}'.format(
                type_name.replace('_', ' ')))
        return types_list

    def match_object_types(self, check_name: str) -> list:
        object_types = self.get_type_list('object_types')
        matches = [t for t in object_types if check_name.startswith(t + '_')]
        return matches

    def match_link_types(self, check_name: str) -> list:
        object_types = self.get_type_list('link_types')
        matches = [
            t for t in object_types if check_name.startswith('link_' + t + '_')
        ]
        return matches

    def find_object_type_and_id(self, check_name: str):
        # if we have multiple matching host types, then take the longest
        # of these. For example, if matches are ['host', 'host_pnic'],
        # then take 'host_pnic'.
        # To facilitate this, we sort the matches by reverse order.
        is_link_check = check_name.startswith('link_')
        check_type = 'link' if is_link_check else 'object'
        if is_link_check:
            matching_types = sorted(self.match_link_types(check_name),
                                    reverse=True)
        else:
            matching_types = sorted(self.match_object_types(check_name),
                                    reverse=True)
        if not matching_types:
            raise ValueError(
                'Unable to match check name "{}" with {} type'.format(
                    check_name, check_type))
        obj_type = matching_types[0]
        prefix_len = len('link_') if is_link_check else 0
        obj_id = (obj_type + '_' if is_link_check else '') + \
            check_name[len(obj_type)+1+prefix_len:]
        return check_type, obj_type, obj_id

    def read_input(self):
        if self.args.inputfile:
            try:
                with open(self.args.inputfile, 'r') as input_file:
                    self.input_text = input_file.read()
            except Exception as e:
                raise FileNotFoundError(
                    "failed to open input file {}: {}".format(
                        self.args.inputfile, str(e)))
        else:
            self.input_text = sys.stdin.read()
            if not self.input_text:
                raise ValueError("No input provided on stdin")

    def get_handler_by_type(self, check_type, obj_type):
        module_name = 'handle_link' if check_type == 'link' \
                else 'handle_' + obj_type
        package = 'monitoring.handlers'
        handler = ClassResolver.get_instance_single_arg(
            self.args, module_name=module_name, package_name=package)
        return handler

    def get_handler(self, check_type, obj_type):
        basic_handling_types = ['instance', 'vedge', 'vservice', 'vconnector']
        if obj_type not in basic_handling_types:
            return self.get_handler_by_type(check_type, obj_type)
        from monitoring.handlers.basic_check_handler \
            import BasicCheckHandler
        return BasicCheckHandler(self.args)

    def check_link_interdependency_for(self,
                                       object_id: str,
                                       from_type: str = None,
                                       to_type: str = None):
        # exactly one of from_type/to_type must be supplied
        if (from_type is None) == (to_type is None):
            raise ValueError('check_link_interdependency: '
                             'supply exactly one of from_type/to_type')
        obj_id = self.converter.decode_special_characters(object_id)
        obj = self.inv.get_by_id(environment=self.args.env, item_id=obj_id)
        if not obj:
            self.inv.log.error(
                'check_link_interdependency: '
                'failed to find object with ID: {}'.format(object_id))
            return
        if 'status' not in obj:
            return
        id_attr = 'source_id' if from_type is None else 'target_id'
        link_type = '{}-{}'.format(
            from_type if from_type is not None else obj['type'],
            to_type if to_type is not None else obj['type'])
        condition = {
            'environment': self.args.env,
            'link_type': link_type,
            id_attr: obj_id
        }
        link = self.inv.find_one(search=condition, collection='links')
        if not link:
            self.inv.log.error('check_link_interdependency: '
                               'failed to find {} link with {}: {}'.format(
                                   link_type, id_attr, obj_id))
            return
        other_id_attr = '{}_id' \
            .format('source' if from_type is not None else 'target')
        other_obj = self.inv.get_by_id(environment=self.args.env,
                                       item_id=link[other_id_attr])
        if not other_obj:
            self.inv.log.error(
                'check_link_interdependency: '
                'failed to find {} with ID: {} (link type: {})'.format(
                    other_id_attr, link[other_id_attr], link_type))
            return
        if 'status' not in other_obj:
            return
        status = 'Warning'
        if obj['status'] == 'OK' and other_obj['status'] == 'OK':
            status = 'OK'
        elif obj['status'] == 'Error' or other_obj['status'] == 'Error':
            status = 'Error'
        link['status'] = status
        time_format = MonitoringCheckHandler.TIME_FORMAT
        timestamp1 = obj['status_timestamp']
        t1 = datetime.datetime.strptime(timestamp1, time_format)
        timestamp2 = other_obj['status_timestamp']
        t2 = datetime.datetime.strptime(timestamp2, time_format)
        timestamp = max(t1, t2)
        link['status_timestamp'] = datetime.datetime.strftime(
            timestamp, time_format)
        self.inv.set(link, self.inv.collections['links'])

    def check_link_interdependency(self, object_id: str, object_type: str):
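        # With OVS as the mechanism driver, a 'vedge' check refreshes the
        # vedge-to-host_pnic link status, and a 'host_pnic' check refreshes
        # the same link from the pNIC side.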
        conf = self.configuration.get_env_config()
        if 'OVS' in conf['mechanism_drivers']:
            if object_type == 'vedge':
                self.check_link_interdependency_for(object_id,
                                                    to_type='host_pnic')
            if object_type == 'host_pnic':
                self.check_link_interdependency_for(object_id,
                                                    from_type='vedge')

    def process_input(self):
        check_result_full = json.loads(self.input_text)
        check_client = check_result_full['client']
        check_result = check_result_full['check']
        check_result['id'] = check_result_full['id']
        name = check_result['name']
        check_type, object_type, object_id = \
            self.find_object_type_and_id(name)
        if 'environment' in check_client:
            self.args.env = check_client['environment']
        else:
            raise ValueError('Check client should contain environment name')
        self.configuration.use_env(self.args.env)

        check_handler = self.get_handler(check_type, object_type)
        if check_handler:
            check_handler.handle(object_id, check_result)
        self.check_link_interdependency(object_id, object_type)

    def process_check_result(self):
        self.read_input()
        self.process_input()
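
For reference, process_input() reads a JSON check result of the shape used above; a minimal, hypothetical payload (all field values are illustrative):

    SAMPLE_INPUT = '''
    {
        "id": "check-result-0001",
        "client": {"environment": "WebEX-Mirantis@Cisco"},
        "check": {
            "name": "host_pnic_eth0",
            "status": 0,
            "output": "OK",
            "executed": 1500000000
        }
    }
    '''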
Example #12
0
class StatsConsumer(MongoAccess):
    default_env = "WebEX-Mirantis@Cisco"

    def __init__(self):
        self.get_args()
        MongoAccess.set_config_file(self.args.mongo_config)
        MongoAccess.__init__(self)
        self.log = FullLogger()
        self.log.set_loglevel(self.args.loglevel)
        self.conf = Configuration()
        self.inv = InventoryMgr()
        self.inv.set_collections(self.args.inventory)
        stats_coll = self.inv.get_coll_name('statistics')
        self.stats = self.db[stats_coll]
        # consume messages from topic
        self.consumer = KafkaConsumer('VPP.stats',
                                      group_id='calipso_test',
                                      auto_offset_reset=self.args.offset,
                                      bootstrap_servers=['localhost:9092'])

    def get_args(self):
        # try to read scan plan from command line parameters
        parser = argparse.ArgumentParser()
        parser.add_argument("-m", "--mongo_config", nargs="?", type=str,
                            default="",
                            help="name of config file " +
                            "with MongoDB servr access details")
        parser.add_argument("-e", "--env", nargs="?", type=str,
                            default=self.default_env,
                            help="name of environment to scan \n" +
                            "(default: " + self.default_env + ")")
        parser.add_argument("-y", "--inventory", nargs="?", type=str,
                            default="inventory",
                            help="name of inventory collection \n" +
                            "(default: 'inventory')")
        parser.add_argument("-l", "--loglevel", nargs="?", type=str,
                            default="INFO",
                            help="logging level \n(default: 'INFO')")
        parser.add_argument("-o", "--offset", nargs="?", type=str,
                            default="largest",
                            help="where to start reading" +
                                 " - use 'smallest' for start \n" +
                                 "(default: 'largest')")
        self.args = parser.parse_args()

    def read(self):
        for kafka_msg in self.consumer:
            msg = json.loads(kafka_msg.value.decode())
            self.add_stats(msg)

    def add_stats(self, msg):
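        # Example VPP.stats message (fields are illustrative):
        #   {'hostIp': '10.0.0.7',
        #    'averageArrivalNanoSeconds': 1500000000000000000,
        #    'sourceMacAddress': 'fa:16:3e:00:00:01',
        #    'destinationMacAddress': 'fa:16:3e:00:00:02'}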
        host_ip = msg['hostIp']
        search = {
            'environment': self.args.env,
            'type': 'host',
            'ip_address': host_ip
        }
        host = self.inv.find_items(search, get_single=True)
        if not host:
            self.log.error('could not find host with ip address=' + host_ip)
            return
        host_id = host['id']
        search = {
            'environment': self.args.env,
            'type': 'vedge',
            'host': host_id
        }
        vedge = self.inv.find_items(search, get_single=True)
        if not vedge:
            self.log.error('could not find vEdge for host: ' + host_id)
            return
        self.log.info('setting VPP stats for vEdge of host: ' + host_id)
        self.add_stats_for_object(vedge, msg)

    def add_stats_for_object(self, o, msg):
        msg['type'] = 'vedge_flows'
        msg['environment'] = self.args.env
        msg['object_type'] = o['type']
        msg['object_id'] = o['id']
        time_seconds = int(msg['averageArrivalNanoSeconds'] / 1000000000)
        sample_time = time.gmtime(time_seconds)
        msg['sample_time'] = time.strftime("%Y-%m-%dT%H:%M:%SZ", sample_time)
        # find instances between which the flow happens
        # to find the instance, find the related vNIC first
        msg['source'] = self.find_instance_for_stat('source', msg)
        msg['destination'] = self.find_instance_for_stat('destination', msg)
        self.stats.insert_one(msg)

    def find_instance_for_stat(self, direction, msg):
        search_by_mac_address = 'sourceMacAddress' in msg
        value_attr = 'MacAddress' if search_by_mac_address else 'IpAddress'
        value_to_search = msg[direction + value_attr]
        attr = 'mac_address' if search_by_mac_address else 'ip_address'
        search = {
            'environment': self.args.env,
            'type': 'vnic',
            attr: value_to_search
        }
        vnic = self.inv.find_items(search, get_single=True)
        if not vnic:
            self.log.error('failed to find vNIC for ' +
                           attr + '=' + value_to_search)
            return 'Unknown'
        # now find the instance name from the vnic name
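        # name_path typically looks like this (illustrative):
        #   /<env>/Regions/<region>/Availability Zones/<az>/<host>/Instances/<instance>/<vnic>
        # so after split('/') the instance name sits at index 8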
        name_path = vnic['name_path'].split('/')
        instance_name = name_path[8]
        return instance_name
Example #13
0
class MonitoringCheckHandler(SpecialCharConverter):
    STATUS_LABEL = ['OK', 'Warning', 'Error']

    def __init__(self, args):
        super().__init__()
        self.log = FullLogger()
        self.log.set_loglevel(args.loglevel)
        self.env = args.env
        try:
            self.conf = Configuration(args.mongo_config)
            self.inv = InventoryMgr()
            self.inv.log.set_loglevel(args.loglevel)
            self.inv.set_collections(args.inventory)
        except FileNotFoundError:
            sys.exit(1)

    def doc_by_id(self, object_id):
        doc = self.inv.get_by_id(self.env, object_id)
        if not doc:
            self.log.warn('No matching object found with ID: ' + object_id)
        return doc

    def doc_by_db_id(self, db_id, coll_name=None):
        coll = self.inv.collections[coll_name] if coll_name else None
        doc = self.inv.find({'_id': ObjectId(db_id)},
                            get_single=True,
                            collection=coll)
        if not doc:
            self.log.warn('No matching object found with DB ID: ' + db_id)
        return doc

    def set_doc_status(self, doc, status, status_text, timestamp):
        doc['status'] = self.STATUS_LABEL[status] if isinstance(status, int) \
            else status
        if status_text:
            doc['status_text'] = status_text
        doc['status_timestamp'] = strftime(TIME_FORMAT, timestamp)
        if 'link_type' in doc:
            self.inv.write_link(doc)
        else:
            self.inv.set(doc)

    @staticmethod
    def check_ts(check_result):
        return gmtime(check_result['executed'])

    def keep_result(self, doc, check_result):
        status = check_result['status']
        ts = self.check_ts(check_result)
        self.set_doc_status(doc, status, check_result['output'], ts)
        self.keep_message(doc, check_result)

    def keep_message(self, doc, check_result, error_level=None):
        is_link = 'link_type' in doc
        msg_id = check_result['id']
        obj_id = 'link_{}_{}'.format(doc['source_id'], doc['target_id']) \
            if is_link \
            else doc['id']
        obj_type = 'link_{}'.format(
            doc['link_type']) if is_link else doc['type']
        display_context = obj_id if is_link \
            else doc['network_id'] if doc['type'] == 'port' else doc['id']
        level = error_level if error_level\
            else ERROR_LEVEL[check_result['status']]
        dt = datetime.datetime.utcfromtimestamp(check_result['executed'])
        message = Message(msg_id=msg_id,
                          env=self.env,
                          source=SOURCE_SYSTEM,
                          object_id=obj_id,
                          object_type=obj_type,
                          display_context=display_context,
                          level=level,
                          msg=check_result,
                          ts=dt)
        collection = self.inv.collections['messages']
        collection.insert_one(message.get())
Example #14
0
class MonitoringCheckHandler(SpecialCharConverter):
    status_labels = {}
    TIME_FORMAT = '%Y-%m-%d %H:%M:%S %Z'

    def __init__(self, args):
        super().__init__()
        self.log = FullLogger()
        self.log.set_loglevel(args.loglevel)
        self.env = args.env
        try:
            self.conf = Configuration(args.mongo_config)
            self.inv = InventoryMgr()
            self.inv.log.set_loglevel(args.loglevel)
            self.inv.set_collections(args.inventory)
            self.status_labels = self.get_status_labels()
        except FileNotFoundError:
            sys.exit(1)

    def get_status_labels(self):
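        # Example 'constants' document (illustrative):
        #   {'name': 'monitoring_check_statuses',
        #    'data': [{'value': '0', 'label': 'OK'},
        #             {'value': '1', 'label': 'Warning'},
        #             {'value': '2', 'label': 'Error'}]}
        # would yield {0: 'OK', 1: 'Warning', 2: 'Error'}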
        statuses_name_search = {'name': 'monitoring_check_statuses'}
        labels_data = self.inv.find_one(search=statuses_name_search,
                                        collection='constants')
        if not isinstance(labels_data, dict) or 'data' not in labels_data:
            return {}
        labels = {}
        for status_data in labels_data['data']:
            if not isinstance(status_data, dict):
                continue
            val = int(status_data['value'])
            label = status_data['label']
            labels[val] = label
        return labels

    def get_label_for_status(self, status: int) -> str:
        return self.status_labels.get(status, '')

    def doc_by_id(self, object_id):
        doc = self.inv.get_by_id(self.env, object_id)
        if not doc:
            self.log.warn('No matching object found with ID: ' + object_id)
        return doc

    def doc_by_db_id(self, db_id, coll_name=None):
        coll = self.inv.collections[coll_name] if coll_name else None
        doc = self.inv.find({'_id': ObjectId(db_id)},
                            get_single=True,
                            collection=coll)
        if not doc:
            self.log.warn('No matching object found with DB ID: ' + db_id)
        return doc

    def set_doc_status(self, doc, status, status_text, timestamp):
        doc['status_value'] = status
        doc['status'] = self.get_label_for_status(status) \
            if isinstance(status, int) \
            else status
        if status_text:
            doc['status_text'] = status_text
        doc['status_timestamp'] = strftime(self.TIME_FORMAT, timestamp)
        if 'link_type' in doc:
            self.inv.write_link(doc)
        else:
            self.inv.set(doc)

    @staticmethod
    def check_ts(check_result):
        return gmtime(check_result['executed'])

    def keep_result(self, doc, check_result):
        status = check_result['status']
        ts = self.check_ts(check_result)
        self.set_doc_status(doc, status, check_result['output'], ts)
        self.keep_message(doc, check_result)

    def keep_message(self, doc, check_result, error_level=None):
        is_link = 'link_type' in doc
        msg_id = check_result['id']
        obj_id = 'link_{}_{}'.format(doc['source_id'], doc['target_id']) \
            if is_link \
            else doc['id']
        obj_type = 'link_{}'.format(doc['link_type']) if is_link else \
            doc['type']
        display_context = obj_id if is_link \
            else doc['network_id'] if doc['type'] == 'port' else doc['id']
        level = error_level if error_level\
            else ERROR_LEVEL[check_result['status']]
        dt = datetime.datetime.utcfromtimestamp(check_result['executed'])
        message = Message(msg_id=msg_id,
                          env=self.env,
                          source=SOURCE_SYSTEM,
                          object_id=obj_id,
                          object_type=obj_type,
                          display_context=display_context,
                          level=level,
                          msg=check_result,
                          ts=dt)
        collection = self.inv.collections['messages']
        collection.insert_one(message.get())
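
A concrete check handler would subclass this base and combine its lookup and persistence helpers; a minimal sketch (hypothetical subclass and return convention, not taken from the examples above):

    class HandleHostPnic(MonitoringCheckHandler):
        def handle(self, obj_id, check_result):
            # IDs arrive encoded for the monitoring system; decode first
            object_id = self.decode_special_characters(obj_id)
            doc = self.doc_by_id(object_id)
            if not doc:
                return 1
            self.keep_result(doc, check_result)
            return check_result['status']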