def __init__(self, service):
    self.service = service
    self.ttl = getattr(config.selfmon, "%s_ttl" % self.name)
    self.last_metrics = {}
    self.logger = PrefixLoggerAdapter(logger, self.name)
    self.t0 = int(time.time())
    self.next_run = self.t0

def __init__(self, system):
    self.system = system
    self.config = system.config
    self.logger = PrefixLoggerAdapter(logger, "%s][%s" % (system.name, self.name))
    self.import_dir = os.path.join(self.PREFIX, system.name, self.name)
    self.fatal_problems: List[Problem] = []
    self.quality_problems: List[Problem] = []

def __init__(
    self,
    address=None,
    pool=None,
    logger=None,
    snmp_community=None,
    calling_service="profilechecker",
    snmp_version=None,
):
    self.address = address
    self.pool = pool
    self.logger = PrefixLoggerAdapter(
        logger or self.base_logger, "%s][%s" % (self.pool or "", self.address or "")
    )
    self.result_cache = {}  # (method, param) -> result
    self.error = None
    self.snmp_community = snmp_community
    self.calling_service = calling_service
    # Fall back to SNMP v2c when no explicit version list is given;
    # self.snmp_version is therefore never None past this point
    self.snmp_version = snmp_version or [SNMP_v2c]
    self.ignoring_snmp = False
    if not self.snmp_community:
        self.logger.error("No SNMP credentials. Ignoring")
        self.ignoring_snmp = True

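The `result_cache` above memoizes probe results per `(method, param)` key. A minimal sketch of how such a cache is typically consulted; `do_snmp_get` is a hypothetical stand-in, not the checker's confirmed probe method:

def cached_call(self, method, param):
    key = (method, param)
    if key not in self.result_cache:
        # do_snmp_get is illustrative only; the real checker dispatches
        # on `method` to its actual SNMP/CLI probe implementations
        self.result_cache[key] = self.do_snmp_get(param)
    return self.result_cache[key]
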
def __init__(self, script, tos=None):
    self.script = script
    self.profile = script.profile
    self.logger = PrefixLoggerAdapter(self.script.logger, self.name)
    self.iostream = None
    self.motd = ""
    self.ioloop = None
    self.command = None
    self.prompt_stack = []
    self.patterns = self.profile.patterns.copy()
    self.buffer = ""
    self.is_started = False
    self.result = None
    self.error = None
    self.pattern_table = None
    self.collected_data = []
    self.tos = tos
    self.current_timeout = None
    self.is_closed = False
    self.close_timeout = None
    self.close_timeout_lock = Lock()
    self.setup_complete = False
    self.to_raise_privileges = script.credentials.get("raise_privileges", True)
    self.state = "start"
    # State retries
    self.super_password_retries = self.profile.cli_retries_super_password

def set_script(self, script):
    self.script = script
    self.logger = PrefixLoggerAdapter(self.script.logger, self.name)
    if self.close_timeout:
        tornado.ioloop.IOLoop.instance().remove_timeout(self.close_timeout)
        self.close_timeout = None
    if self.motd:
        self.script.set_motd(self.motd)

def __init__(self, script):
    self._script = weakref.ref(script)
    self.ioloop = None
    self.result = None
    self.logger = PrefixLoggerAdapter(script.logger, self.name)
    self.timeouts_limit = 0
    self.timeouts = 0
    self.socket = None

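The weak reference here avoids a reference cycle between the script and its protocol handler. Callers then dereference it on demand, typically through a property like the sketch below (the accessor's name and shape are an assumption, not the codebase's confirmed API):

@property
def script(self):
    # weakref.ref objects are called to obtain the referent;
    # this returns None once the script has been garbage-collected
    return self._script()
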
def __init__(self, script, tos: Optional[int] = None):
    self.script = script
    self.profile = script.profile
    self.logger = PrefixLoggerAdapter(self.script.logger, self.name)
    self.stream: Optional[BaseStream] = None
    self.tos = tos
    self.is_started = False
    # Current error to raise on TimeoutError
    self.timeout_exception_cls = CLIConnectionReset

def __init__(self, service, service_name, sync=False, hints=None):
    self._logger = PrefixLoggerAdapter(logger, service_name)
    self._service = service
    self._service_name = service_name
    self._api = service_name.split("-")[0]
    self._tid = itertools.count()
    self._transactions = {}
    self._hints = hints
    self._sync = sync

def __init__(self, script, rate: Optional[float] = None):
    self._script = weakref.ref(script)
    self.logger = PrefixLoggerAdapter(script.logger, self.name)
    self.timeouts_limit = 0
    self.timeouts = 0
    self.socket = None
    self.display_hints = None
    self.snmp_version = None
    self.rate_limit: Optional[AsyncRateLimit] = AsyncRateLimit(rate) if rate else None

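`AsyncRateLimit` is NOC's own helper and its internals are not shown here. As a rough illustration of what an interval-based async limiter with this constructor shape might look like (the class and method names below are ours, not NOC's):

import asyncio
import time

class SimpleAsyncRateLimit:
    """Pace async calls to at most `rate` per second (illustrative only)."""

    def __init__(self, rate: float):
        self.interval = 1.0 / rate
        self.next_at = 0.0  # monotonic timestamp of the next allowed call

    async def wait(self):
        now = time.monotonic()
        delay = self.next_at - now
        if delay > 0:
            await asyncio.sleep(delay)
            self.next_at += self.interval
        else:
            self.next_at = now + self.interval
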
def __init__(self, script):
    self.script = script
    if script:  # For testing purposes
        self.logger = PrefixLoggerAdapter(script.logger, "http")
    self.headers = {}
    self.cookies = None
    self.session_started = False
    self.request_id = 1
    self.session_id = None
    self.request_middleware = None
    if self.script:  # For testing purposes
        self.setup_middleware()

def __init__(self, *args, **kwargs):
    super(MODiscoveryJob, self).__init__(*args, **kwargs)
    self.out_buffer = StringIO()
    self.logger = PrefixLoggerAdapter(self.logger, "", target=self.out_buffer)
    self.check_timings = []
    self.problems = []
    self.caps = None
    self.has_fatal_error = False
    self.service = self.scheduler.service
    # Additional artefacts can be passed between checks in one session
    self.artefacts = {}

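The `artefacts` dict lets one discovery check hand intermediate results to a later check in the same session; a hypothetical exchange (the key and variable names are illustrative):

# in an early check:
self.job.artefacts["interface_macs"] = macs
# in a later check of the same session:
macs = self.job.artefacts.get("interface_macs")
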
def __init__(self, scheduler, attrs):
    """
    :param scheduler: Scheduler instance
    :param attrs: dict containing record from scheduler's collection
    """
    self.scheduler = scheduler
    self.attrs = attrs
    self.object = None
    self.start_time = None
    self.duration = None
    self.logger = PrefixLoggerAdapter(scheduler.logger, self.get_display_key())
    self.context = {}

def __init__(self, chain):
    self.chain = chain
    self.system = chain.system
    self.logger = PrefixLoggerAdapter(logger, "%s][%s" % (self.system.name, self.name))
    self.disable_mappings = False
    self.import_dir = os.path.join(config.path.etl_import, self.system.name, self.name)
    self.archive_dir = os.path.join(self.import_dir, "archive")
    self.mappings_path = os.path.join(self.import_dir, "mappings.csv")
    self.mappings = {}
    self.wf_state_mappings = {}
    self.new_state_path = None
    self.c_add = 0
    self.c_change = 0
    self.c_delete = 0
    # Mapped fields
    self.mapped_fields = self.data_model.get_mapped_fields()
    # Build clean map
    self.clean_map = {}  # field name -> clean function
    self.pending_deletes: List[Tuple[str, BaseModel]] = []  # (id, BaseModel)
    self.referred_errors: List[Tuple[str, BaseModel]] = []  # (id, BaseModel)
    if self.is_document:
        import mongoengine.errors

        unique_fields = [
            f.name
            for f in self.model._fields.values()
            if f.unique and f.name not in self.ignore_unique
        ]
        self.integrity_exception = mongoengine.errors.NotUniqueError
    else:
        # Third-party modules
        import django.db.utils

        unique_fields = [
            f.name
            for f in self.model._meta.fields
            if f.unique and f.name != self.model._meta.pk.name and f.name not in self.ignore_unique
        ]
        self.integrity_exception = django.db.utils.IntegrityError
    if unique_fields:
        self.unique_field = unique_fields[0]
    else:
        self.unique_field = None
    self.has_remote_system: bool = hasattr(self.model, "remote_system")
    if self.workflow_state_sync:
        self.load_wf_state_mappings()

def __init__(self, object):
    self.object = object
    self.logger = PrefixLoggerAdapter(logger, self.object.name)
    self.env = None
    self.templates = {}  # fact class -> template
    self.fcls = {}  # template -> Fact class
    self.facts = {}  # Index -> Fact
    self.rn = 0  # Rule number
    self.config = None  # Cached config
    self.interface_ranges = None
    with self.ILOCK:
        self.AC_POLICY_VIOLATION = AlarmClass.objects.filter(
            name="Config | Policy Violation"
        ).first()
        if not self.AC_POLICY_VIOLATION:
            logger.error(
                "Alarm class 'Config | Policy Violation' is not found. Alarms cannot be raised"
            )

def __init__(self, segment, node_hints=None, link_hints=None, force_spring=False):
    self.logger = PrefixLoggerAdapter(logger, segment.name)
    self.segment = segment
    self.segment_siblings = self.segment.get_siblings()
    self._uplinks_cache = {}
    self.segment_objects = set()
    if self.segment.parent:
        self.parent_segment = self.segment.parent
        self.ancestor_segments = set(self.segment.get_path()[:-1])
    else:
        self.parent_segment = None
        self.ancestor_segments = set()
    super().__init__(node_hints, link_hints, force_spring)

def __init__(self, chain):
    self.chain = chain
    self.system = chain.system
    self.logger = PrefixLoggerAdapter(logger, "%s][%s" % (self.system.name, self.name))
    self.disable_mappings = False
    self.import_dir = os.path.join(self.PREFIX, self.system.name, self.name)
    self.archive_dir = os.path.join(self.import_dir, "archive")
    self.mappings_path = os.path.join(self.import_dir, "mappings.csv")
    self.mappings = {}
    self.new_state_path = None
    self.c_add = 0
    self.c_change = 0
    self.c_delete = 0
    # Build clean map
    self.clean_map = dict((n, self.clean_str) for n in self.fields)  # field name -> clean function
    self.pending_deletes = []  # (id, string)
    self.referred_errors = []  # (id, string)
    if self.is_document:
        import mongoengine.errors

        unique_fields = [
            f.name
            for f in six.itervalues(self.model._fields)
            if f.unique and f.name not in self.ignore_unique
        ]
        self.integrity_exception = mongoengine.errors.NotUniqueError
    else:
        # Third-party modules
        import django.db.utils

        unique_fields = [
            f.name
            for f in self.model._meta.fields
            if f.unique and f.name != self.model._meta.pk.name and f.name not in self.ignore_unique
        ]
        self.integrity_exception = django.db.utils.IntegrityError
    if unique_fields:
        self.unique_field = unique_fields[0]
    else:
        self.unique_field = None

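Both loader variants route every incoming field through `clean_map`. A sketch of how such a map is typically applied to one extracted row; the `clean_row` helper and `row` dict are illustrative, not the loaders' confirmed API:

def clean_row(self, row):
    # Apply the per-field clean function, defaulting to clean_str
    return {
        name: self.clean_map.get(name, self.clean_str)(value)
        for name, value in row.items()
    }
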
def __init__(self, script, tos=None):
    self.script = script
    self.profile = script.profile
    self.logger = PrefixLoggerAdapter(self.script.logger, self.name)
    self.iostream = None
    self.ioloop = None
    self.command = None
    self.buffer = ""
    self.is_started = False
    self.result = None
    self.error = None
    self.is_closed = False
    self.close_timeout = None
    self.current_timeout = None
    self.tos = tos
    self.rx_mml_end = re.compile(self.script.profile.pattern_mml_end, re.MULTILINE)
    if self.script.profile.pattern_mml_continue:
        self.rx_mml_continue = re.compile(self.script.profile.pattern_mml_continue, re.MULTILINE)
    else:
        self.rx_mml_continue = None

def __init__(self, script, tos=None):
    self.script = script
    self.profile = script.profile
    self.logger = PrefixLoggerAdapter(self.script.logger, self.name)
    self.iostream = None
    self.ioloop = None
    self.path = None
    self.cseq = 1
    self.method = None
    self.headers = None
    self.auth = None
    self.buffer = ""
    self.is_started = False
    self.result = None
    self.error = None
    self.is_closed = False
    self.close_timeout = None
    self.current_timeout = None
    self.tos = tos
    self.rx_rtsp_end = "\r\n\r\n"

def set_script(self, script):
    self.script = script
    self.logger = PrefixLoggerAdapter(self.script.logger, self.name)
    self.reset_close_timeout()
    if self.motd:
        self.script.set_motd(self.motd)

def __init__(self):
    self.logger = PrefixLoggerAdapter(logger, self.name)

def wipe(o):
    if not hasattr(o, "id"):
        try:
            o = ManagedObject.objects.get(id=o)
        except ManagedObject.DoesNotExist:
            return True
    log = PrefixLoggerAdapter(logger, str(o.id))
    # Wiping discovery tasks
    log.debug("Wiping discovery tasks")
    for j in [ManagedObject.BOX_DISCOVERY_JOB, ManagedObject.PERIODIC_DISCOVERY_JOB]:
        Job.remove("discovery", j, key=o.id, pool=o.pool.name)
    # Wiping FM events
    log.debug("Wiping events")
    FailedEvent.objects.filter(managed_object=o.id).delete()
    ActiveEvent.objects.filter(managed_object=o.id).delete()
    ArchivedEvent.objects.filter(managed_object=o.id).delete()
    # Wiping alarms
    log.debug("Wiping alarms")
    for ac in (ActiveAlarm, ArchivedAlarm):
        for a in ac.objects.filter(managed_object=o.id):
            # Relink root causes
            my_root = a.root
            for iac in (ActiveAlarm, ArchivedAlarm):
                for ia in iac.objects.filter(root=a.id):
                    ia.root = my_root
                    ia.save()
            # Delete alarm
            a.delete()
    # Wiping MAC DB
    log.debug("Wiping MAC DB")
    MACDB._get_collection().remove({"managed_object": o.id})
    # Wiping discovery id cache
    log.debug("Wiping discovery id")
    DiscoveryID._get_collection().remove({"object": o.id})
    # Wiping interfaces, subs and links
    # Wipe links
    log.debug("Wiping links")
    for i in Interface.objects.filter(managed_object=o.id):
        # @todo: Remove aggregated links correctly
        Link.objects.filter(interfaces=i.id).delete()
    #
    log.debug("Wiping subinterfaces")
    SubInterface.objects.filter(managed_object=o.id).delete()
    log.debug("Wiping interfaces")
    Interface.objects.filter(managed_object=o.id).delete()
    log.debug("Wiping forwarding instances")
    ForwardingInstance.objects.filter(managed_object=o.id).delete()
    # Unbind from IPAM
    log.debug("Unbind from IPAM")
    for a in Address.objects.filter(managed_object=o):
        a.managed_object = None
        a.save()
    # Wipe object status
    log.debug("Wiping object status")
    ObjectStatus.objects.filter(object=o.id).delete()
    # Wipe outages
    log.debug("Wiping outages")
    Outage.objects.filter(object=o.id).delete()
    # Wipe uptimes
    log.debug("Wiping uptimes")
    Uptime.objects.filter(object=o.id).delete()
    # Wipe reboots
    log.debug("Wiping reboots")
    Reboot.objects.filter(object=o.id).delete()
    # Delete Managed Object's capabilities
    log.debug("Wiping capabilities")
    ObjectCapabilities.objects.filter(object=o.id).delete()
    # Delete Managed Object's attributes
    log.debug("Wiping attributes")
    ManagedObjectAttribute.objects.filter(managed_object=o).delete()
    # Finally delete object and config
    log.debug("Finally wiping object")
    o.delete()
    log.debug("Done")

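As the `hasattr` guard at the top shows, `wipe()` accepts either a `ManagedObject` instance or a bare id, and returns early when the id does not resolve:

wipe(managed_object)  # a ManagedObject instance with an .id
wipe(12345)           # resolved via ManagedObject.objects.get(id=12345)
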
def __init__(self):
    self.logger = PrefixLoggerAdapter(logger, self.name)
    self.classes = {}
    self.lock = threading.Lock()
    self.all_classes = set()

def __init__(
    self,
    service,
    credentials,
    args=None,
    capabilities=None,
    version=None,
    parent=None,
    timeout=None,
    name=None,
    session=None,
    session_idle_timeout=None,
):
    self.service = service
    self.tos = config.activator.tos
    self.pool = config.pool
    self.parent = parent
    self._motd = None
    name = name or self.name
    self.logger = PrefixLoggerAdapter(
        self.base_logger, "%s] [%s" % (self.name, credentials.get("address", "-"))
    )
    if self.parent:
        self.profile = self.parent.profile
    else:
        self.profile = profile_loader.get_profile(".".join(name.split(".")[:2]))()
    self.credentials = credentials or {}
    self.version = version or {}
    self.capabilities = capabilities or {}
    self.timeout = timeout or self.get_timeout()
    self.start_time = None
    self._interface = self.interface()
    self.args = self.clean_input(args) if args else {}
    self.cli_stream = None
    self.mml_stream = None
    self.rtsp_stream = None
    if self.parent:
        self.snmp = self.root.snmp
    elif self.is_beefed:
        self.snmp = BeefSNMP(self)
    else:
        self.snmp = SNMP(self)
    if self.parent:
        self.http = self.root.http
    else:
        self.http = HTTP(self)
    self.to_disable_pager = not self.parent and self.profile.command_disable_pager
    self.scripts = ScriptsHub(self)
    # Store session id
    self.session = session
    self.session_idle_timeout = session_idle_timeout or self.SESSION_IDLE_TIMEOUT
    # Cache CLI and SNMP calls, if set
    self.is_cached = False  # Suitable only when self.parent is None.
    # Cached results for scripts marked with "cache"
    self.call_cache = {}  # Suitable only when self.parent is None
    # Cached results of self.cli calls
    self.cli_cache = {}
    #
    self.http_cache = {}
    self.partial_result = None
    # Tracking
    self.to_track = False
    self.cli_tracked_data = {}  # command -> [packets]
    self.cli_tracked_command = None
    # state -> [..]
    self.cli_fsm_tracked_data = {}
    #
    if not parent and version and not name.endswith(".get_version"):
        self.logger.debug("Filling get_version cache with %s", version)
        s = name.split(".")
        self.set_cache("%s.%s.get_version" % (s[0], s[1]), {}, version)
    # Fill matchers
    if not self.name.endswith(".get_version"):
        self.apply_matchers()
    #
    if self.profile.setup_script:
        self.profile.setup_script(self)

def __init__(self, remote_system):
    self.remote_system = remote_system
    self.name = remote_system.name
    self.config = self.remote_system.config
    self.logger = PrefixLoggerAdapter(logger, self.name)

async def init_api(self):
    # Build tags docs
    openapi_tags = []
    for tag in self.BASE_OPENAPI_TAGS_DOCS:
        openapi_tags += [{"name": tag, "description": self.BASE_OPENAPI_TAGS_DOCS[tag]}]
    if self.OPENAPI_TAGS_DOCS:
        for tag in self.OPENAPI_TAGS_DOCS:
            openapi_tags += [{"name": tag, "description": self.OPENAPI_TAGS_DOCS[tag]}]
    # Build FastAPI app
    self.app = FastAPI(
        title="NOC '%s' Service API" % (self.name or "unknown"),
        version=version.version,
        openapi_url="/api/%s/openapi.json" % self.name,
        docs_url="/api/%s/docs" % self.name,
        redoc_url="/api/%s/redoc" % self.name,
        openapi_tags=openapi_tags,
        exception_handlers={
            Exception: self.error_handler,
            RequestValidationError: self.request_validation_error_handler,
        },
    )
    self.app.add_middleware(ProxyHeadersMiddleware, trusted_hosts="*")
    self.app.add_middleware(LoggingMiddleware, logger=PrefixLoggerAdapter(self.logger, "api"))
    self.app.add_middleware(SpanMiddleware, service_name=self.name)
    self.server: Optional[uvicorn.Server] = None
    # Initialize routers
    for path in loader.iter_classes():
        self.app.include_router(loader.get_class(path))
    service_paths = ("services", self.name, "paths")
    if os.path.exists(os.path.join(*service_paths)):
        extra_loader = ServicePathLoader()
        extra_loader.base_path = service_paths
        for path in extra_loader.iter_classes():
            kls = extra_loader.get_class(path)
            if kls:
                self.app.include_router(kls)
    # Get address and port to bind
    addr, port = self.get_service_address()
    # Initialize uvicorn server
    # Reproduce Service.run/.serve method
    uvi_config = uvicorn.Config(
        self.app, host=addr, port=port, lifespan="on", access_log=False, loop="none"
    )
    self.server = uvicorn.Server(config=uvi_config)
    uvi_config.setup_event_loop()
    uvi_config.load()
    self.server.lifespan = uvi_config.lifespan_class(uvi_config)
    await self.server.startup()
    # Get effective listen socket port
    self.address, self.port = self.get_effective_address()
    self.logger.info("Running HTTP APIs at http://%s:%s/", self.address, self.port)
    self.logger.info(
        "Running HTTP APIs Docs at http://%s:%s/api/%s/docs", self.address, self.port, self.name
    )
    self.loop.create_task(self.server.main_loop())

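For comparison, uvicorn's usual entry point is a single awaited call; the manual `startup()`/`main_loop()` sequence above is presumably chosen so the service can run the server inside its own event loop alongside other tasks:

# Conventional standalone form (not used above):
server = uvicorn.Server(uvicorn.Config(app, host=addr, port=port))
await server.serve()
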
def initialize(self, service):
    self.service = service
    self.logger = PrefixLoggerAdapter(self.service.logger, self.name)
    self.tz = pytz.timezone(config.timezone)

def set_script(self, script):
    self.script = script
    self.logger = PrefixLoggerAdapter(self.script.logger, self.name)

def __init__(self, script):
    self.script = script
    self.logger = PrefixLoggerAdapter(script.logger, "http")

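Every snippet above funnels its output through `PrefixLoggerAdapter`. The recurring `"%s][%s"` format strings suggest the adapter itself wraps the whole prefix in brackets, so a prefix of `"a][b"` renders as `[a][b]`. A minimal sketch of such an adapter, assuming that convention plus the optional `target` stream seen in the discovery job; NOC's real implementation (including how nested prefixes are merged) may differ:

import logging

class SimplePrefixLoggerAdapter(logging.LoggerAdapter):
    """Prepend "[prefix] " to every message (illustrative only)."""

    def __init__(self, logger, prefix, target=None):
        super().__init__(logger, {})
        self.prefix = prefix
        self.target = target  # optional stream that duplicates the output

    def process(self, msg, kwargs):
        if self.prefix:
            msg = "[%s] %s" % (self.prefix, msg)
        if self.target is not None:
            self.target.write(msg + "\n")
        return msg, kwargs
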