def __init__(self, name, cleanup=None, reset_running=False, initial_submit=False, max_threads=None, preserve_order=False, max_faults=None, mrt_limit=None):
    """
    Set up a named job scheduler backed by a MongoDB collection.

    :param name: Scheduler name; used as logging prefix and as the
        suffix of the backing collection name
    :param cleanup: Optional callback stored for later cleanup
    :param reset_running: When True, stale running jobs are reset
    :param initial_submit: Enable initial submit checks for job classes
    :param max_threads: Upper bound for worker threads
    :param preserve_order: Keep strict scheduling order
    :param max_faults: Fault limit before a job is disabled
    :param mrt_limit: Limit of simultaneously active map/reduce tasks
    """
    self.logger = PrefixLoggerAdapter(logger, name)
    self.name = name
    # Registered job classes
    self.job_classes = {}
    self.collection_name = self.COLLECTION_BASE + self.name
    self.collection = get_db()[self.collection_name]
    # ReduceTask -> Job instance
    self.active_mrt = {}
    self.cleanup_callback = cleanup
    self.reset_running = reset_running
    self.ignored = []
    self.initial_submit = initial_submit
    # job class -> timestamp of the next initial-submit check
    self.initial_submit_next_check = {}
    self.max_threads = max_threads
    self.preserve_order = preserve_order
    self.max_faults = max_faults
    self.mrt_limit = mrt_limit
    self.mrt_overload = False
    self.running_lock = threading.Lock()
    # Group -> Count of running jobs
    self.running_count = defaultdict(int)
    self.log_jobs = None
    self.metrics = MetricsHub(
        "noc.scheduler.%s" % name,
        "jobs.count",
        "jobs.success",
        "jobs.failed",
        "jobs.dereference.count",
        "jobs.dereference.success",
        "jobs.dereference.failed",
        "jobs.time"
    )
def __init__(self, name, local=False):
    """
    Attach to the collection *name* ("<module>.<collection>").

    Raises ValueError when the name is not registered or when no
    usable name/unique field can be found on the document class.
    """
    self.logger = PrefixLoggerAdapter(logger, name)
    if name not in self.COLLECTIONS:
        self.logger.error("Invalid collection '%s'", name)
        raise ValueError("Invalid collection '%s'" % name)
    module, collection = name.split(".", 1)
    self.module = module
    self.cname = name
    self.name = collection
    self.local = local
    self.doc = self.COLLECTIONS[name]
    self.items = {}  # uuid -> CollectionItem
    self.changed = False
    self.ref_cache = {}
    self.partial = set()
    if hasattr(self.doc, "name"):
        # Use .name field when present
        self.get_name = attrgetter("name")
    else:
        # Otherwise fall back to a unique single-field index
        # (last matching one wins, as in the original scan)
        unique_fields = [
            spec["fields"][0][0]
            for spec in self.doc._meta["index_specs"]
            if spec["unique"] and len(spec["fields"]) == 1
        ]
        if not unique_fields:
            self.logger.error("Cannot find unique index")
            raise ValueError("No unique index")
        self.get_name = attrgetter(unique_fields[-1])
    self.translations = self.TRANSLATIONS.get(name, self.TRANSLATIONS[None])
def __init__(self, factory, socket=None):
    """
    Register this socket wrapper within *factory* and arm the TTL timer.
    """
    self.logger = PrefixLoggerAdapter(logger, self.get_label())
    self.factory = factory
    self.socket = socket
    self.start_time = time.time()
    # @todo: Meaningful value
    self.last_read = self.start_time + 100
    self.name = None
    self.closing = False  # In closing state
    self.stale = False  # Closed as stale
    self.ttl = self.TTL
    self.set_timeout(self.TTL)
    self.factory.register_socket(self)
    if socket:
        self.set_status(r=True)
def __init__(self, object):
    """
    Prepare the config validation engine for a managed object.

    :param object: Managed object whose configuration is validated
    """
    self.object = object
    self.logger = PrefixLoggerAdapter(logger, self.object.name)
    self.env = None
    self.templates = {}  # fact class -> template
    self.fcls = {}  # template -> Fact class
    self.facts = {}  # Index -> Fact
    self.rn = 0  # Rule number
    self.config = None  # Cached config
    self.interface_ranges = None
    with self.ILOCK:
        # NOTE(review): guarded by a class-level lock but stored on the
        # instance, so the alarm class is re-fetched for every instance;
        # confirm whether class-level caching was intended
        self.AC_POLICY_VIOLATION = AlarmClass.objects.filter(
            name="Config | Policy Violation").first()
        if not self.AC_POLICY_VIOLATION:
            # Fix: use the prefixed instance logger (the original used
            # the bare module logger, losing the object-name prefix)
            self.logger.error(
                "Alarm class 'Config | Policy Violation' is not found. "
                "Alarms cannot be raised")
def __init__(self, scheduler, key=None, data=None, schedule=None):
    """
    Bind a job instance to its scheduler, key and payload.
    """
    self.scheduler = scheduler
    self.key = key
    self.data = data if data else {}
    self.schedule = schedule if schedule else {}
    self.object = None  # Set by dereference()
    self.started = None  # Timestamp
    self._log = []
    # List of (job_name, key) to launch on complete
    self.on_complete = []
    self.to_log = scheduler and scheduler.to_log_jobs
    self.job_log = []
    prefix = "%s][%s][%s" % (
        self.scheduler.name, self.name, self.get_display_key())
    self.logger = PrefixLoggerAdapter(logger, prefix)
    if scheduler.to_log_jobs:
        # Mirror log records into the per-job log
        self.logger = TeeLoggerAdapter(self.logger, self.job_log)
def __init__(self, name, is_superuser, enabled, user, uid, group, gid, instance_id, config_path):
    """
    Load one daemon instance's settings: defaults first, then the
    instance config file on top of them.
    """
    self.logger = PrefixLoggerAdapter(logger, "%s#%s" % (name, instance_id))
    self.logger.info("Reading config")
    self.instance_id = instance_id
    self.name = name
    self.config_path = config_path
    self.config = ConfigParser.SafeConfigParser()
    # Defaults are read first and overridden by the instance config
    self.config.read("etc/%s.defaults" % name)
    self.config.read(config_path)
    self.enabled = enabled
    self.pid = None
    pidfile = self.config.get("main", "pidfile")
    self.pidfile = pidfile.replace("{{instance}}", self.instance_id)
    self.is_superuser = is_superuser
    self.user = user
    self.uid = uid
    self.group = group
    self.gid = gid
def configure(self, uuid, handler, interval, metrics, config, managed_object, **kwargs):
    # Apply a (possibly updated) task configuration: handler,
    # polling interval and the set of metrics to collect.
    if not self.uuid:
        # First configuration: attach a uuid-prefixed logger
        self.logger = PrefixLoggerAdapter(logger, uuid)
    self.uuid = uuid
    self.handler_name = handler
    nh = probe_registry.get_handler(handler)
    if nh != self.handler:
        # Handler changed: instantiate the probe for the new handler
        self.handler = nh
        self.probe = nh.im_class(self.daemon, self)
    if interval != self.interval:
        # Change offset: randomize the start offset within the
        # interval to spread polling load
        self.offset = interval * random.random()
        self.interval = interval
        self.next_run = self.get_next_run()
        if not self.running:
            self.daemon.reschedule(self)
    self.config = config
    # Apply metrics
    if self.metrics != metrics:
        self.metrics = metrics
        c = set(self.mdata)  # currently configured metric types
        n = set(m["metric_type"] for m in metrics)  # requested metric types
        # Remove metrics no longer requested
        for m in c - n:
            del self.mdata[m]
        # Create newly requested metrics
        for m in n - c:
            self.mdata[m] = Metric(self.daemon)
        # Configure metrics
        for m in metrics:
            m["managed_object"] = managed_object
            self.mdata[m["metric_type"]].configure(**m)
        # Single-metric tasks get a default metric type shortcut
        if len(metrics) == 1:
            self.default_metric_type = metrics[0]["metric_type"]
        else:
            self.default_metric_type = None
def __init__(self, name="pool", metrics_prefix=None, start_threads=1, max_threads=10, min_spare=1, max_spare=1, backlog=0):
    """
    Thread pool initialization.

    :param name: Pool name, used in logging and metrics
    :param metrics_prefix: Metrics namespace prefix; defaults to "noc"
    :param start_threads: Number of threads to start immediately
    :param max_threads: Hard upper bound on worker threads
    :param min_spare: Minimal number of idle threads to keep
    :param max_spare: Maximal number of idle threads to keep
    :param backlog: Task queue depth; 0 means "same as max_threads"
    :raises ValueError: on inconsistent spare/thread limits
    """
    if min_spare > max_spare:
        raise ValueError("min_spare (%d) must not be greater"
                         " than max_spare (%d)" % (min_spare, max_spare))
    if start_threads > max_threads:
        raise ValueError("start_threads (%d) must not be greater"
                         " than max_threads (%d)" % (start_threads, max_threads))
    self.logger = PrefixLoggerAdapter(logger, name)
    self.name = name
    if not metrics_prefix:
        metrics_prefix = "noc"
    # NOTE(review): produces "<prefix>pool.<name>" with no separator
    # before "pool" -- confirm whether "<prefix>.pool.<name>" was
    # intended; left unchanged to preserve existing metric names
    metrics_prefix += "pool.%s" % name
    self.metrics = MetricsHub(
        metrics_prefix,
        "threads.running",
        "threads.idle",
        "queue.len"
    )
    self.start_threads = start_threads
    self.max_threads = max_threads
    self.min_spare = min_spare
    self.max_spare = max_spare
    self.backlog = backlog if backlog else max_threads
    self.t_lock = Lock()
    self.threads = set()
    # Fix: bound the queue with the effective backlog. The original
    # passed the raw argument, so the default backlog=0 created an
    # unbounded queue instead of one limited to max_threads
    self.queue = Queue(self.backlog)
    self.stopping = False
    self.stopped = Event()
    self.n_idle = 0
    self.idle_lock = Lock()
    self.logger.info("Running thread pool '%s'", self.name)
    self.set_idle(None)
def __init__(self, daemon, task):
    """
    Bind the check to the owning daemon and its task.
    """
    self.daemon = daemon
    self.task = task
    # oid -> expire time for OIDs that failed to answer
    self.missed_oids = {}
    self.logger = PrefixLoggerAdapter(
        logging.getLogger(self.__module__), self.task.uuid)
def __init__(self, path, indexes=None, fields=None):
    """
    Describe a table stored at *path* with its fields and indexes.
    """
    self.logger = PrefixLoggerAdapter(logger, path)
    self.path = path
    # "uuid" is always the leading field
    self.fields = ["uuid"]
    if fields:
        self.fields += list(fields)
    self.indexes = indexes if indexes else []
    self.connect = None
def __init__(self, daemon, name):
    """
    Attach to the owning daemon and announce startup.
    """
    self.daemon = daemon
    self.name = name
    # Reuse the daemon's logger, prefixed with this channel's name
    self.logger = PrefixLoggerAdapter(daemon.logger, name)
    self.logger.info("Starting %s (%s)", name, self.type)
    # Pending commands awaiting dispatch
    self.cmd_queue = []
def wipe(o):
    """
    Irreversibly remove a managed object and every record that
    references it: scheduled tasks, FM events and alarms, topology
    data (interfaces, links, MACs), IPAM bindings, status history,
    facts, attributes and validation-rule references.

    :param o: Managed object to wipe
    :returns: True when the object is skipped (NOC.* profiles are
        internal and never wiped); None otherwise
    """
    if o.profile_name.startswith("NOC."):
        return True
    log = PrefixLoggerAdapter(logger, str(o.id))
    # Delete active map tasks
    log.debug("Wiping MAP tasks")
    MapTask.objects.filter(managed_object=o).delete()
    # Wiping discovery tasks
    log.debug("Wiping discovery tasks")
    db = get_db()
    db.noc.schedules.inv.discovery.remove({"key": o.id})
    # Wiping FM events
    log.debug("Wiping events")
    NewEvent.objects.filter(managed_object=o.id).delete()
    FailedEvent.objects.filter(managed_object=o.id).delete()
    ActiveEvent.objects.filter(managed_object=o.id).delete()
    ArchivedEvent.objects.filter(managed_object=o.id).delete()
    # Wiping alarms
    log.debug("Wiping alarms")
    for ac in (ActiveAlarm, ArchivedAlarm):
        for a in ac.objects.filter(managed_object=o.id):
            # Relink root causes so child alarms are not orphaned
            my_root = a.root
            for iac in (ActiveAlarm, ArchivedAlarm):
                for ia in iac.objects.filter(root=a.id):
                    ia.root = my_root
                    ia.save()
            # Delete alarm
            a.delete()
    # Wiping MAC DB
    log.debug("Wiping MAC DB")
    MACDB._get_collection().remove({"managed_object": o.id})
    # Wiping pending link check
    log.debug("Wiping pending link check")
    PendingLinkCheck._get_collection().remove({"local_object": o.id})
    PendingLinkCheck._get_collection().remove({"remote_object": o.id})
    # Wiping discovery id cache
    log.debug("Wiping discovery id")
    DiscoveryID._get_collection().remove({"object": o.id})
    # Wiping interfaces, subs and links
    # Wipe links
    log.debug("Wiping links")
    for i in Interface.objects.filter(managed_object=o.id):
        # @todo: Remove aggregated links correctly
        Link.objects.filter(interfaces=i.id).delete()
    # Wipe subinterfaces before their parent interfaces
    log.debug("Wiping subinterfaces")
    SubInterface.objects.filter(managed_object=o.id).delete()
    log.debug("Wiping interfaces")
    Interface.objects.filter(managed_object=o.id).delete()
    log.debug("Wiping forwarding instances")
    ForwardingInstance.objects.filter(managed_object=o.id).delete()
    # Unbind from IPAM
    log.debug("Unbind from IPAM")
    for a in Address.objects.filter(managed_object=o):
        a.managed_object = None
        a.save()
    # Wipe object status
    log.debug("Wiping object status")
    ObjectStatus.objects.filter(object=o.id).delete()
    # Wipe outages
    log.debug("Wiping outages")
    Outage.objects.filter(object=o.id).delete()
    # Wipe uptimes
    log.debug("Wiping uptimes")
    Uptime.objects.filter(object=o.id).delete()
    # Wipe reboots
    log.debug("Wiping reboots")
    Reboot.objects.filter(object=o.id).delete()
    # Delete Managed Object's capabilities
    # Fix: corrected log message typo ("capabilitites")
    log.debug("Wiping capabilities")
    ObjectCapabilities.objects.filter(object=o.id).delete()
    # Delete Managed Object's facts
    log.debug("Wiping facts")
    ObjectFact.objects.filter(object=o.id).delete()
    # Delete Managed Object's attributes
    log.debug("Wiping attributes")
    ManagedObjectAttribute.objects.filter(managed_object=o).delete()
    # Detach from validation rule
    log.debug("Detaching from validation rules")
    for vr in ValidationRule.objects.filter(objects_list__object=o.id):
        vr.objects_list = [x for x in vr.objects_list if x.object.id != o.id]
        # Deactivate rules left with no targets at all
        if not vr.objects_list and not vr.selectors_list:
            vr.is_active = False
        vr.save()
    # Finally delete object and config
    log.debug("Finally wiping object")
    o.delete()
    log.debug("Done")