def parse_patch(self):
    """
    Collect installed patch versions, preferring SNMP over CLI.

    :return: List of patch version strings (may be empty)
    """
    r = []
    if self.has_snmp_access():
        # Trying SNMP
        try:
            # HUAWEI-SYS-MAN-MIB::hwPatchVersion walk
            for oid, x in self.snmp.getnext(
                    mib["HUAWEI-SYS-MAN-MIB::hwPatchVersion", 0]):
                if not x:
                    continue
                # Strip padding spaces and NUL bytes from the value
                r += [x.strip(smart_text(" \x00"))]
            if r:
                return r
        except (self.snmp.TimeOutError, self.snmp.SNMPError):
            # Fall through to CLI collection
            pass
    if self.has_snmp_only_access():
        # CLI is not available for this object
        return r
    try:
        v = self.cli("display patch-information")
    except (self.ScriptError, CLIError):
        return []
    v = self.rx_patch.search(v)
    if v and v.group("patch_version"):
        r += [v.group("patch_version")]
    return r
def get_beef(self, script, obj): result = { "version": BEEF_FORMAT, "uuid": str(uuid.uuid4()), "spec": None, "changed": datetime.datetime.now().isoformat(), "cli": [], "cli_fsm": [], "mib": [], "mib_encoding": MIB_ENCODING, "cli_encoding": CLI_ENCODING, } # Process CLI answers result["cli"] = self.get_cli_results(script) # Apply CLI fsm states result["cli_fsm"] = self.get_cli_fsm_results(script) # Apply MIB snapshot # self.logger.debug("Collecting MIB snapshot") # result["mib"] = self.get_snmp_results(spec) # Process version reply if script.version: result["box"] = script.version else: result["box"] = { "vendor": smart_text(obj.vendor.name) if obj.vendor else "Unknown", "platform": obj.platform.name if obj.platform else "Unknown", "version": obj.version.version if obj.version else "Unknown", } result["box"]["profile"] = obj.profile.name return result
def handle(self, *args, **options):
    """
    Wipe objects of a given model by id.

    Usage: <model> <object id> [.. <object id>]

    Resolves get_<model>/wipe_<model> handlers on self, looks up every id
    first (failing fast on the first unknown id), then wipes all objects
    inside a single bulk-change tracking context.
    """
    if len(args) < 1:
        print("USAGE: %s <model> <object id> [.. <object id>]" % sys.argv[0])
        sys.exit(1)
    # Model names use underscores internally
    m = args[0].replace("-", "_")
    connect()
    if m not in self.models:
        raise CommandError("Invalid model '%s'. Valid models are: %s" % (m, ", ".join(self.models)))
    objects = []
    getter = getattr(self, "get_%s" % m)
    wiper = getattr(self, "wipe_%s" % m)
    # Get objects
    for o_id in args[1:]:
        o = getter(o_id)
        if not o:
            # Not found
            raise CommandError("Object '%s' is not found" % o_id)
        objects += [o]
    # Wipe objects
    from noc.core.debug import error_report

    with change_tracker.bulk_changes():
        for o in objects:
            with self.log("Wiping '%s':" % smart_text(o), True):
                try:
                    wiper(o)
                except KeyboardInterrupt:
                    raise CommandError(
                        "Interrupted. Wiping is not complete")
                except Exception:
                    # Log the traceback and continue with remaining objects
                    error_report()
def _update_object(cls, data, meta=None, fmt=None, state=None, bulk=None) -> bool:
    """
    Insert or update a single datastream object.

    :param data: Object payload; must contain "id", may contain "$meta"/"$deleted"
    :param meta: Optional metadata document; taken from data["$meta"] when absent
    :param fmt: Optional format name (selects the per-format collection)
    :param state: Optional preloaded {id: doc} map to avoid a DB lookup
    :param bulk: Optional list collecting pymongo.UpdateOne for bulk execution
    :return: True when the object has been changed, False otherwise
    """

    def is_changed(d, h):
        # Changed when there is no stored doc or its hash differs
        return not d or d.get(cls.F_HASH) != h

    obj_id = cls.clean_id(data["id"])
    if meta is None and "$meta" in data:
        meta = data.pop("$meta")
    m_name = "%s_%s" % (cls.name, fmt) if fmt else cls.name
    l_name = "%s|%s|%s" % (cls.name, obj_id, fmt) if fmt else "%s|%s" % (cls.name, obj_id)
    metrics["ds_%s_updated" % m_name] += 1
    # Calculate hash (renamed from `hash`, which shadowed the builtin)
    data_hash = cls.get_hash(data)
    # Get existing object state
    if state:
        doc = state.get(obj_id)
    else:
        doc = cls.get_collection(fmt).find_one({cls.F_ID: obj_id}, {
            cls.F_ID: 0,
            cls.F_HASH: 1
        })
    if not is_changed(doc, data_hash):
        logger.info("[%s] Object hasn't been changed", l_name)
        return False  # Not changed
    if not fmt and cls.on_change(data):
        # on_change may alter data; recalculate the hash and re-check
        data_hash = cls.get_hash(data)
        if not is_changed(doc, data_hash):
            logger.info("[%s] Object hasn't been changed", l_name)
            return False  # Not changed after altering
    metrics["ds_%s_changed" % m_name] += 1
    change_id = bson.ObjectId()
    data["change_id"] = str(change_id)
    op = {
        "$set": {
            cls.F_CHANGEID: change_id,
            cls.F_HASH: data_hash,
            cls.F_DATA: smart_text(orjson.dumps(data)),
        }
    }
    if meta:
        op["$set"][cls.F_META] = meta
    elif "$deleted" not in data:
        # No metadata and not a tombstone: drop any stale metadata
        op["$unset"] = {cls.F_META: ""}
    if bulk is None:
        cls.get_collection(fmt).update_one({cls.F_ID: obj_id}, op, upsert=True)
    else:
        bulk += [pymongo.UpdateOne({cls.F_ID: obj_id}, op, upsert=True)]
    logger.info("[%s] Object has been changed", l_name)
    if cls.enable_message:
        # Build MX message
        logger.info("[%s] Sending message", l_name)
        cls.send_message(data, change_id)
    return True
async def load_async(self): consul = ConsulClient(host=self.host, port=self.port, token=self.token) # Convert to dict data = {} if self.path.endswith("/"): pl = len(self.path) else: pl = len(self.path) + 1 index, kv_data = await consul.kv.get(self.path, recurse=True, token=self.token) if not kv_data: return for i in kv_data: k = i["Key"][pl:] v = i["Value"] if v == '""' or v == "''": # fix if value is "" - return '""' v = "" c = k.count("/") if not c: data[k] = v elif c == 1: d = k.split("/") if d[0] not in data: data[d[0]] = {} data[d[0]][d[1]] = smart_text(v) # Upload self.config.update(data)
async def get_rtsp_response(self):
    """
    Read and parse a single RTSP response.

    On success stores the decoded body in self.result and returns it.
    On a malformed header, 401, or any non-2xx status, sets self.error
    accordingly, clears self.result and returns None.
    """
    sep = b"\r\n\r\n"
    raw = await self.read_until_end()
    # The header block must be terminated by an empty line
    if sep not in raw:
        self.result = ""
        self.error = RTSPBadResponse("Missed header separator")
        return None
    header, body = raw.split(sep, 1)
    code, headers, msg = self.parse_rtsp_header(header)
    self.headers = headers
    self.logger.debug(
        "Parsed received, err code: %d, err message: %s, headers: %s",
        code, msg, headers)
    if code == 401:
        # Authentication failure is reported separately
        self.result = ""
        self.error = RTSPAuthFailed("%s (code=%s)" % (msg, code), code=int(code))
        return None
    if code < 200 or code > 299:
        # Any other non-2xx status is a generic RTSP error
        self.result = ""
        self.error = RTSPError("%s (code=%s)" % (msg, code), code=int(code))
        return None
    self.result = smart_text(body)
    return self.result
def get_traceback(reverse=config.traceback.reverse, fp=None, exc_info=None):
    """
    Format the current (or given) exception as a full traceback report.

    :param reverse: Render frames innermost-first when True
        (NOTE: default is bound once at import time from config)
    :param fp: Optional error fingerprint to include in the header
    :param exc_info: Optional (type, value, traceback) tuple;
        defaults to sys.exc_info()
    :return: Multi-line report string
    """
    exc_info = exc_info or sys.exc_info()
    t, v, tb = exc_info
    try:
        check_fatal_errors(t, v)
    except:  # noqa
        pass  # noqa Ignore exceptions now
    now = datetime.datetime.now()
    r = [
        "UNHANDLED EXCEPTION (%s)" % str(now),
        "PROCESS: %s" % version.process,
        "VERSION: %s" % version.version,
    ]
    if version.branch:
        r += ["BRANCH: %s CHANGESET: %s" % (version.branch, version.changeset)]
    if fp:
        r += ["ERROR FINGERPRINT: %s" % fp]
    r += [
        "WORKING DIRECTORY: %s" % os.getcwd(),
        "EXCEPTION: %s %s" % (t, v),
        format_frames(get_traceback_frames(tb), reverse=reverse),
    ]
    if not reverse:
        # Repeat header info at the bottom so it stays visible after
        # long frame dumps
        r += ["UNHANDLED EXCEPTION (%s)" % str(now), str(t), str(v)]
    return "\n".join(smart_text(x, errors="ignore") for x in r)
def set_value(self, value):
    """
    Set the services list from either a single service name or a sequence.

    Bug fix: the original called smart_text(value) *before* the isinstance
    check, so value was always a string afterwards -- the sequence branch
    was unreachable and passing a list stored its repr as a single service.
    The type is now inspected first; only scalar names are normalized.
    """
    self.value = None
    if isinstance(value, (six.string_types, bytes)):
        # Single service name: normalize to text and wrap into a list
        self.services = [smart_text(value)]
    else:
        # Already a sequence of service names: keep as-is
        self.services = value
async def whois_async(query, fields=None):
    """
    Perform whois request
    :param query: Domain name or raw whois query string
    :param fields: Optional iterable of field names to keep in the response
    :return: Parsed response (field, value) pairs, or None when the whois
        host cannot be reached (best-effort lookup)
    """
    logger.debug("whois %s", query)
    # Get appropriate whois server
    if is_fqdn(query):
        # Use TLD.whois-servers.net for domain lookup
        tld = query.split(".")[-1]
        server = "%s.whois-servers.net" % tld
    else:
        server = DEFAULT_WHOIS_SERVER
    # Perform query
    try:
        client = TCPClient()
        stream = await client.connect(server, DEFAULT_WHOIS_PORT)
    except IOError as e:
        logger.error("Cannot resolve host '%s': %s", server, e)
        return
    try:
        await stream.write(smart_bytes(query) + b"\r\n")
        data = await stream.read_until_close()
    finally:
        # Always release the connection, even on write/read failure
        stream.close()
    data = smart_text(data)
    data = parse_response(data)
    if fields:
        data = [(k, v) for k, v in data if k in fields]
    return data
def execute(self, sql=None, args=None, nodb=False, post=None, extra=None):
    """
    Execute a query against a random ClickHouse address over HTTP.

    :param sql: Query text; %s placeholders are substituted with quoted args
    :param args: Optional query arguments (strings are quoted and escaped)
    :param nodb: When True, do not pass database= in the query string
    :param post: Raw POST body; when absent the query itself is POSTed
    :param extra: Optional iterable of (key, value) extra settings
    :return: List of rows, each a list of tab-separated column strings
    :raises ClickhouseError: on any non-200 HTTP response
    """

    def q(v):
        # Quote a single argument for inlining into SQL
        # @todo: quote dates
        if isinstance(v, str):
            return "'%s'" % (v.replace("\\", "\\\\").replace("'", "\\'"))
        else:
            return str(v)

    qs = []
    if not nodb:
        qs += ["database=%s" % config.clickhouse.db]
    if extra:
        qs += ["%s=%s" % (k, v) for k, v in extra]
    if sql:
        if args:
            sql = sql % tuple(q(v) for v in args)
        if post:
            # A POST body is already supplied: pass the query via URL
            qs += ["query=%s" % urllib_quote(sql.encode("utf8"))]
        else:
            post = sql.encode("utf8")
    url = "http://%s/?%s" % (random.choice(self.addresses), "&".join(qs))
    code, headers, body = fetch_sync(
        url,
        method="POST",
        body=post,
        user=self.user,
        password=self.password,
        connect_timeout=config.clickhouse.connect_timeout,
        request_timeout=config.clickhouse.request_timeout,
    )
    if code != 200:
        raise ClickhouseError("%s: %s" % (code, body))
    return [smart_text(row).split("\t") for row in body.splitlines()]
def execute_snmp(self):
    """
    Collect LLDP neighbor information via SNMP (LLDP-MIB).

    :return: List of {"local_interface": ..., "neighbors": [...]} dicts
    """
    # Field names for lldpRemTable columns, in the same order as the
    # OIDs requested below
    neighb = (
        "remote_chassis_id_subtype",
        "remote_chassis_id",
        "remote_port_subtype",
        "remote_port",
        "remote_port_description",
        "remote_system_name",
        "remote_system_description",
        "remote_capabilities",
    )
    r = []
    local_ports = {}
    # Get LocalPort Table
    for v in self.snmp.get_tables([
        "1.0.8802.1.1.2.1.3.7.1.2",  # LLDP-MIB::lldpLocPortIdSubtype
        "1.0.8802.1.1.2.1.3.7.1.4",  # LLDP-MIB::lldpLocPortDesc
    ]):
        local_ports[v[0]] = {
            "local_interface": self.profile.convert_interface_name(v[2]),
            "local_interface_subtype": v[1],
        }
    for v in self.snmp.get_tables(
        [
            "1.0.8802.1.1.2.1.4.1.1.4",  # LLDP-MIB::lldpRemChassisIdSubtype
            "1.0.8802.1.1.2.1.4.1.1.5",  # LLDP-MIB::lldpRemChassisId
            "1.0.8802.1.1.2.1.4.1.1.6",  # LLDP-MIB::lldpRemPortIdSubtype
            "1.0.8802.1.1.2.1.4.1.1.7",  # LLDP-MIB::lldpRemPortId
            "1.0.8802.1.1.2.1.4.1.1.8",  # LLDP-MIB::lldpRemPortDesc
            "1.0.8802.1.1.2.1.4.1.1.9",  # LLDP-MIB::lldpRemSysName
            "1.0.8802.1.1.2.1.4.1.1.10",  # LLDP-MIB::lldpRemSysDesc
            "1.0.8802.1.1.2.1.4.1.1.12",  # LLDP-MIB::lldpRemSysCapEnabled
        ],
        display_hints={
            "1.0.8802.1.1.2.1.4.1.1.7": render_bin,
            "1.0.8802.1.1.2.1.4.1.1.5": render_bin,
        },
    ):
        # v[0] is the row index, v[1:] the column values
        neigh = dict(list(zip(neighb, v[1:])))
        if neigh["remote_chassis_id_subtype"] == LLDP_CHASSIS_SUBTYPE_MAC:
            neigh["remote_chassis_id"] = MAC(neigh["remote_chassis_id"])
        if neigh["remote_port_subtype"] == LLDP_PORT_SUBTYPE_MAC:
            neigh["remote_port"] = MAC(neigh["remote_port"])
        # Strip trailing NUL bytes from all string values
        for i in neigh:
            if isinstance(neigh[i], str):
                neigh[i] = neigh[i].rstrip(smart_text("\x00"))
        if neigh["remote_capabilities"]:
            # NOTE(review): `<< 8 + 0x0` parses as `<< (8 + 0)`, i.e. a
            # plain left shift by 8 -- confirm the intended expression.
            # The 16-bit string is reversed to flip LLDP bit ordering.
            neigh["remote_capabilities"] = int(
                "".join(x for x in reversed("{0:016b}".format(
                    ord(neigh["remote_capabilities"]) << 8 + 0x0))),
                2,
            )
        else:
            neigh["remote_capabilities"] = 0
        # NOTE(review): assumes the second component of the row index is
        # the local port number present in local_ports -- verify
        r += [{
            "local_interface": local_ports[v[0].split(".")[1]]["local_interface"],
            "neighbors": [neigh],
        }]
    return r
def get_data(self):
    """
    Build card data for a path between two managed objects.

    self.id has the form "<mo1 id>-<mo2 id>". Returns both endpoints and
    a JSON-encoded polyline of their coordinates, merging consecutive
    objects that share the same point.
    """
    mo1, mo2 = self.id.split("-")
    mo1 = ManagedObject.get_by_id(int(mo1)) if mo1 else None
    mo2 = ManagedObject.get_by_id(int(mo2)) if mo2 else None
    s_path = [mo1]
    if mo1 and mo2:
        try:
            s_path = get_shortest_path(mo1, mo2)
        except ValueError:
            # No path found -- fall back to the bare endpoints
            s_path = [mo1, mo2]
    path = []
    for mo in s_path:
        # Skip objects without coordinates
        if not mo.x or not mo.y:
            continue
        if not path or mo.x != path[-1]["x"] or mo.y != path[-1]["y"]:
            path += [{
                "x": mo.x,
                "y": mo.y,
                "objects": [{
                    "id": mo.id,
                    "name": mo.name
                }]
            }]
        else:
            # Same coordinates as the previous point: merge object lists
            path[-1]["objects"] += [{"id": mo.id, "name": mo.name}]
    return {"mo1": mo1, "mo2": mo2, "path": smart_text(orjson.dumps(path))}
def parse_p_oid(self, msg: bytes) -> str:
    """
    Decode a BER-packed OBJECT IDENTIFIER into dotted-string form.

    The decoded value is also cached on self.last_oid.

    >>> BERDecoder().parse_p_oid("+\\x06\\x01\\x02\\x01\\x01\\x05\\x00")
    "1.3.6.1.2.1.1.5.0"
    """
    # Delegate to the low-level parser and normalize the result to text
    self.last_oid = smart_text(parse_p_oid(msg))
    return self.last_oid
def get_cli_results(self, spec):
    """
    Returns "cli" section

    :param spec: Spec document with an "answers" list; entries of type
        "cli" are grouped by command ("value") to collect answer names
    :return: List of {"names", "request", "reply"} dicts

    Fix: tracking is now disabled in a ``finally`` block, so an
    unexpected exception no longer leaves CLI tracking switched on.
    """
    r = []
    # Group answer names by command
    cmd_answers = OrderedDict()
    for ans in spec["answers"]:
        if ans["type"] == "cli":
            cmd_answers.setdefault(ans["value"], []).append(ans["name"])
    if not cmd_answers:
        return []
    self.logger.debug("Collecting CLI beef")
    self.start_tracking()
    try:
        for cmd in cmd_answers:
            self.logger.debug("Collecting command: %s" % cmd)
            # Issue command; errors still produce tracked traffic
            try:
                self.cli(cmd)
            except self.ScriptError:
                pass
            # Append tracked data
            for rcmd, packets in self.iter_cli_tracking():
                r += [{
                    "names": cmd_answers.get(rcmd, ["setup.cli"]),
                    "request": smart_text(rcmd),
                    "reply": [self.encode_cli(v) for v in packets],
                }]
    finally:
        # Always disable tracking, even if an unexpected error escapes
        self.stop_tracking()
    return r
def snmp_v1_get(self, address, community, oid):
    """
    Perform SNMP v1 GET and return result

    NOTE(review): legacy Tornado coroutine style (yield +
    raise tornado.gen.Return); the v2c sibling uses native async/await --
    presumably decorated with @tornado.gen.coroutine at the call site.

    :param address: IP address
    :param community: SNMP v2c community
    :param oid: Resolved oid
    :returns: Result as a string, or None, when no response
    """
    self.logger.debug("SNMP v1 GET %s %s", address, oid)
    try:
        result = yield snmp_get(
            address=address,
            oids=oid,
            community=community,
            version=SNMP_v1,
            tos=config.activator.tos,
            ioloop=self.service.ioloop,
        )
        # Decode to text, replacing undecodable bytes; keep falsy results as-is
        result = smart_text(result, errors="replace") if result else result
        self.logger.debug("SNMP GET %s %s returns %s", address, oid, result)
    except SNMPError as e:
        metrics["error", ("type", "snmp_v1_error")] += 1
        result = None
        self.logger.debug("SNMP GET %s %s returns error %s", address, oid, e)
    raise tornado.gen.Return(result)
def get_row(p):
    # Build a single report row for prefix *p*:
    # prefix, state name, VC label, custom-field values (cf comes from the
    # enclosing scope), then description and the object itself.
    vc_label = smart_text(p.vc) if p.vc else ""
    row = [p.prefix, p.state.name, vc_label]
    for field in cf:
        value = getattr(p, field.name)
        # Render missing custom-field values as empty cells
        row.append(value if value is not None else "")
    row.append(p.description)
    row.append(p)
    return row
async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
    """
    ASGI logging/metrics middleware.

    Logs every HTTP request with its response status and timing, suppresses
    noisy health/metrics probes, and counts requests/responses in metrics.

    Fix: the original hard-coded ``status = 200``; the real status is now
    captured from the ``http.response.start`` message, so suppression
    rules and the http_response metric reflect actual responses.
    """

    def to_suppress_logging():
        # Successful (or rate-limited) health probes and metric scrapes
        # are too noisy to log
        return (method == "GET") and (
            ((status == 200 or status == 429) and path in ("/health/", "/health"))
            or (status == 200 and path == "/metrics")
        )

    def is_mon_request():
        return status == 200 and path in ("/mon/", "/mon") and method == "GET"

    if scope["type"] != "http":
        # Pass through non-HTTP events (websocket, lifespan) untouched
        await self.app(scope, receive, send)
        return
    status = 200  # Default when the app never sends a response start

    async def send_wrapper(message):
        # Capture the real response status on http.response.start
        nonlocal status
        if message["type"] == "http.response.start":
            status = message["status"]
        await send(message)

    t0 = perf_counter()
    try:
        await self.app(scope, receive, send_wrapper)
    finally:
        t1 = perf_counter()
        method = scope["method"]
        path = scope["path"]
        if to_suppress_logging():
            pass
        elif is_mon_request():
            self.logger.debug("Monitoring request (%s)", scope["client"][0])
            metrics["mon_requests"] += 1
        else:
            if scope["query_string"]:
                path = "%s?%s" % (path, smart_text(scope["query_string"]))
            remote_ip = scope["client"][0]
            self.logger.info("%s %s (%s) %.2fms", method, path, remote_ip,
                             1000.0 * (t1 - t0))
        metrics["http_requests", ("method", method.lower())] += 1
        metrics["http_response", ("status", status)] += 1
def call(self, service, method, *args):
    """
    JSON-RPC client

    :param service: Service path appended to self.cp_url
    :param method: JSON-RPC method name
    :param args: Positional parameters passed as "params"
    :return: "result" member of the JSON-RPC response
    :raises self.Error: on transport errors, undecodable responses or
        JSON-RPC level errors
    """
    self.t_id += 1
    r = {"id": self.t_id, "method": method, "params": args}
    auth = None
    if self.account_name and self.account_password:
        auth = (self.account_name, self.account_password)
    r = smart_text(orjson.dumps(r))
    logger.debug("JSON-RPC REQUEST: %s", r)
    try:
        # NOTE(review): no timeout= is set -- a stuck server blocks this
        # call indefinitely; consider adding one
        req = requests.post(self.cp_url + service, data=r, auth=auth, verify=True)
    except Exception as e:
        logger.error("JSON-RPC Error: %s", e)
        raise self.Error(str(e))
    try:
        response = req.json()
        logger.debug("JSON-RPC RESPONSE: %s", response)
    except ValueError as e:
        logger.error("Invalid JSON-RPC response: %s", e)
        raise self.Error("Invalid response")
    if response.get("error"):
        logger.error("JSON-RPC error: %s", response["error"])
        raise self.Error(response["error"])
    return response.get("result")
def format_frames(frames, reverse=config.traceback.reverse):
    """
    Render traceback frames (as produced by get_traceback_frames) as text.

    :param frames: List of frame dicts with filename, lineno, function,
        optional context lines and local variables
    :param reverse: Render innermost frame first when True
        (NOTE: default is bound once at import time from config)
    :return: Multi-line string
    """

    def format_source(lineno, lines):
        # Number the given source context lines starting at *lineno*
        r = []
        for line in lines:
            r += ["%5d %s" % (lineno, line)]
            lineno += 1
        return "\n".join(r)

    r = []
    r += ["START OF TRACEBACK"]
    r += ["-" * 72]
    fr = frames[:]
    if reverse:
        fr.reverse()
    for f in fr:
        r += ["File: %s (Line: %s)" % (os.path.relpath(f["filename"]), f["lineno"])]
        r += ["Function: %s" % (f["function"])]
        if "pre_context_lineno" in f:
            r += [format_source(f["pre_context_lineno"], f["pre_context"])]
            r += ["%5d ==> %s" % (f["lineno"], f["context_line"])]
            r += [format_source(f["lineno"] + 1, f["post_context"])]
            r += ["Variables:"]
            for n, v in f["vars"]:
                try:
                    pv = smart_text(repr(v))
                    if len(pv) > 72:
                        pv = "\n" + pprint.pformat(v)
                except:  # noqa
                    # repr() of arbitrary objects may itself raise
                    pv = "repr() failed"
                r += ["%20s = %s" % (n, pv)]
        else:
            # No source context available for this frame
            r += ["???"]
        r += ["-" * 72]
    r += ["END OF TRACEBACK"]
    return "\n".join(r)
def location(self, id):
    """
    Return geo address for Managed Objects

    :param id: Object id
    :return: Two-element list [location_1, location_2] splitting the
        normalized address parts roughly in half (empty strings when no
        address is available)

    Fix: the original indexed ``chunkIt(location, 2)[0]`` without a guard;
    when every address part was filtered out the list was empty and the
    method crashed with IndexError.
    """

    def chunkIt(seq, num):
        # Split *seq* into *num* nearly equal consecutive chunks;
        # returns [] for an empty sequence
        avg = len(seq) / float(num)
        out = []
        last = 0.0
        while last < len(seq):
            out.append(seq[int(last):int(last + avg)])
            last += avg
        return out

    location = []
    address = Object.get_by_id(id).get_address_text()
    if address:
        for res in address.split(","):
            adr = normalize_division(smart_text(res).strip().lower())
            if None in adr and "" in adr:
                # Unusable fragment: skip
                continue
            if None in adr:
                location += [adr[1].title().strip()]
            else:
                location += [" ".join(adr).title().strip()]
        res = chunkIt(location, 2)
        # Guard against an empty/short result (chunkIt([]) returns [])
        location_1 = ", ".join(res[0]) if len(res) > 0 else ""
        location_2 = ", ".join(res[1]) if len(res) > 1 else ""
        return [location_1, location_2]
    return ["", ""]
def execute_cli(self):
    """
    Extract the device FQDN from the CLI prompt regex.
    """
    # Getting pattern prompt
    self.cli("")
    v = self.get_cli_stream()
    pattern = smart_text(v.patterns["prompt"].pattern)
    # The prompt regex contains the hostname between escaped parentheses
    # (...\(<fqdn>\)...); take that span and drop remaining backslashes.
    # NOTE(review): raises IndexError if the prompt pattern has no "\(" --
    # assumed guaranteed by the profile's prompt definition.
    fqdn = pattern.split(r"\(", 1)[1].split("\\)", 1)[0].replace("\\", "")
    return fqdn
def get_display_key(self):
    """
    Return dereferenced key name
    """
    # Fall back to the stored key attribute when no object is attached
    if not self.object:
        return self.attrs[self.ATTR_KEY]
    return smart_text(self.object)
def on_update_model(cls, sender, instance, **kwargs): """ Audit trail for INSERT and UPDATE operations """ # logger.debug("Logging change for %s", instance) changes = [] if kwargs.get("created", True): # Create op = "C" changes = [{ "field": f.name, "old": None, "new": cls.get_field(instance, f) } for f in sender._meta.fields] else: # Update op = "U" for f in sender._meta.fields: od = instance._old_values.get(f.attname) if od is not None: od = smart_text(od) nd = cls.get_field(instance, f) if nd != od: changes += [{"field": f.name, "old": od, "new": nd}] cls.log(sender, instance, op, changes)
async def snmp_v2c_get(self, address, community, oid):
    """
    Perform SNMP v2c GET and return result
    :param address: IP address
    :param community: SNMP v2c community
    :param oid: Resolved oid
    :returns: Result as a string, or None, when no response
    """
    self.logger.debug("SNMP v2c GET %s %s", address, oid)
    try:
        result = await snmp_get(
            address=address,
            oids=oid,
            community=community,
            version=SNMP_v2c,
            tos=config.activator.tos,
        )
        self.logger.debug("SNMP GET %s %s returns %s", address, oid, result)
        # Decode to text, replacing undecodable bytes; keep falsy results as-is
        result = smart_text(result, errors="replace") if result else result
    except SNMPError as e:
        metrics["error", ("type", "snmp_v2_error")] += 1
        result = None
        self.logger.debug("SNMP GET %s %s returns error %s", address, oid, e)
    except Exception as e:
        # Best-effort: any unexpected error is logged and reported as
        # "no response" instead of propagating
        result = None
        self.logger.debug("SNMP GET %s %s returns unknown error %s", address, oid, e)
    return result
def from_idna(s):
    """
    Convert IDNA domain name to unicode

    Empty or None input yields None.
    """
    if not s:
        return None
    # Decode each label independently and reassemble the dotted name
    labels = [smart_text(label, encoding="idna") for label in s.split(".")]
    return ".".join(labels)
def parse_serial(self):
    """
    Collect chassis serial numbers, preferring SNMP (ENTITY-MIB) over CLI.

    :return: List of serial number strings (may be empty)
    """
    r = []
    if self.has_snmp():
        # Trying SNMP
        try:
            # ENTITY-MIB::entPhysicalSerialNum walk
            for oid, x in self.snmp.getnext(mib["ENTITY-MIB::entPhysicalSerialNum"]):
                if not x:
                    continue
                # Strip padding spaces and NUL bytes from the value
                r += [x.strip(smart_text(" \x00"))]
            if r:
                return r
        except (self.snmp.TimeOutError, self.snmp.SNMPError):
            # Fall through to CLI collection
            pass
    try:
        v = self.cli("display elabel slot 0")
    except self.CLISyntaxError:
        return []
    v = list(self.rx_parts.finditer(v))
    if v:
        v = v[0].groupdict()
        # part_body holds "Key=Value" lines; BarCode carries the serial
        v = dict(x.split("=", 1) for x in v["part_body"].splitlines())
        if "BarCode" in v:
            r += [v["BarCode"].strip()]
    return r
def on_data(self, message, records, *args, **kwargs):
    """
    Called on new dispose message

    Message format
    <table>.<field1>. .. .<fieldN>\n
    <v1>\t...\t<vN>\n
    ...
    <v1>\t...\t<vN>\n

    :return: False to requeue the message, True when consumed
        (including dropped legacy-format messages)
    """
    if self.stopping:
        # Refuse new messages while shutting down
        self.logger.info(
            "Message received during stopping, requeueing message")
        return False
    if self.restore_timeout:
        self.logger.info("ClickHouse is not available, requeueing message")
        return False
    if metrics["records_buffered"].value > config.chwriter.records_buffer:
        # Backpressure: defer until the buffer drains
        self.logger.info(
            "Input buffer is full (%s/%s). Deferring message",
            metrics["records_buffered"].value,
            config.chwriter.records_buffer,
        )
        metrics["deferred_messages"] += 1
        return False
    # First line is the table name, the remainder are TSV rows
    table, data = smart_text(records).split("\n", 1)
    self.logger.debug("Receiving %s", table)
    if "." in table or "|" in table:
        # Legacy-format messages are dropped, not requeued
        self.logger.error("Message in legacy format dropped: %s" % table)
        metrics["dropped_legacy_messages"] += 1
        return True
    channel = self.get_channel(table)
    n = channel.feed(data)
    metrics["records_received"] += n
    metrics["records_buffered"] += n
    return True
def q(s):
    """
    Quote a value for embedding into a query expression.

    Integers pass through unquoted, lists/tuples are rendered recursively
    as "[...]" and everything else is wrapped in double quotes.

    Fix: the original wrapped values in double quotes but never escaped
    an embedded '"' (only backslash and single quote), producing broken
    output for any value containing a double quote.
    """
    if isinstance(s, int):
        return str(s)
    elif isinstance(s, (list, tuple)):
        s = [q(x) for x in s]
        return "[%s]" % ", ".join(s)
    else:
        # Escape backslashes and both quote characters
        escaped = (
            smart_text(s)
            .replace("\\", "\\\\")
            .replace('"', '\\"')
            .replace("'", "\\'")
        )
        return '"%s"' % escaped
def parse_p_oid(self, msg):
    # type: (bytes) -> six.text_type
    """
    Decode a BER-packed OBJECT IDENTIFIER into dotted-string form.

    The raw decoded value is cached on self.last_oid; the return value
    is normalized to text.

    >>> BERDecoder().parse_p_oid("+\\x06\\x01\\x02\\x01\\x01\\x05\\x00")
    "1.3.6.1.2.1.1.5.0"
    """
    self.last_oid = parse_p_oid(msg)
    return smart_text(self.last_oid)
def fixport(port, port_type):
    """
    Normalize an Alcatel LLDP remote port identifier.

    :param port: Raw port id; for subtype "5" a hex string possibly
        containing ":" separators and embedded newlines
    :param port_type: LLDP port id subtype as a string
        ("5" = hex-encoded interface name, "7" = locally assigned)
    :return: Decoded port name, or the "u" placeholder for other subtypes

    Fix: the original's third branch operated on the uninitialized "u"
    placeholder instead of *port*, so single-line hex input crashed or
    produced garbage; the three duplicated "5" branches are now one.
    """
    if port_type == "5":
        # Hex-encoded interface name: strip line wrapping and separators,
        # then decode the remaining hex digits
        cleaned = port.replace("\n ", "").replace(":", "").replace("\n", "")
        return smart_text(codecs.decode(cleaned, "hex"))
    if port_type == "7":
        # Locally assigned id: just remove embedded newlines
        return port.replace("\n", "")
    # Unknown subtype: preserve the original placeholder value
    return "u"