def api_info_cloud(self, request, id, link_id):
    """
    Return cloud-link information within a network segment.

    :param request: HTTP request
    :param id: NetworkSegment id (existence check only; 404 if missing)
    :param link_id: Link id
    :return: dict with link id, name, description, discovery method and
             per-object interface lists
    """
    # Validate the segment exists before looking at the link
    self.get_object_or_404(NetworkSegment, id=id)
    link = self.get_object_or_404(Link, id=link_id)
    result = {
        "id": str(link.id),
        "name": link.name or None,
        "description": link.description or None,
        "objects": [],
        "method": link.discovery_method,
    }
    # Group the link's interfaces by their owning managed object
    by_object = defaultdict(list)
    for iface in link.interfaces:
        by_object[iface.managed_object].append(iface)
    # Objects ordered by name, interfaces in natural ("alphanumeric") order
    for mo in sorted(by_object, key=lambda m: m.name):
        ordered_ifaces = sorted(by_object[mo], key=lambda i: alnum_key(i.name))
        result["objects"].append(
            {
                "id": mo.id,
                "name": mo.name,
                "interfaces": [
                    {
                        "name": i.name,
                        "description": i.description or None,
                        "status": i.status,
                    }
                    for i in ordered_ifaces
                ],
            }
        )
    return result
def _get_interface(iface, subs, links, ifcache):
    """
    Build the datastream representation of one interface document.

    :param iface: raw interface document (mongo dict)
    :param subs: list of subinterface documents belonging to the interface
    :param links: list of link documents the interface participates in
    :param ifcache: interface id -> (managed object id, name) cache
    :return: serializable dict for the datastream
    """
    result = {
        "name": qs(iface["name"]),
        "type": iface["type"],
        "description": qs(iface.get("description")),
        "enabled_protocols": iface.get("enabled_protocols") or [],
        "admin_status": iface.get("admin_status", False),
    }
    # Optional scalar attributes
    if iface.get("ifindex"):
        result["snmp_ifindex"] = iface["ifindex"]
    if iface.get("mac"):
        result["mac"] = iface["mac"]
    agg_id = iface.get("aggregated_interface")
    if agg_id:
        # ifcache values are (managed object id, name); take the name
        result["aggregated_interface"] = ifcache[agg_id][-1]
    # Apply profile
    profile_id = iface.get("profile")
    if profile_id:
        profile = InterfaceProfile.get_by_id(profile_id)
        result["profile"] = ManagedObjectDataStream._get_interface_profile(profile)
    # Apply subinterfaces in natural sort order
    result["subinterfaces"] = [
        ManagedObjectDataStream._get_subinterface(sub)
        for sub in sorted(subs, key=lambda s: alnum_key(s["name"]))
    ]
    # Apply links, when any
    if links:
        result["link"] = ManagedObjectDataStream._get_link(iface, links, ifcache)
    return result
def api_info_link(self, request, id, link_id):
    """
    Return link information with per-direction utilisation figures.

    :param request: HTTP request
    :param id: NetworkSegment id (existence check only; 404 if missing)
    :param link_id: Link id
    :return: dict describing the link, its objects/interfaces and utilisation
    """
    # Fix: removed the unused inner helper `q(s)` — it was defined but never
    # referenced anywhere in the function (dead code).
    self.get_object_or_404(NetworkSegment, id=id)
    link = self.get_object_or_404(Link, id=link_id)
    r = {
        "id": str(link.id),
        "name": link.name or None,
        "description": link.description or None,
        "objects": [],
        "method": link.discovery_method,
    }
    # Group the link's interfaces by their owning managed object
    o = defaultdict(list)
    for i in link.interfaces:
        o[i.managed_object] += [i]
    for mo in sorted(o, key=lambda x: x.name):
        r["objects"] += [
            {
                "id": mo.id,
                "name": mo.name,
                "interfaces": [
                    {
                        "name": i.name,
                        "description": i.description or None,
                        "status": i.status,
                    }
                    for i in sorted(o[mo], key=lambda x: alnum_key(x.name))
                ],
            }
        ]
    # Get link bandwidth: sum In/Out load metrics per managed object
    mo_in = defaultdict(float)
    mo_out = defaultdict(float)
    mos = [ManagedObject.get_by_id(mo["id"]) for mo in r["objects"]]
    metric_map, last_ts = get_interface_metrics(list(o))
    for mo in o:
        if mo not in metric_map:
            continue
        for i in o[mo]:
            if i.name not in metric_map[mo]:
                continue
            mo_in[mo] += metric_map[mo][i.name]["Interface | Load | In"]
            mo_out[mo] += metric_map[mo][i.name]["Interface | Load | Out"]
    if len(mos) == 2:
        # Point-to-point link: utilisation per direction is the max of one
        # side's ingress and the opposite side's egress
        mo1, mo2 = mos
        r["utilisation"] = [
            int(max(mo_in[mo1], mo_out[mo2])),
            int(max(mo_in[mo2], mo_out[mo1])),
        ]
    else:
        mv = list(mo_in.values()) + list(mo_out.values())
        if mv:
            r["utilisation"] = [int(max(mv))]
        else:
            # NOTE(review): scalar 0 here vs. a list in the other branches —
            # kept as-is for backward compatibility; confirm whether callers
            # rely on this shape before normalizing to [].
            r["utilisation"] = 0
    return r
def get_template_context(self) -> Dict[str, Any]:
    """
    Build template context with the interfaces of links touching the
    attacker's segment, split into local (inside the segment) and
    parent (outside) lists, both in natural sort order.
    """
    local_interfaces: List[Interface] = []
    remote_interfaces: List[Interface] = []
    for link in Link.objects.filter(linked_segments=self.attacker.id):
        for iface in link.interfaces:
            # Interfaces whose object sits in the attacker segment are local
            if iface.managed_object.segment.id == self.attacker.id:
                bucket = local_interfaces
            else:
                bucket = remote_interfaces
            bucket.append(iface)
    return {
        "interfaces": sorted(local_interfaces, key=lambda i: alnum_key(i.name)),
        "parent_interfaces": sorted(remote_interfaces, key=lambda i: alnum_key(i.name)),
        "attacker": self.attacker,
        "target": self.target,
    }
def api_unlinked(self, request, object_id):
    """
    List physical interfaces of a managed object that have no link yet.

    :param request: HTTP request
    :param object_id: ManagedObject id
    :return: list of {"id", "label"} dicts in natural order of label
    """

    def make_label(iface):
        # Append the description, when present, for readability
        if iface.description:
            return "%s (%s)" % (iface.name, iface.description)
        return iface.name

    obj = self.get_object_or_404(ManagedObject, id=int(object_id))
    candidates = Interface.objects.filter(
        managed_object=obj.id, type="physical"
    ).order_by("name")
    unlinked = [
        {"id": str(iface.id), "label": make_label(iface)}
        for iface in candidates
        if not iface.link
    ]
    unlinked.sort(key=lambda item: alnum_key(item["label"]))
    return unlinked
def split_interfaces(obj, interfaces):
    # type: (ManagedObject, List[Interface]) -> Tuple[List[Interface], List[Interface]]
    """
    Split list of interfaces of the links to egress (belonging to `obj`)
    and ingress (leading out of object)

    :param obj: Managed Object
    :param interfaces: List of link interfaces
    :return: List of egress links, List of ingress links
    """
    egress = []  # type: List[Interface]
    ingress = []  # type: List[Interface]
    # Natural sort first so both halves come out ordered
    for iface in sorted(interfaces, key=lambda i: alnum_key(i.name)):
        target = egress if iface.managed_object == obj else ingress
        target.append(iface)
    return egress, ingress
def order_nodes(self, uplink, downlinks):
    """
    Sort downlinks basing on uplink's interface

    :param uplink: managed object id
    :param downlinks: ids of downlinks
    :returns: sorted list of downlinks
    """
    # port id -> first interface name (natural order) on the uplink side
    first_name = {}
    for port in self.G.node[uplink]["ports"]:
        first_name[port["id"]] = sorted(port["ports"], key=alnum_key)[0]
    # downlink -> name of the uplink port it attaches through
    attach_port = {}
    for dl in downlinks:
        for pid in self.G.edges[uplink, dl]["ports"]:
            if pid in first_name:
                attach_port[dl] = first_name[pid]
                break
    return sorted(attach_port, key=lambda dl: alnum_key(attach_port[dl]))
def handle_portmap(self, portmap_objects=None):
    """
    Print a per-interface NRI port-mapping report for the selected objects.

    For every object matched by the selector expressions, each physical
    interface name is converted to its remote (NRI) name and back, and the
    stored `nri_name` is compared against the round-trip result.

    :param portmap_objects: iterable of selector expressions (default: none)
    """
    # Fix: default was a mutable `[]`, which is evaluated once at function
    # definition and shared across calls (mutable-default pitfall).
    # Use a None sentinel instead; behavior for all existing callers is
    # unchanged.
    if portmap_objects is None:
        portmap_objects = ()
    for po in portmap_objects:
        for o in ManagedObjectSelector.get_objects_from_expression(po):
            if not o.remote_system:
                # Without a remote system there is no NRI mapping to check
                self.stdout.write("%s (%s, %s) NRI: N/A\n" % (o.name, o.address, o.platform))
                continue
            portmapper = loader.get_loader(o.remote_system.name)(o)
            nri = o.remote_system.name
            self.stdout.write("%s (%s, %s) NRI: %s\n" % (o.name, o.address, o.platform, nri))
            r = []
            for i in Interface._get_collection().find(
                {"managed_object": o.id, "type": "physical"},
                {"_id": 1, "name": 1, "nri_name": 1},
            ):
                # Round-trip conversion: local -> remote -> local
                rn = portmapper.to_remote(i["name"]) or self.PORT_ERROR
                if rn == self.PORT_ERROR:
                    ln = self.PORT_ERROR
                else:
                    ln = portmapper.to_local(rn) or self.PORT_ERROR
                if i.get("nri_name") == rn and ln != self.PORT_ERROR:
                    status = "OK"
                elif not i.get("nri_name") and ln != self.PORT_ERROR:
                    status = "Not in database"
                elif rn == self.PORT_ERROR:
                    status = "Failed to convert to remote name"
                else:
                    self.print(ln, rn, i.get("nri_name"))
                    status = "Failed to convert to local name"
                r += [(i["name"], rn, i.get("nri_name", "--"), status)]
            # Header row, then data rows in natural order of local name
            r = [("Local", "Remote", "Interface NRI", "Status")] + list(
                sorted(r, key=lambda x: alnum_key(x[0]))
            )
            self.stdout.write(
                "%s\n" % format_table([0, 0, 0, 0], r, sep=" | ", hsep="-+-")
            )
def get_recommended_version(cls, platform):
    """
    Get recommended version for platform

    :param platform: Platform instance
    :return: Version string or None
    """
    if not platform:
        return None
    # Explicitly recommended firmware wins
    fp = FirmwarePolicy.objects.filter(platform=platform.id, status=FS_RECOMMENDED).first()
    if fp:
        return fp.firmware.version
    versions = [
        fp.firmware.version
        for fp in FirmwarePolicy.objects.filter(platform=platform.id, status=FS_ACCEPTABLE)
    ]
    if versions:
        # Latest acceptable version under natural ordering.
        # max() with a key replaces list(sorted(...))[-1]: same result,
        # O(n) instead of O(n log n), and no redundant list()/lambda.
        return max(versions, key=alnum_key)
    return None
def _apply_interfaces(mo: ManagedObject, r):
    """
    Attach interface data (with subinterfaces, links, uplinks and services)
    to the datastream record `r` of managed object `mo`.
    """
    # interface id -> (managed object id, interface name)
    ifcache = {}
    # Interfaces in natural sort order of name
    ifaces = sorted(
        Interface._get_collection().find({"managed_object": mo.id}),
        key=lambda doc: alnum_key(doc["name"]),
    )
    for doc in ifaces:
        ifcache[doc["_id"]] = (doc["managed_object"], doc["name"])
    # interface id -> subinterface documents
    sub_map = defaultdict(list)
    for sub in SubInterface._get_collection().find({"managed_object": mo.id}):
        sub_map[sub["interface"]].append(sub)
    # interface id -> link documents
    link_map = defaultdict(list)
    for link_doc in Link._get_collection().find({"linked_objects": mo.id}):
        for if_id in link_doc.get("interfaces", []):
            link_map[if_id].append(link_doc)
    # Extend the cache with the far ends of the links
    if link_map:
        cursor = Interface._get_collection().find(
            {"_id": {"$in": list(link_map)}},
            {"_id": 1, "managed_object": 1, "name": 1},
        )
        for doc in cursor:
            ifcache[doc["_id"]] = (doc["managed_object"], doc["name"])
    # Resolve services referenced by the interfaces
    svc_ids = [doc["service"] for doc in ifaces if doc.get("service")]
    if svc_ids:
        services = {svc.id: svc for svc in Service.objects.filter(id__in=svc_ids)}
    else:
        services = {}
    uplinks = set(mo.data.uplinks)
    r["interfaces"] = [
        ManagedObjectDataStream._get_interface(
            doc, sub_map[doc["_id"]], link_map[doc["_id"]], ifcache, uplinks, services
        )
        for doc in ifaces
    ]
def _apply_interfaces(mo, r):
    """
    Attach interface data (with subinterfaces and links) to the datastream
    record `r` of managed object `mo`.
    """
    # interface id -> (managed object id, interface name)
    ifcache = {}
    # Interfaces in natural sort order of name
    ifaces = sorted(
        Interface._get_collection().find({"managed_object": mo.id}),
        key=lambda doc: alnum_key(doc["name"]),
    )
    for doc in ifaces:
        ifcache[doc["_id"]] = (doc["managed_object"], doc["name"])
    # interface id -> subinterface documents
    sub_map = defaultdict(list)
    for sub in SubInterface._get_collection().find({"managed_object": mo.id}):
        sub_map[sub["interface"]].append(sub)
    # interface id -> link documents
    link_map = defaultdict(list)
    for link_doc in Link._get_collection().find({"linked_objects": mo.id}):
        for if_id in link_doc.get("interfaces", []):
            link_map[if_id].append(link_doc)
    # Extend the cache with the far ends of the links
    if link_map:
        cursor = Interface._get_collection().find(
            {"_id": {"$in": list(link_map)}},
            {"_id": 1, "managed_object": 1, "name": 1},
        )
        for doc in cursor:
            ifcache[doc["_id"]] = (doc["managed_object"], doc["name"])
    r["interfaces"] = [
        ManagedObjectDataStream._get_interface(
            doc, sub_map[doc["_id"]], link_map[doc["_id"]], ifcache
        )
        for doc in ifaces
    ]
def match_gte(v, cv):
    """True when `v` >= `cv` under natural (alphanumeric) ordering."""
    left = alnum_key(v)
    right = alnum_key(cv)
    return left >= right
def resolve_object_data(self, object):
    """
    Collect per-interface and per-object metric configuration for self.object.

    Groups the object's interfaces by interface profile (only profiles with
    box/periodic metrics enabled) and gathers physical ports, LAGs,
    subinterfaces, radio and DOM metric carriers, plus object-level metrics
    and "Check ..." metrics.

    :param object: not read here; data comes from self.object / self.extra_vars
    :return: dict with port_types, selected_types, object_metrics,
             object_check_metrics, lags, subifaces, radio_types, dom_types
    """

    def interface_profile_has_metrics(profile):
        """Check whether the profile has any box/periodic metric enabled."""
        for m in profile.metrics:
            if m.enable_box or m.enable_periodic:
                return True
        return False

    def interface_radio_metrics(profile):
        """Return field names of the profile's "Radio*" metrics, or None."""
        metrics = []
        for m in profile.metrics:
            if m.metric_type.name.startswith("Radio"):
                metrics.append(m.metric_type.field_name)
        if metrics:
            return metrics
        return None

    def interface_dom_metrics(profile):
        """Return field names of the profile's "Interface | DOM*" metrics, or None."""
        metrics = []
        for m in profile.metrics:
            if m.metric_type.name.startswith("Interface | DOM"):
                metrics.append(m.metric_type.field_name)
        if metrics:
            return metrics
        return None

    def check_metrics(metric):
        """True when the metric type is a "Check*" (object check) metric."""
        if metric.name.startswith("Check"):
            return True
        return False

    port_types = []
    object_metrics = []
    object_check_metrics = []
    lags = []
    subif = []
    radio_types = []
    dom_types = []
    selected_types = defaultdict(list)
    # Comma-separated interface names pre-selected by the caller
    selected_ifaces = set(self.extra_vars.get("var_ifaces", "").split(","))
    # Get all interface profiles with configurable metrics
    all_ifaces = list(Interface.objects.filter(managed_object=self.object.id))
    iprof = set(i.profile for i in all_ifaces)
    # @todo: Order by priority
    profiles = [p for p in iprof if interface_profile_has_metrics(p)]
    # Create charts for configured interface metrics
    for profile in profiles:
        ifaces = [i for i in all_ifaces if i.profile == profile]
        ports = []
        radio = []
        dom = []
        for iface in sorted(ifaces, key=lambda el: alnum_key(el.name)):
            if iface.type == "SVI" and not iface.profile.allow_subinterface_metrics:
                continue
            if iface.type == "aggregated" and iface.lag_members:
                # LAGs are reported separately with their member ports
                lags += [
                    {
                        "name": iface.name,
                        "ports": [i.name for i in iface.lag_members],
                        "descr": self.str_cleanup(
                            iface.description, remove_letters=TITLE_BAD_CHARS
                        )
                        or "No description",
                        "status": [
                            ", Status : ".join([i.name, i.status])
                            for i in iface.lag_members
                        ],
                    }
                ]
                continue
            if interface_radio_metrics(profile):
                radio += [
                    {
                        "name": iface.name,
                        "descr": self.str_cleanup(
                            iface.description, remove_letters=TITLE_BAD_CHARS
                        ),
                        "status": iface.status,
                        "metrics": interface_radio_metrics(profile),
                    }
                ]
            if interface_dom_metrics(profile) and iface.type == "physical":
                dom += [
                    {
                        "name": iface.name,
                        "descr": self.str_cleanup(
                            iface.description, remove_letters=TITLE_BAD_CHARS
                        ),
                        "status": iface.status,
                        "metrics": interface_dom_metrics(profile),
                        "type": profile.id,
                        "profile_name": profile.name,
                    }
                ]
            if iface.type == "physical":
                ports += [
                    {
                        "name": iface.name,
                        "descr": self.str_cleanup(
                            iface.description, remove_letters=TITLE_BAD_CHARS
                        ),
                        "status": iface.status,
                    }
                ]
            # NOTE(review): the following two checks are reconstructed at
            # loop level (not inside the "physical" branch) — confirm against
            # upstream history if subinterface/selection handling looks off.
            if iface.profile.allow_subinterface_metrics:
                subif += [
                    {
                        "name": si.name,
                        "descr": self.str_cleanup(
                            si.description, remove_letters=TITLE_BAD_CHARS
                        ),
                    }
                    for si in SubInterface.objects.filter(interface=iface)
                ]
            if iface.name in selected_ifaces:
                selected_types[profile.id] += [iface.name]
        if ports:
            port_types += [{"type": profile.id, "name": profile.name, "ports": ports}]
        if radio:
            radio_types += [{"type": profile.id, "name": profile.name, "ports": radio}]
        if dom:
            dom_types += dom
    if self.object.object_profile.report_ping_rtt:
        object_metrics += ["rtt"]
    om = []
    ocm = []
    for m in self.object.object_profile.metrics or []:
        mt = MetricType.get_by_id(m["metric_type"])
        # Skip unknown metric types and those not enabled for collection
        if not mt or not (m.get("enable_periodic", False) or m.get("enable_box", False)):
            continue
        if check_metrics(mt):
            ocm += [{"name": mt.name, "metric": mt.field_name}]
            continue
        om += [mt.name]
    object_metrics.extend(sorted(om))
    object_check_metrics.extend(sorted(ocm, key=operator.itemgetter("name")))
    return {
        "port_types": port_types,
        "selected_types": selected_types,
        "object_metrics": object_metrics,
        "object_check_metrics": object_check_metrics,
        "lags": lags,
        "subifaces": subif,
        "radio_types": radio_types,
        "dom_types": sorted(dom_types, key=lambda x: alnum_key(x["name"])),
    }
def match_lte(v, cv):
    """True when `v` <= `cv` under natural (alphanumeric) ordering."""
    left = alnum_key(v)
    right = alnum_key(cv)
    return left <= right
def match_lt(v, cv):
    """True when `v` < `cv` under natural (alphanumeric) ordering."""
    left = alnum_key(v)
    right = alnum_key(cv)
    return left < right
def sorted_iname(s):
    """
    Return items sorted by natural ("alphanumeric") order of their "name" key.

    :param s: iterable of dicts with a "name" key
    :return: new sorted list
    """
    # sorted() already returns a list; the previous list() wrapper was redundant
    return sorted(s, key=lambda x: alnum_key(x["name"]))
def get_data(self):
    """
    Assemble the full card data dictionary for self.object: status/uptime,
    container path, MACs, hostname, links, interfaces with load/error
    metrics, sensors, resource groups, alarms, maintenance, inventory
    and firmware information.

    :return: dict of card data, or None when self.object is not set
    """
    # (label, seconds) pairs, largest unit first, for display_time()
    intervals = (
        ("y", 31557617),  # 60 * 60 * 24 * 7 * 52
        ("w", 604800),  # 60 * 60 * 24 * 7
        ("d", 86400),  # 60 * 60 * 24
        ("h", 3600),  # 60 * 60
        ("m", 60),
        ("s", 1),
    )

    def display_time(seconds):
        """Render a duration in seconds as e.g. "1w, 2d, 3h"."""
        result = []
        for name, count in intervals:
            value = seconds // count
            if value:
                seconds -= value * count
                if value == 1:
                    name = name.rstrip("s")
                result.append("{}{}".format(value, name))
        # NOTE(review): result[:-1] drops the smallest unit — presumably
        # intentional coarsening for display; confirm.
        return ", ".join(result[:-1])

    def sortdict(dct):
        """Return an OrderedDict of dct with keys in sorted order."""
        kys = sorted(dct.keys())
        res = OrderedDict()
        for x in kys:
            for k, v in dct.items():
                if k == x:
                    res[k] = v
        return res

    def get_container_path(self):
        # Get container path
        if not self.object:
            return None
        cp = []
        if self.object.container:
            c = self.object.container.id
            while c:
                try:
                    o = Object.objects.get(id=c)
                    # @todo: Address data
                    if o.container:
                        cp.insert(0, {"id": o.id, "name": o.name})
                    c = o.container.id if o.container else None
                except DoesNotExist:
                    metrics["error", ("type", "no_such_object")] += 1
                    break
        return cp

    if not self.object:
        return None
    # @todo: Stage
    # @todo: Service range
    # @todo: Open TT
    now = datetime.datetime.now()
    # Get object status and uptime
    alarms = list(ActiveAlarm.objects.filter(managed_object=self.object.id))
    current_start = None
    duration = None
    if self.object.is_managed:
        if self.object.get_status():
            if alarms:
                current_state = "alarm"
            else:
                current_state = "up"
            uptime = Uptime.objects.filter(object=self.object.id, stop=None).first()
            if uptime:
                current_start = uptime.start
        else:
            current_state = "down"
            outage = Outage.objects.filter(object=self.object.id, stop=None).first()
            if outage is not None:
                current_start = outage.start
    else:
        current_state = "unmanaged"
    if current_start:
        duration = now - current_start
    cp = get_container_path(self)
    # MAC addresses (ranges collapse to a single entry when first == last)
    macs = []
    o_macs = DiscoveryID.macs_for_object(self.object)
    if o_macs:
        for f, l in o_macs:
            if f == l:
                macs += [f]
            else:
                macs += ["%s - %s" % (f, l)]
    # Hostname
    hostname = ""
    did = DiscoveryID.objects.filter(object=self.object.id).first()
    if did and did.hostname:
        hostname = did.hostname
    # Links
    uplinks = set(self.object.data.uplinks)
    if len(uplinks) > 1:
        # L = lost redundancy, R = redundant, N = no redundancy
        if self.object.segment.lost_redundancy:
            redundancy = "L"
        else:
            redundancy = "R"
    else:
        redundancy = "N"
    links = []
    for _link in Link.object_links(self.object):
        local_interfaces = []
        remote_interfaces = []
        remote_objects = set()
        for iface in _link.interfaces:
            if iface.managed_object.id == self.object.id:
                local_interfaces += [iface]
            else:
                remote_interfaces += [iface]
                remote_objects.add(iface.managed_object)
        # Only point-to-point links (exactly one remote object) are shown
        if len(remote_objects) == 1:
            ro = remote_objects.pop()
            if ro.id in uplinks:
                role = "uplink"
            else:
                role = "downlink"
            links += [
                {
                    "id": _link.id,
                    "role": role,
                    "local_interface": sorted(
                        local_interfaces, key=lambda x: alnum_key(x.name)
                    ),
                    "remote_object": ro,
                    "remote_interface": sorted(
                        remote_interfaces, key=lambda x: alnum_key(x.name)
                    ),
                    "remote_status": "up" if ro.get_status() else "down",
                }
            ]
    # Uplinks first, then natural order of first local interface
    links = sorted(
        links,
        key=lambda x: (x["role"] != "uplink", alnum_key(x["local_interface"][0].name)),
    )
    # Build global services summary
    service_summary = ServiceSummary.get_object_summary(self.object)
    # Interfaces
    interfaces = []
    mo = ManagedObject.objects.filter(id=self.object.id)
    mo = mo[0]
    ifaces_metrics, last_ts = get_interface_metrics(mo)
    ifaces_metrics = ifaces_metrics[mo]
    objects_metrics, last_time = get_objects_metrics(mo)
    objects_metrics = objects_metrics.get(mo)
    # Sensors
    sensors_metrics = None
    s_metrics = None
    sensors = {}
    s_meta = []
    STATUS = {0: "OK", 1: "Alarm"}
    meric_map = {}
    if mo.get_caps().get("Sensor | Controller"):
        # Build field_name -> metric name map for the Environment scope
        for mc in MetricType.objects.filter(scope=MetricScope.objects.get(name="Environment")):
            if meric_map:
                meric_map["map"].update({mc.field_name: mc.name})
            else:
                meric_map = {"table_name": mc.scope.table_name, "map": {mc.field_name: mc.name}}
        sensors_metrics, last_ts = get_interface_metrics(mo, meric_map)
        sensors_metrics = sensors_metrics[mo]
    # metric type name -> threshold profile
    m_tp = {}
    if mo.object_profile.metrics:
        for mt in mo.object_profile.metrics:
            if mt.get("threshold_profile"):
                threshold_profile = ThresholdProfile.get_by_id(mt.get("threshold_profile"))
                m_tp[MetricType.get_by_id(mt.get("metric_type")).name] = threshold_profile
    data = {}
    meta = []
    metric_type_name = dict(MetricType.objects.filter().scalar("name", "measure"))
    metric_type_field = dict(MetricType.objects.filter().scalar("field_name", "measure"))
    if objects_metrics:
        for path, mres in objects_metrics.items():
            t_v = False
            for key in mres:
                m_path = path if any(path.split("|")) else key
                m_path = " | ".join(kk.strip() for kk in m_path.split("|"))
                if m_tp.get(key):
                    t_v = self.get_threshold_config(m_tp.get(key), int(mres[key]))
                val = {
                    "name": m_path,
                    "type": "" if m_path == "Object | SysUptime" else metric_type_name[key],
                    "value": display_time(int(mres[key]))
                    if m_path == "Object | SysUptime"
                    else mres[key],
                    "threshold": t_v,
                }
                if data.get(key):
                    data[key] += [val]
                else:
                    data[key] = [val]
        data = sortdict(data)
        for k, d in data.items():
            collapsed = False
            if len(d) == 1:
                collapsed = True
            for dd in d:
                isdanger = False
                if dd["threshold"]:
                    isdanger = True
                    collapsed = True
            meta.append({"name": k, "value": d, "collapsed": collapsed, "isdanger": isdanger})
    for i in Interface.objects.filter(managed_object=self.object.id, type="physical"):
        load_in = "-"
        load_out = "-"
        errors_in = "-"
        errors_out = "-"
        iface_metrics = ifaces_metrics.get(str(i.name))
        if iface_metrics:
            for key, value in iface_metrics.items():
                metric_type = metric_type_name.get(key) or metric_type_field.get(key)
                if key == "Interface | Load | In":
                    load_in = (
                        "%s%s" % (self.humanize_speed(value, metric_type), metric_type)
                        if value
                        else "-"
                    )
                if key == "Interface | Load | Out":
                    load_out = (
                        "%s%s" % (self.humanize_speed(value, metric_type), metric_type)
                        if value
                        else "-"
                    )
                if key == "Interface | Errors | In":
                    errors_in = value if value else "-"
                if key == "Interface | Errors | Out":
                    errors_out = value if value else "-"
        interfaces += [
            {
                "id": i.id,
                "name": i.name,
                "admin_status": i.admin_status,
                "oper_status": i.oper_status,
                "mac": i.mac or "",
                "full_duplex": i.full_duplex,
                "load_in": load_in,
                "load_out": load_out,
                "errors_in": errors_in,
                "errors_out": errors_out,
                "speed": max([i.in_speed or 0, i.out_speed or 0]) / 1000,
                # NOTE(review): "untagged_vlan"/"tagged_vlan" placeholders;
                # below only "untagged_vlan" and "tagged_vlans" (plural) are
                # filled — "tagged_vlan" appears to stay None; confirm.
                "untagged_vlan": None,
                "tagged_vlan": None,
                "profile": i.profile,
                "service": i.service,
                "service_summary": service_summary.get("interface").get(i.id, {}),
                "description": i.description,
            }
        ]
        if sensors_metrics:
            s_metrics = sensors_metrics.get(str(i.name))
        if s_metrics:
            sens_metrics = []
            for i_metrics in i.profile.metrics:
                sens_metrics.append(i_metrics.metric_type.name)
            for key, value in s_metrics.items():
                if key not in sens_metrics:
                    continue
                val = {
                    "name": key,
                    "type": metric_type_name[key],
                    "value": STATUS.get(value) if metric_type_name[key] == " " else value,
                    "threshold": None,
                }
                if sensors.get(i.name):
                    sensors[i.name] += [val]
                else:
                    sensors[i.name] = [val]
        si = list(i.subinterface_set.filter(enabled_afi="BRIDGE"))
        if len(si) == 1:
            si = si[0]
            interfaces[-1]["untagged_vlan"] = si.untagged_vlan
            interfaces[-1]["tagged_vlans"] = list_to_ranges(si.tagged_vlans).replace(",", ", ")
    if sensors:
        sensors = sortdict(sensors)
        for k, d in sensors.items():
            for dd in d:
                isdanger = False
                if dd["threshold"]:
                    isdanger = True
            s_meta.append({"name": k, "value": d, "isdanger": isdanger})
    interfaces = sorted(interfaces, key=lambda x: alnum_key(x["name"]))
    # Resource groups
    # Service groups (i.e. server)
    static_services = set(self.object.static_service_groups)
    service_groups = []
    for rg_id in self.object.effective_service_groups:
        rg = ResourceGroup.get_by_id(rg_id)
        service_groups += [
            {
                "id": rg_id,
                "name": rg.name,
                "technology": rg.technology,
                "is_static": rg_id in static_services,
            }
        ]
    # Client groups (i.e. client)
    static_clients = set(self.object.static_client_groups)
    client_groups = []
    for rg_id in self.object.effective_client_groups:
        rg = ResourceGroup.get_by_id(rg_id)
        client_groups += [
            {
                "id": rg_id,
                "name": rg.name,
                "technology": rg.technology,
                "is_static": rg_id in static_clients,
            }
        ]
    # @todo: Administrative domain path
    # Alarms
    alarm_list = []
    for a in alarms:
        alarm_list += [
            {
                "id": a.id,
                "root_id": self.get_root(alarms),
                "timestamp": a.timestamp,
                "duration": now - a.timestamp,
                "subject": a.subject,
                "managed_object": a.managed_object,
                "service_summary": {
                    "service": SummaryItem.items_to_dict(a.total_services),
                    "subscriber": SummaryItem.items_to_dict(a.total_subscribers),
                },
                "alarm_class": a.alarm_class,
            }
        ]
    alarm_list = sorted(alarm_list, key=operator.itemgetter("timestamp"))
    # Maintenance: active or starting within the next hour
    maintenance = []
    for m in Maintenance.objects.filter(
        affected_objects__object=self.object.id,
        is_completed=False,
        start__lte=now + datetime.timedelta(hours=1),
    ):
        maintenance += [
            {
                "maintenance": m,
                "id": m.id,
                "subject": m.subject,
                "start": m.start,
                "stop": m.stop,
                "in_progress": m.start <= now,
            }
        ]
    # Get Inventory
    inv = []
    for p in self.object.get_inventory():
        c = self.get_nested_inventory(p)
        c["name"] = p.name or self.object.name
        inv += [c]
    # Build result
    if self.object.platform is not None:
        platform = self.object.platform.name
    else:
        platform = "Unknown"
    if self.object.version is not None:
        version = self.object.version.version
    else:
        version = ""
    r = {
        "id": self.object.id,
        "object": self.object,
        "name": self.object.name,
        "address": self.object.address,
        "platform": platform,  # self.object.platform.name if self.object.platform else "Unknown",
        "version": version,  # self.object.version.version if self.object.version else "",
        "description": self.object.description,
        "object_profile": self.object.object_profile.id,
        "object_profile_name": self.object.object_profile.name,
        "hostname": hostname,
        "macs": ", ".join(sorted(macs)),
        "segment": self.object.segment,
        "firmware_status": FirmwarePolicy.get_status(self.object.platform, self.object.version),
        "firmware_recommended": FirmwarePolicy.get_recommended_version(self.object.platform),
        "service_summary": service_summary,
        "container_path": cp,
        "current_state": current_state,
        # Start of uptime/downtime
        "current_start": current_start,
        # Current uptime/downtime
        "current_duration": duration,
        "service_groups": service_groups,
        "client_groups": client_groups,
        "tt": [],
        "links": links,
        "alarms": alarm_list,
        "interfaces": interfaces,
        "metrics": meta,
        "sensors": s_meta,
        "maintenance": maintenance,
        "redundancy": redundancy,
        "inventory": self.flatten_inventory(inv),
        "serial_number": self.object.get_attr("Serial Number"),
        "attributes": list(
            ManagedObjectAttribute.objects.filter(managed_object=self.object.id)
        ),
        "confdb": None,
    }
    # ConfDB is best-effort: parse errors leave it as None
    try:
        r["confdb"] = self.object.get_confdb()
    except (SyntaxError, ValueError):
        pass
    return r
def iter_nodes(self):
    """Yield child nodes ordered by natural (alphanumeric) key of their name."""
    if not self.children:
        return
    for child_name in sorted(self.children, key=lambda n: alnum_key(str(n or ""))):
        yield self.children[child_name]
def test_alnum_key(input, expected):
    """Verify alnum_key yields the expected natural-sort key."""
    # `input`/`expected` come from pytest parametrization elsewhere in the
    # file; the parameter names are part of that contract and must not change.
    actual = alnum_key(input)
    assert actual == expected
def get_interfaces(mo):
    """Return the managed object's physical interfaces in natural sort order."""
    physical = Interface.objects.filter(managed_object=mo.id, type="physical")
    return sorted(physical, key=lambda iface: alnum_key(iface.name))
def match_gt(v, cv):
    """True when `v` > `cv` under natural (alphanumeric) ordering."""
    left = alnum_key(v)
    right = alnum_key(cv)
    return left > right