def _full_updatemacmap(configmanager):
    """Rebuild every MAC-related lookup table by walking all switches.

    Runs under the `mapupdating` lock so concurrent callers serialize.
    Yields one result per switch as `_map_switch` completes, refreshing
    `vintage` along the way.  After the sweep, `switchbackoff` is raised
    to 15x the sweep duration so SNMP polling does not dominate runtime.

    :raises exc.ForbiddenRequest: when invoked from a tenant context;
        topology data is global-admin only.
    """
    global vintage
    global _macmap
    global _nodesbymac
    global _switchportmap
    global _macsbyswitch
    global switchbackoff
    start = util.monotonic_time()
    with mapupdating:
        vintage = util.monotonic_time()
        # Clear all existing entries
        _macmap = {}
        _nodesbymac = {}
        _switchportmap = {}
        _macsbyswitch = {}
        if configmanager.tenant is not None:
            raise exc.ForbiddenRequest(
                'Network topology not available to tenants')
        # here's a list of switches... need to add nodes that are switches
        nodelocations = configmanager.get_node_attributes(
            configmanager.list_nodes(), ('net*.switch', 'net*.switchport'))
        switches = set([])
        for node in nodelocations:
            cfg = nodelocations[node]
            for attr in cfg:
                # only examine net*.switch attributes that carry a value
                if not attr.endswith('.switch') or 'value' not in cfg[attr]:
                    continue
                curswitch = cfg[attr].get('value', None)
                if not curswitch:
                    continue
                switches.add(curswitch)
                switchportattr = attr + 'port'
                if switchportattr in cfg:
                    portname = cfg[switchportattr].get('value', '')
                    if not portname:
                        continue
                    if curswitch not in _switchportmap:
                        _switchportmap[curswitch] = {}
                    if portname in _switchportmap[curswitch]:
                        # two nodes claim the same switch port: poison the
                        # entry (None) so neither is wrongly identified
                        log.log({'error': 'Duplicate switch topology config '
                                          'for {0} and {1}'.format(
                                              node,
                                              _switchportmap[curswitch][
                                                  portname])})
                        _switchportmap[curswitch][portname] = None
                    else:
                        _switchportmap[curswitch][portname] = node
        switchauth = get_switchcreds(configmanager, switches)
        pool = GreenPool(64)
        for ans in pool.imap(_map_switch, switchauth):
            vintage = util.monotonic_time()
            yield ans
    endtime = util.monotonic_time()
    duration = endtime - start
    duration = duration * 15  # wait 15 times as long as it takes to walk
    # avoid spending a large portion of the time hitting switches with snmp
    # requests
    if duration > switchbackoff:
        switchbackoff = duration
def __init__(self, node, configmanager, width=80, height=24):
    """Set up console-handler state backed by a pyte virtual screen.

    :param node: name of the node whose console this handles
    :param configmanager: configuration manager (supplies tenant and
        attribute watching)
    :param width: initial terminal width (stored in initsize)
    :param height: initial terminal height (stored in initsize)
    """
    self.clearpending = False
    self.clearerror = False
    self.initsize = (width, height)
    self._dologging = True
    self._is_local = True
    self._isondemand = False
    self.error = None
    self._retrytime = 0
    self.cfgmgr = configmanager
    self.node = node
    self.connectstate = 'unconnected'
    self._isalive = True
    # virtual terminal used as the replay buffer for new viewers
    self.buffer = pyte.Screen(100, 31)
    self.termstream = pyte.ByteStream()
    self.termstream.attach(self.buffer)
    self.livesessions = set([])
    # incremental decoder so multi-byte utf-8 split across chunks survives
    self.utf8decoder = codecs.getincrementaldecoder('utf-8')()
    if self._logtobuffer:
        self.logger = log.Logger(node, console=True,
                                 tenant=configmanager.tenant)
    # NOTE(review): timestamp is hardcoded False here, so the wall-clock
    # adjustment branch below is currently dead code (unlike the sibling
    # __init__ that reads read_recent_text) — confirm intentional.
    (text, termstate, timestamp) = (b'', 0, False)
    # when reading from log file, we will use wall clock
    # it should usually match walltime.
    self.lasttime = 0
    if timestamp:
        timediff = time.time() - timestamp
        if timediff > 0:
            self.lasttime = util.monotonic_time() - timediff
        else:
            # wall clock has gone backwards, use current time as best
            # guess
            self.lasttime = util.monotonic_time()
    self.clearbuffer()
    self.appmodedetected = False
    self.shiftin = None
    self.reconnect = None
    # termstate bit 0: application cursor mode; bit 1: shift-in charset
    if termstate & 1:
        self.appmodedetected = True
    if termstate & 2:
        self.shiftin = b'0'
    self.users = {}
    self._attribwatcher = None
    self._console = None
    self.connectionthread = None
    self.send_break = None
    if self._genwatchattribs:
        self._attribwatcher = self.cfgmgr.watch_attributes(
            (self.node, ), self._genwatchattribs, self._attribschanged)
    self.check_isondemand()
    if not self._isondemand:
        # eagerly connect unless configured for on-demand connection
        self.connectstate = 'connecting'
        eventlet.spawn(self._connect)
def _recheck_single_unknown(configmanager, mac):
    """Revisit a single MAC that could not be identified earlier.

    If the endpoint's https service is still not up, (re)schedule the
    periodic recheck; if it is up and its certificate matches the stored
    fingerprint, record the node as discovered; otherwise hand it off to
    eval_node on the discovery pool.
    """
    global rechecker
    global rechecktime
    info = unknown_info.get(mac, None)
    if not info:
        return
    # pxe-handled entries may legitimately lack addresses; anything else
    # without addresses cannot be examined
    if info['handler'] != pxeh and not info.get('addresses', None):
        #log.log({'info': 'Missing address information in ' + repr(info)})
        return
    handler = info['handler'].NodeHandler(info, configmanager)
    if handler.https_supported and not handler.https_cert:
        if handler.cert_fail_reason == 'unreachable':
            log.log({
                'info': '{0} with hwaddr {1} is not reachable at {2}'
                        ''.format(
                    handler.devname, info['hwaddr'], handler.ipaddr
                )})
            # addresses data is bad, delete the offending ip
            info['addresses'] = [x for x in info.get('addresses', [])
                                 if x != handler.ipaddr]
            # TODO(jjohnson2): rescan due to bad peer addr data?
            # not just wait around for the next announce
            return
        log.log({
            'info': '{0} with hwaddr {1} at address {2} is not yet running '
                    'https, will examine later'.format(
                handler.devname, info['hwaddr'], handler.ipaddr
            )})
        # pull an already-scheduled recheck closer if it is over 300s away
        if rechecker is not None and rechecktime > util.monotonic_time() + 300:
            rechecker.cancel()
        # if cancel did not result in dead, then we are in progress
        if rechecker is None or rechecker.dead:
            rechecktime = util.monotonic_time() + 300
            rechecker = eventlet.spawn_after(300, _periodic_recheck,
                                             configmanager)
        return
    nodename = get_nodename(configmanager, handler, info)
    if nodename:
        if handler.https_supported:
            dp = configmanager.get_node_attributes(
                [nodename], ('pubkeys.tls_hardwaremanager', ))
            lastfp = dp.get(nodename, {}).get('pubkeys.tls_hardwaremanager',
                                              {}).get('value', None)
            if util.cert_matches(lastfp, handler.https_cert):
                # certificate fingerprint matches the stored pubkey:
                # this is the node we already know
                info['nodename'] = nodename
                known_nodes[nodename][info['hwaddr']] = info
                info['discostatus'] = 'discovered'
                return  # already known, no need for more
        discopool.spawn_n(eval_node, configmanager, handler, info, nodename)
def __init__(self, operation, node, element, cfd, inputdata, cfg, output):
    """Set up a per-request IPMI command context for a node.

    Reuses the cached session in persistent_ipmicmds for (node, tenant)
    when it is still logged in; otherwise tears the stale one down and
    establishes a fresh session, waiting up to 180 seconds for login.

    :raises exc.TargetEndpointUnreachable: when login never completes or
        the BMC hostname does not resolve (gaierror -2).
    """
    self.sensormap = {}
    self.invmap = {}
    self.output = output
    self.sensorcategory = None
    self.broken = False
    self.error = None
    eventlet.sleep(0)  # yield to other greenthreads before heavier work
    self.cfg = cfd[node]
    self.loggedin = False
    self.node = node
    self.element = element
    self.op = operation
    connparams = get_conn_params(node, self.cfg)
    self.ipmicmd = None
    self.inputdata = inputdata
    self.tenant = cfg.tenant
    tenant = cfg.tenant
    if ((node, tenant) not in persistent_ipmicmds or
            not persistent_ipmicmds[(node, tenant)].ipmi_session.logged):
        try:
            persistent_ipmicmds[(node, tenant)].close_confluent()
        except KeyError:  # was no previous session
            pass
        try:
            persistent_ipmicmds[(node, tenant)] = IpmiCommandWrapper(
                node, cfg, bmc=connparams['bmc'],
                userid=connparams['username'],
                password=connparams['passphrase'], kg=connparams['kg'],
                port=connparams['port'], onlogon=self.logged)
            ipmisess = persistent_ipmicmds[(node, tenant)].ipmi_session
            begin = util.monotonic_time()
            # pump the session until logged in, broken, or 180s elapsed
            while ((not (self.broken or self.loggedin)) and
                   (util.monotonic_time() - begin) < 180):
                ipmisess.wait_for_rsp(180)
            if not (self.broken or self.loggedin):
                raise exc.TargetEndpointUnreachable(
                    "Login process to " + connparams['bmc'] + " died")
        except socket.gaierror as ge:
            # BUGFIX: indexing an exception (ge[0]/ge[1]) only worked on
            # Python 2; the errno/strerror attributes work on both 2 and 3.
            if ge.errno == -2:
                raise exc.TargetEndpointUnreachable(ge.strerror)
            raise
    self.ipmicmd = persistent_ipmicmds[(node, tenant)]
def __init__(self, node, configmanager):
    """Set up console-handler state with a plain bytearray buffer.

    Replays recent console log text into the buffer (when logging to
    buffer is enabled) and restores terminal-mode flags persisted in the
    log's termstate bitmask.
    """
    self._dologging = True
    self._isondemand = False
    self.error = None
    self.cfgmgr = configmanager
    self.node = node
    self.connectstate = 'unconnected'
    self._isalive = True
    self.buffer = bytearray()
    self.livesessions = set([])
    if self._logtobuffer:
        self.logger = log.Logger(node, console=True,
                                 tenant=configmanager.tenant)
        (text, termstate, timestamp) = self.logger.read_recent_text(8192)
    else:
        (text, termstate, timestamp) = ('', 0, False)
    # when reading from log file, we will use wall clock
    # it should usually match walltime.
    self.lasttime = 0
    if timestamp:
        timediff = time.time() - timestamp
        if timediff > 0:
            # shift monotonic clock back by the wall-clock age of the log
            self.lasttime = util.monotonic_time() - timediff
        else:
            # wall clock has gone backwards, use current time as best
            # guess
            self.lasttime = util.monotonic_time()
    # NOTE(review): text is appended to a bytearray; read_recent_text is
    # presumably returning bytes-compatible data — confirm on Python 3.
    self.buffer += text
    self.appmodedetected = False
    self.shiftin = None
    self.reconnect = None
    # termstate bit 0: application cursor mode; bit 1: shift-in charset
    if termstate & 1:
        self.appmodedetected = True
    if termstate & 2:
        self.shiftin = '0'
    self.users = {}
    self._attribwatcher = None
    self._console = None
    self.connectionthread = None
    self.send_break = None
    if self._genwatchattribs:
        self._attribwatcher = self.cfgmgr.watch_attributes(
            (self.node, ), self._genwatchattribs, self._attribschanged)
    self.check_isondemand()
    if not self._isondemand:
        # eagerly connect unless configured for on-demand connection
        self.connectstate = 'connecting'
        eventlet.spawn(self._connect)
def _find_service(service, target):
    """Search for an SSDP service, yielding parsed peer data dicts.

    When target is given, the M-SEARCH is unicast to each of its
    addresses; otherwise it is multicast/broadcast on every interface.
    Responses are collected for about 4 seconds, since SSDP encourages
    replies to spread over a 3 second window.
    """
    net4 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    net6 = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
    net6.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 1)

    def _tobytes(msg):
        # BUGFIX: socket.sendto requires bytes on Python 3, but
        # smsg.format yields text; encode when needed (no-op on Python 2
        # and matches the encoding done by the sibling implementation).
        return msg if isinstance(msg, bytes) else msg.encode('utf8')

    if target:
        addrs = socket.getaddrinfo(target, 1900, 0, socket.SOCK_DGRAM)
        for addr in addrs:
            host = addr[4][0]
            if addr[0] == socket.AF_INET:
                net4.sendto(_tobytes(smsg.format(host, service)), addr[4])
            elif addr[0] == socket.AF_INET6:
                host = '[{0}]'.format(host)
                net6.sendto(_tobytes(smsg.format(host, service)), addr[4])
    else:
        net4.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
        for idx in util.list_interface_indexes():
            net6.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_MULTICAST_IF,
                            idx)
            try:
                net6.sendto(
                    _tobytes(smsg.format('[{0}]'.format(mcastv6addr),
                                         service)),
                    (mcastv6addr, 1900, 0, 0))
            except socket.error:
                # ignore interfaces without ipv6 multicast causing error
                pass
        for i4 in util.list_ips():
            if 'broadcast' not in i4:
                continue
            addr = i4['addr']
            bcast = i4['broadcast']
            net4.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_IF,
                            socket.inet_aton(addr))
            net4.sendto(_tobytes(smsg.format(mcastv4addr, service)),
                        (mcastv4addr, 1900))
            net4.sendto(_tobytes(smsg.format(bcast, service)),
                        (bcast, 1900))
    # SSDP by spec encourages responses to spread out over a 3 second
    # interval, hence we must be a bit more patient
    deadline = util.monotonic_time() + 4
    r, _, _ = select.select((net4, net6), (), (), 4)
    peerdata = {}
    while r:
        for s in r:
            (rsp, peer) = s.recvfrom(9000)
            neighutil.refresh_neigh()
            _parse_ssdp(peer, rsp, peerdata)
        timeout = deadline - util.monotonic_time()
        if timeout < 0:
            timeout = 0
        r, _, _ = select.select((net4, net6), (), (), timeout)
    for nid in peerdata:
        yield peerdata[nid]
def _handle_console_output(self, data):
    """Process one chunk of console output.

    Tracks terminal-mode escape sequences, logs the chunk with an
    eventdata bitmask, appends it to the byte buffer (flushing past 16KB),
    and fans it out to live recipients.
    """
    if type(data) == int:
        # integer payloads are console events, not output
        if data == conapi.ConsoleEvent.Disconnect:
            self._got_disconnected()
        return
    elif data == '':
        # ignore empty strings from a cconsole provider
        return
    # NOTE(review): these membership tests use str patterns; if data
    # arrives as bytes the sequences would not be detected — confirm
    # the provider always delivers text here.
    if '\x1b[?1l' in data:  # request for ansi mode cursor keys
        self.appmodedetected = False
    if '\x1b[?1h' in data:  # remember the session wants the client to use
        # 'application mode'  Thus far only observed on esxi
        self.appmodedetected = True
    if '\x1b)0' in data:
        # console indicates it wants access to special drawing characters
        self.shiftin = '0'
    # eventdata bit 0: application cursor mode; bit 1: shift-in charset
    eventdata = 0
    if self.appmodedetected:
        eventdata |= 1
    if self.shiftin is not None:
        eventdata |= 2
    self.log(data, eventdata=eventdata)
    self.lasttime = util.monotonic_time()
    if isinstance(data, bytearray) or isinstance(data, bytes):
        self.buffer += data
    else:
        self.buffer += data.encode('utf-8')
    # TODO: analyze buffer for registered events, examples:
    #   panics
    #   certificate signing request
    if len(self.buffer) > 16384:
        self.flushbuffer()
    self._send_rcpts(data)
def _handle_console_output(self, data):
    """Process one chunk of console output (pyte-buffer variant).

    Tracks terminal-mode escape sequences, honors a pending clear request
    by injecting a terminal reset, forwards normalized output to live
    recipients, logs the chunk, and feeds the replay buffer.
    """
    if type(data) == int:
        # integer payloads are console events, not output
        if data == conapi.ConsoleEvent.Disconnect:
            self._got_disconnected()
        return
    elif data == '':
        # ignore empty strings from a cconsole provider
        return
    # NOTE(review): these membership tests use str patterns; if data
    # arrives as bytes the sequences would not be detected — confirm.
    if '\x1b[?1l' in data:  # request for ansi mode cursor keys
        self.appmodedetected = False
    if '\x1b[?1h' in data:  # remember the session wants the client to use
        # 'application mode'  Thus far only observed on esxi
        self.appmodedetected = True
    if '\x1b)0' in data:
        # console indicates it wants access to special drawing characters
        self.shiftin = '0'
    # eventdata bit 0: application cursor mode; bit 1: shift-in charset
    eventdata = 0
    if self.appmodedetected:
        eventdata |= 1
    if self.shiftin is not None:
        eventdata |= 2
    # TODO: analyze buffer for registered events, examples:
    #   panics
    #   certificate signing request
    if self.clearpending or self.clearerror:
        self.clearpending = False
        self.clearerror = False
        # full reset + clear screen + home cursor, to buffer and viewers
        self.feedbuffer(b'\x1bc\x1b[2J\x1b[1;1H')
        self._send_rcpts(b'\x1bc\x1b[2J\x1b[1;1H')
    self._send_rcpts(_utf8_normalize(data, self.shiftin, self.utf8decoder))
    self.log(data, eventdata=eventdata)
    self.lasttime = util.monotonic_time()
    self.feedbuffer(data)
def __init__(self, node, configmanager):
    """Set up console-handler state with a plain bytearray buffer.

    Replays recent console log text into the buffer (when logging to
    buffer is enabled) and restores terminal-mode flags persisted in the
    log's termstate bitmask.
    """
    self._dologging = True
    self._isondemand = False
    self.error = None
    self.cfgmgr = configmanager
    self.node = node
    self.connectstate = 'unconnected'
    self._isalive = True
    self.buffer = bytearray()
    self.livesessions = set([])
    if self._logtobuffer:
        self.logger = log.Logger(node, console=True,
                                 tenant=configmanager.tenant)
        (text, termstate, timestamp) = self.logger.read_recent_text(8192)
    else:
        (text, termstate, timestamp) = ('', 0, False)
    # when reading from log file, we will use wall clock
    # it should usually match walltime.
    self.lasttime = 0
    if timestamp:
        timediff = time.time() - timestamp
        if timediff > 0:
            # shift monotonic clock back by the wall-clock age of the log
            self.lasttime = util.monotonic_time() - timediff
        else:
            # wall clock has gone backwards, use current time as best
            # guess
            self.lasttime = util.monotonic_time()
    # NOTE(review): text is appended to a bytearray; read_recent_text is
    # presumably returning bytes-compatible data — confirm on Python 3.
    self.buffer += text
    self.appmodedetected = False
    self.shiftin = None
    self.reconnect = None
    # termstate bit 0: application cursor mode; bit 1: shift-in charset
    if termstate & 1:
        self.appmodedetected = True
    if termstate & 2:
        self.shiftin = '0'
    self.users = {}
    self._attribwatcher = None
    self._console = None
    self.connectionthread = None
    self.send_break = None
    if self._genwatchattribs:
        self._attribwatcher = self.cfgmgr.watch_attributes(
            (self.node,), self._genwatchattribs, self._attribschanged)
    self.check_isondemand()
    if not self._isondemand:
        # eagerly connect unless configured for on-demand connection
        self.connectstate = 'connecting'
        eventlet.spawn(self._connect)
def get_buffer_age(self):
    """Return the age in seconds of the buffered console data.

    Returns False when nothing has ever been buffered (no timestamp
    recorded yet).
    """
    last = self.lasttime
    if not last:
        return False
    return util.monotonic_time() - last
def _extract_neighbor_data_b(args):
    """Build LLDP data about elements connected to switch

    args are carried as a tuple, because of eventlet convenience

    Tries the affluent fast path first; on any failure falls back to
    walking the switch's LLDP-MIB (1.0.8802.1.1.2) tables over SNMP and
    populates the module-level _neighdata / _neighbypeerid /
    _chassisidbyswitch tables.
    """
    switch, password, user, cfm, force = args[:5]
    # skip the walk when cached data is under a minute old, unless forced
    vintage = _neighdata.get(switch, {}).get('!!vintage', 0)
    now = util.monotonic_time()
    if vintage > (now - 60) and not force:
        return
    lldpdata = {'!!vintage': now}
    try:
        return _extract_neighbor_data_affluent(switch, user, password, cfm,
                                               lldpdata)
    except Exception:
        # affluent endpoint unavailable; fall back to SNMP below
        pass
    conn = snmp.Session(switch, password, user)
    sid = None
    # sysObjectID, used to interpret port descriptions per-vendor
    for sysid in conn.walk('1.3.6.1.2.1.1.2'):
        sid = str(sysid[1][6:])
    _noaffluent.add(switch)
    idxtoifname = {}
    idxtoportid = {}
    # local chassis id for this switch
    _chassisidbyswitch[switch] = sanitize(
        list(conn.walk('1.0.8802.1.1.2.1.3.2'))[0][1])
    # local port id and port description tables, keyed by port index
    for oidindex in conn.walk('1.0.8802.1.1.2.1.3.7.1.3'):
        idx = oidindex[0][-1]
        idxtoportid[idx] = sanitize(oidindex[1])
    for oidindex in conn.walk('1.0.8802.1.1.2.1.3.7.1.4'):
        idx = oidindex[0][-1]
        idxtoifname[idx] = _lldpdesc_to_ifname(sid, idx, str(oidindex[1]))
    # remote-systems tables: description, name, port id, chassis id
    for remotedesc in conn.walk('1.0.8802.1.1.2.1.4.1.1.10'):
        iname = idxtoifname[remotedesc[0][-2]]
        _init_lldp(lldpdata, iname, remotedesc[0][-2], idxtoportid, switch)
        _extract_extended_desc(lldpdata[iname], remotedesc[1], user)
    for remotename in conn.walk('1.0.8802.1.1.2.1.4.1.1.9'):
        iname = idxtoifname[remotename[0][-2]]
        _init_lldp(lldpdata, iname, remotename[0][-2], idxtoportid, switch)
        lldpdata[iname]['peername'] = str(remotename[1])
    for remotename in conn.walk('1.0.8802.1.1.2.1.4.1.1.7'):
        iname = idxtoifname[remotename[0][-2]]
        _init_lldp(lldpdata, iname, remotename[0][-2], idxtoportid, switch)
        lldpdata[iname]['peerportid'] = sanitize(remotename[1])
    for remoteid in conn.walk('1.0.8802.1.1.2.1.4.1.1.5'):
        iname = idxtoifname[remoteid[0][-2]]
        _init_lldp(lldpdata, iname, remoteid[0][-2], idxtoportid, switch)
        lldpdata[iname]['peerchassisid'] = sanitize(remoteid[1])
    # finalize each entry with a filesystem/url safe peer identifier
    for entry in lldpdata:
        if entry == '!!vintage':
            continue
        entry = lldpdata[entry]
        entry['switch'] = switch
        peerid = '{0}.{1}'.format(
            entry.get('peerchassisid', '').replace(':', '-').replace(
                '/', '-'),
            entry.get('peerportid', '').replace(':', '-').replace('/', '-'))
        entry['peerid'] = peerid
        _neighbypeerid[peerid] = entry
    _neighdata[switch] = lldpdata
def find_node_by_mac(mac, configmanager):
    """Resolve a MAC address to its node via the switch-derived map.

    Serves straight from cache while the map is under 90 seconds old.
    Otherwise iterates the (possibly throttled) map update, answering as
    soon as the MAC shows up.  Returns None for an unknown MAC.
    """
    now = util.monotonic_time()
    cache_is_fresh = vintage and (now - vintage) < 90
    if cache_is_fresh and mac in _nodesbymac:
        return _nodesbymac[mac]
    # do not actually sweep switches more than once every 30 seconds
    # however, if there is an update in progress, wait on it
    for _ in update_macmap(configmanager, vintage and (now - vintage) < 30):
        if mac in _nodesbymac:
            return _nodesbymac[mac]
    # If update_mac bailed out, still check one last time
    return _nodesbymac.get(mac, None)
def find_nodeinfo_by_mac(mac, configmanager):
    """Resolve a MAC to (nodename, {'maccount': n}).

    Serves from cache while the map is under 90 seconds old; otherwise
    follows the throttled map update.  Yields (None, {'maccount': 0})
    when the MAC is not known at all.
    """
    def _result():
        # _nodesbymac stores (nodename, maccount) tuples
        entry = _nodesbymac[mac]
        return entry[0], {'maccount': entry[1]}

    now = util.monotonic_time()
    if vintage and (now - vintage) < 90 and mac in _nodesbymac:
        return _result()
    # do not actually sweep switches more than once per backoff window;
    # if an update is already in progress, wait on its results instead
    for _ in update_macmap(configmanager,
                           vintage and (now - vintage) < switchbackoff):
        if mac in _nodesbymac:
            return _result()
    # If update_mac bailed out, still check one last time
    if mac in _nodesbymac:
        return _result()
    return None, {'maccount': 0}
def _update_neighbors_backend(configmanager, force):
    """Refresh the global LLDP neighbor tables by polling every switch.

    Skips the refresh entirely when the cached data is younger than 60
    seconds and force is not set; otherwise resets both tables and yields
    per-switch results as workers complete.
    """
    global _neighdata, _neighbypeerid
    now = util.monotonic_time()
    last_refresh = _neighdata.get('!!vintage', 0)
    if last_refresh > (now - 60) and not force:
        return
    _neighdata = {'!!vintage': now}
    _neighbypeerid = {'!!vintage': now}
    creds = netutil.get_switchcreds(configmanager,
                                    netutil.list_switches(configmanager))
    # tack the force flag onto each credential tuple for the workers
    jobs = [cred + (force,) for cred in creds]
    workers = GreenPool(64)
    for result in workers.imap(_extract_neighbor_data, jobs):
        yield result
def _periodic_recheck(configmanager):
    """Timer-driven sweep over undiscovered nodes; reschedules itself."""
    global rechecker, rechecktime
    rechecker = None
    try:
        _recheck_nodes((), configmanager)
    except Exception:
        traceback.print_exc()
        log.log({'error': 'Unexpected error during discovery, check debug '
                          'logs'})
    # _recheck_nodes may have armed an accelerated recheck already; only
    # schedule the regular 900 second timer when it did not.
    if rechecker is None:
        rechecktime = util.monotonic_time() + 900
        rechecker = eventlet.spawn_after(900, _periodic_recheck,
                                         configmanager)
def start_detection():
    """Start the discovery machinery.

    Maps known unique ids, registers attribute/node watchers, launches
    SLP and PXE snoopers, and arms the periodic recheck timer.
    """
    global attribwatcher
    global rechecker
    # BUGFIX: rechecktime was assigned below without a global declaration,
    # so the module-level value consulted by detected()/recheck logic was
    # never actually updated here.
    global rechecktime
    _map_unique_ids()
    cfg = cfm.ConfigManager(None)
    allnodes = cfg.list_nodes()
    attribwatcher = cfg.watch_attributes(
        allnodes, ('discovery.policy', 'net*.switch',
                   'hardwaremanagement.manager', 'net*.switchport',
                   'id.uuid', 'pubkeys.tls_hardwaremanager'),
        _recheck_nodes)
    cfg.watch_nodecollection(newnodes)
    eventlet.spawn_n(slp.snoop, safe_detected)
    eventlet.spawn_n(pxe.snoop, safe_detected)
    if rechecker is None:
        rechecktime = util.monotonic_time() + 900
        rechecker = eventlet.spawn_after(900, _periodic_recheck, cfg)
def _handle_neighbor_query(pathcomponents, configmanager):
    """Answer an API query within the LLDP neighbor tree.

    Returns either a single neighbor datum (by-peerid leaf), the list of
    currently valid child choices, or the filtered info listing for a
    recognized selector.

    :raises exc.NotFoundException: for an unknown peer or selector.
    """
    choices, parms, listrequested, childcoll = _parameterize_path(
        pathcomponents)
    if not childcoll:  # this means it's a single entry with by-peerid
        # guaranteed
        # refresh at most once per minute when the peer is missing
        if (parms['by-peerid'] not in _neighbypeerid and
                _neighbypeerid.get('!!vintage', 0) <
                util.monotonic_time() - 60):
            list(update_neighbors(configmanager))
        if parms['by-peerid'] not in _neighbypeerid:
            raise exc.NotFoundException('No matching peer known')
        return _dump_neighbordatum(_neighbypeerid[parms['by-peerid']])
    if not listrequested:
        # the query is for currently valid choices
        return [msg.ChildCollection(x + '/') for x in sorted(list(choices))]
    if listrequested not in multi_selectors | single_selectors:
        raise exc.NotFoundException('{0} is not found'.format(listrequested))
    if 'by-switch' in parms:
        # narrow refresh to a single switch when one was selected
        update_switch_data(parms['by-switch'], configmanager)
    else:
        list(update_neighbors(configmanager))
    return list_info(parms, listrequested)
def _handle_console_output(self, data):
    """Dispatch one chunk of console output to log, buffer and viewers."""
    if type(data) == int:
        # integer payloads are console events rather than output
        if data == conapi.ConsoleEvent.Disconnect:
            self._got_disconnected()
        return
    if data in (b'', u''):
        # a cconsole provider may emit empty strings; nothing to do
        return
    if not isinstance(data, bytes):
        data = data.encode('utf-8')
    eventdata = 0
    # TODO: analyze buffer for registered events, examples:
    #   panics
    #   certificate signing request
    if self.clearpending or self.clearerror:
        self.clearpending = False
        self.clearerror = False
        # full reset + clear screen + home cursor
        reset = b'\x1bc\x1b[2J\x1b[1;1H'
        self.feedbuffer(reset)
        self._send_rcpts(reset)
    self._send_rcpts(_utf8_normalize(data, self.utf8decoder))
    self.log(data, eventdata=eventdata)
    self.lasttime = util.monotonic_time()
    self.feedbuffer(data)
def detected(info):
    """Handle one freshly detected endpoint announcement.

    Matches the announced services against registered node handlers,
    records serial/model data, dedupes against prior sightings of the
    same hwaddr, then either recognizes the endpoint as a known node
    (certificate match), defers it until https comes up, hands it to
    eval_node, or records it as unidentified.
    """
    global rechecker
    global rechecktime
    # later, manual and CMM discovery may act on SN and/or UUID
    for service in info['services']:
        if service in nodehandlers:
            if service not in known_services:
                known_services[service] = set([])
            handler = nodehandlers[service]
            info['handler'] = handler
            break
    else:  # no nodehandler, ignore for now
        return
    try:
        snum = info['attributes']['enclosure-serial-number'][0].strip()
        if snum:
            info['serialnumber'] = snum
            known_serials[info['serialnumber']] = info
    except (KeyError, IndexError):
        pass
    try:
        info['modelnumber'] = info['attributes'][
            'enclosure-machinetype-model'][0]
        known_services[service].add(info['modelnumber'])
    except (KeyError, IndexError):
        pass
    if info['hwaddr'] in known_info and 'addresses' in info:
        # we should tee these up for parsing when an enclosure comes up
        # also when switch config parameters change, should discard
        # and there's also if wiring is fixed...
        # of course could periodically revisit known_nodes
        # replace potentially stale address info
        # TODO(jjohnson2): remove this
        # temporary workaround for XCC not doing SLP DA over dedicated port
        # bz 93219, fix submitted, but not in builds yet
        # strictly speaking, going ipv4 only legitimately is mistreated
        # here, but that should be an edge case
        oldaddr = known_info[info['hwaddr']].get('addresses', [])
        for addr in info['addresses']:
            if addr[0].startswith('fe80::'):
                break
        else:
            # no link-local in the new announcement; carry over old ones
            for addr in oldaddr:
                if addr[0].startswith('fe80::'):
                    info['addresses'].append(addr)
        if known_info[info['hwaddr']].get(
                'addresses', []) == info['addresses']:
            # if the ip addresses match, then assume no changes
            # now something resetting to defaults could, in theory
            # have the same address, but need to be reset
            # in that case, however, a user can clear pubkeys to force a
            # check
            return
    known_info[info['hwaddr']] = info
    cfg = cfm.ConfigManager(None)
    if handler:
        handler = handler.NodeHandler(info, cfg)
        handler.scan()
    uuid = info.get('uuid', None)
    if uuid_is_valid(uuid):
        known_uuids[uuid][info['hwaddr']] = info
    if handler and handler.https_supported and not handler.https_cert:
        if handler.cert_fail_reason == 'unreachable':
            log.log({
                'info': '{0} with hwaddr {1} is not reachable by https '
                        'at address {2}'.format(
                    handler.devname, info['hwaddr'], handler.ipaddr
                )})
            # address data looks bad; drop the unreachable ip
            info['addresses'] = [x for x in info.get('addresses', [])
                                 if x != handler.ipaddr]
            return
        log.log({
            'info': '{0} with hwaddr {1} at address {2} is not yet running '
                    'https, will examine later'.format(
                handler.devname, info['hwaddr'], handler.ipaddr
            )})
        # pull an already-scheduled recheck closer if over 300s away
        if rechecker is not None and rechecktime > util.monotonic_time() + 300:
            rechecker.cancel()
        if rechecker is None or rechecker.dead:
            rechecktime = util.monotonic_time() + 300
            rechecker = eventlet.spawn_after(300, _periodic_recheck, cfg)
        unknown_info[info['hwaddr']] = info
        # BUGFIX: this was misspelled 'unidentfied', which never matched
        # the 'unidentified' status assigned elsewhere in this function
        info['discostatus'] = 'unidentified'
        # TODO, eventlet spawn after to recheck sooner, or somehow else
        # influence periodic recheck to shorten delay?
        return
    nodename, info['maccount'] = get_nodename(cfg, handler, info)
    if nodename and handler and handler.https_supported:
        dp = cfg.get_node_attributes(
            [nodename], ('pubkeys.tls_hardwaremanager',))
        lastfp = dp.get(nodename, {}).get('pubkeys.tls_hardwaremanager',
                                          {}).get('value', None)
        if util.cert_matches(lastfp, handler.https_cert):
            info['nodename'] = nodename
            known_nodes[nodename][info['hwaddr']] = info
            info['discostatus'] = 'discovered'
            return  # already known, no need for more
    # TODO(jjohnson2): We might have to get UUID for certain searches...
    # for now defer probe until inside eval_node.  We might not have
    # a nodename without probe in the future.
    if nodename and handler:
        eval_node(cfg, handler, info, nodename)
    elif handler:
        log.log({
            'info': 'Detected unknown {0} with hwaddr {1} at '
                    'address {2}'.format(
                handler.devname, info['hwaddr'], handler.ipaddr
            )})
        info['discostatus'] = 'unidentified'
        unknown_info[info['hwaddr']] = info
def _full_updatemacmap(configmanager):
    """Rebuild every MAC-related lookup table by walking all switches.

    Older variant without the adaptive switchbackoff timing: clears the
    module-level maps under the `mapupdating` lock, derives switch
    credentials inline from node attributes, and yields per-switch
    results as `_map_switch` completes.

    :raises exc.ForbiddenRequest: when invoked from a tenant context.
    """
    global vintage
    global _macmap
    global _nodesbymac
    global _switchportmap
    global _macsbyswitch
    with mapupdating:
        vintage = util.monotonic_time()
        # Clear all existing entries
        _macmap = {}
        _nodesbymac = {}
        _switchportmap = {}
        _macsbyswitch = {}
        if configmanager.tenant is not None:
            raise exc.ForbiddenRequest(
                'Network topology not available to tenants')
        nodelocations = configmanager.get_node_attributes(
            configmanager.list_nodes(), ('net*.switch', 'net*.switchport'))
        switches = set([])
        for node in nodelocations:
            cfg = nodelocations[node]
            for attr in cfg:
                # only examine net*.switch attributes that carry a value
                if not attr.endswith('.switch') or 'value' not in cfg[attr]:
                    continue
                curswitch = cfg[attr].get('value', None)
                if not curswitch:
                    continue
                switches.add(curswitch)
                switchportattr = attr + 'port'
                if switchportattr in cfg:
                    portname = cfg[switchportattr].get('value', '')
                    if not portname:
                        continue
                    if curswitch not in _switchportmap:
                        _switchportmap[curswitch] = {}
                    if portname in _switchportmap[curswitch]:
                        # two nodes claim the same switch port: poison the
                        # entry (None) so neither is wrongly identified
                        log.log({'error': 'Duplicate switch topology config '
                                          'for {0} and {1}'.format(
                                              node,
                                              _switchportmap[curswitch][
                                                  portname])})
                        _switchportmap[curswitch][portname] = None
                    else:
                        _switchportmap[curswitch][portname] = node
        switchcfg = configmanager.get_node_attributes(
            switches, ('secret.hardwaremanagementuser',
                       'secret.snmpcommunity',
                       'secret.hardwaremanagementpassword'), decrypt=True)
        switchauth = []
        for switch in switches:
            if not switch:
                continue
            switchparms = switchcfg.get(switch, {})
            user = None
            # prefer an snmp community; fall back to the hardware
            # management credentials, defaulting the community to 'public'
            password = switchparms.get('secret.snmpcommunity',
                                       {}).get('value', None)
            if not password:
                password = switchparms.get(
                    'secret.hardwaremanagementpassword',
                    {}).get('value', 'public')
                user = switchparms.get('secret.hardwaremanagementuser',
                                       {}).get('value', None)
            switchauth.append((switch, password, user))
        pool = GreenPool()
        for ans in pool.imap(_map_switch, switchauth):
            vintage = util.monotonic_time()
            yield ans
def _find_service(service, target):
    """Search for an SSDP service and yield confirmed storage peers.

    When target is given, the M-SEARCH is unicast to each of its
    addresses; otherwise it is multicast/broadcast on every interface.
    Responses are collected for about 4 seconds, then peers advertising a
    '/desc.tmpl' url are verified concurrently via check_cpstorage.
    """
    net4 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    net6 = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
    net6.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 1)
    if target:
        addrs = socket.getaddrinfo(target, 1900, 0, socket.SOCK_DGRAM)
        for addr in addrs:
            host = addr[4][0]
            if addr[0] == socket.AF_INET:
                # sendto requires bytes on Python 3; smsg.format is text
                msg = smsg.format(host, service)
                if not isinstance(msg, bytes):
                    msg = msg.encode('utf8')
                net4.sendto(msg, addr[4])
            elif addr[0] == socket.AF_INET6:
                host = '[{0}]'.format(host)
                msg = smsg.format(host, service)
                if not isinstance(msg, bytes):
                    msg = msg.encode('utf8')
                net6.sendto(msg, addr[4])
    else:
        net4.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
        for idx in util.list_interface_indexes():
            # multicast the v6 search on every interface index
            net6.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_MULTICAST_IF,
                            idx)
            try:
                msg = smsg.format('[{0}]'.format(mcastv6addr), service)
                if not isinstance(msg, bytes):
                    msg = msg.encode('utf8')
                net6.sendto(msg, (mcastv6addr, 1900, 0, 0))
            except socket.error:
                # ignore interfaces without ipv6 multicast causing error
                pass
        for i4 in util.list_ips():
            if 'broadcast' not in i4:
                continue
            addr = i4['addr']
            bcast = i4['broadcast']
            net4.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_IF,
                            socket.inet_aton(addr))
            msg = smsg.format(mcastv4addr, service)
            if not isinstance(msg, bytes):
                msg = msg.encode('utf8')
            net4.sendto(msg, (mcastv4addr, 1900))
            msg = smsg.format(bcast, service)
            if not isinstance(msg, bytes):
                msg = msg.encode('utf8')
            net4.sendto(msg, (bcast, 1900))
    # SSDP by spec encourages responses to spread out over a 3 second
    # interval
    # hence we must be a bit more patient
    deadline = util.monotonic_time() + 4
    r, _, _ = select.select((net4, net6), (), (), 4)
    peerdata = {}
    while r:
        for s in r:
            (rsp, peer) = s.recvfrom(9000)
            neighutil.refresh_neigh()
            _parse_ssdp(peer, rsp, peerdata)
        timeout = deadline - util.monotonic_time()
        if timeout < 0:
            timeout = 0
        r, _, _ = select.select((net4, net6), (), (), timeout)
    querypool = gp.GreenPool()
    pooltargs = []
    for nid in peerdata:
        for url in peerdata[nid].get('urls', ()):
            # only description-template urls identify candidate storage
            if url.endswith('/desc.tmpl'):
                pooltargs.append((url, peerdata[nid]))
    for pi in querypool.imap(check_cpstorage, pooltargs):
        if pi is not None:
            yield pi