def __init__(self, domain, ipaddr, cidrmask='unknown'):
    '''Construct an IPaddrNode - validating our parameters.

    The address is canonicalized to an IPv6 pyNetAddr with port zero and
    stored as a unicode string.  When StoreHostNames is enabled we also
    attempt a reverse-DNS lookup and remember the resulting hostname.
    '''
    GraphNode.__init__(self, domain=domain)
    if isinstance(ipaddr, (str, unicode)):
        netaddr = pyNetAddr(str(ipaddr))
    else:
        netaddr = ipaddr
    if not isinstance(netaddr, pyNetAddr):
        raise ValueError('Invalid address type for IPaddrNode constructor: %s type(%s)'
                         % (str(ipaddr), type(ipaddr)))
    famtype = netaddr.addrtype()
    if famtype == ADDR_FAMILY_IPV4:
        netaddr = netaddr.toIPv6()      # everything is stored in IPv6 form
    elif famtype != ADDR_FAMILY_IPV6:
        raise ValueError('Invalid network address type for IPaddrNode constructor: %s'
                         % str(netaddr))
    netaddr.setport(0)                  # IPaddrNodes never carry a port
    self.ipaddr = unicode(str(netaddr))
    self.cidrmask = cidrmask
    if IPaddrNode.StoreHostNames and not hasattr(self, 'hostname'):
        # NOTE(review): the reverse lookup key is built from the *original*
        # ipaddr argument, not the canonicalized address -- preserved as-is.
        revkey = repr(pyNetAddr(ipaddr))
        try:
            self.hostname = socket.gethostbyaddr(revkey)[0]
        except socket.herror:
            return
def __init__(self, domain, ipaddr, cidrmask='unknown'):
    '''Construct an IPaddrNode - validating our parameters.

    Accepts a string or a pyNetAddr; anything else raises ValueError.
    IPv4 addresses are converted to IPv6 form, the port is zeroed, and the
    canonical text form is stored.  Optionally records the reverse-DNS name.
    '''
    GraphNode.__init__(self, domain=domain)
    addr_obj = pyNetAddr(str(ipaddr)) if isinstance(ipaddr, (str, unicode)) else ipaddr
    if not isinstance(addr_obj, pyNetAddr):
        raise ValueError('Invalid address type for IPaddrNode constructor: %s type(%s)'
                         % (str(ipaddr), type(ipaddr)))
    family = addr_obj.addrtype()
    if family == ADDR_FAMILY_IPV4:
        addr_obj = addr_obj.toIPv6()    # canonical storage form is IPv6
    elif family != ADDR_FAMILY_IPV6:
        raise ValueError('Invalid network address type for IPaddrNode constructor: %s'
                         % str(addr_obj))
    addr_obj.setport(0)                 # addresses are stored without a port
    self.ipaddr = unicode(str(addr_obj))
    self.cidrmask = cidrmask
    if IPaddrNode.StoreHostNames and not hasattr(self, 'hostname'):
        # Reverse lookup uses the original argument (pre-canonicalization)
        lookup_key = repr(pyNetAddr(ipaddr))
        try:
            self.hostname = socket.gethostbyaddr(lookup_key)[0]
        except socket.herror:
            return
def _process_mgmt_addr(self, switch, chassisid, attrs):
    '''Process the ManagementAddress field in the LLDP packet.

    The management address may be an IP (preferred) or a MAC address.
    When it is an IP and the chassis ID is a MAC, we create the admin NIC
    and its management IP and wire up the ownership relationships.
    '''
    # FIXME - not sure if I know how I should do this now - no MAC address for mgmtaddr?
    mgmtaddr = attrs['ManagementAddress']
    mgmtnetaddr = pyNetAddr(mgmtaddr)
    atype = mgmtnetaddr.addrtype()
    if atype in (ADDR_FAMILY_IPV4, ADDR_FAMILY_IPV6):
        # MAC addresses are permitted, but IP addresses are preferred
        chassisaddr = pyNetAddr(chassisid)
        if chassisaddr.addrtype() == ADDR_FAMILY_802:
            adminnic = self.store.load_or_create(NICNode, domain=switch.domain,
                                                 macaddr=chassisid, ifname='(adminNIC)')
            mgmtip = self.store.load_or_create(IPaddrNode, domain=switch.domain,
                                               cidrmask='unknown', ipaddr=mgmtaddr)
            # Only relate freshly-created (abstract) nodes to avoid duplicates
            if Store.is_abstract(adminnic) or Store.is_abstract(switch):
                self.store.relate(switch, CMAconsts.REL_nicowner, adminnic)
            if Store.is_abstract(mgmtip) or Store.is_abstract(adminnic):
                self.store.relate(adminnic, CMAconsts.REL_ipowner, mgmtip)
        else:
            # It might be an IP address instead
            self.log.info('LLDP ATTRS: %s' % str(attrs))
            if mgmtnetaddr != chassisaddr:
                # Not really sure what I should be doing in this case...
                self.log.warning(
                    'Chassis ID [%s] not a MAC addr and not the same as mgmt addr [%s]'
                    % (chassisid, mgmtaddr))
                self.log.warning('Chassis ID [%s] != mgmt addr [%s]'
                                 % (str(mgmtnetaddr), str(chassisaddr)))
    elif atype == ADDR_FAMILY_802:
        mgmtnic = self.store.load_or_create(NICNode, domain=switch.domain,
                                            macaddr=mgmtaddr, ifname='(ManagementAddress)')
        if Store.is_abstract(mgmtnic) or Store.is_abstract(switch):
            self.store.relate(switch, CMAconsts.REL_nicowner, mgmtnic)
def __init__(self, ipaddr):
    '''Construct the node, canonicalizing the IP address to IPv6 string form.

    Accepts a string or a pyNetAddr; the domain is always 'global'.
    '''
    GraphNode.__init__(self, domain='global')
    addr = pyNetAddr(ipaddr) if isinstance(ipaddr, str) else ipaddr
    if isinstance(addr, pyNetAddr):
        addr = addr.toIPv6()    # store everything in IPv6 form
    self.ipaddr = str(addr)
def __init__(self, MACaddr):
    '''Construct the node from a MAC address, validating it first.

    Raises ValueError if the value does not parse as an 802 (MAC) address.
    '''
    GraphNode.__init__(self, domain='global')
    parsed = pyNetAddr(MACaddr)
    if parsed is None or parsed.addrtype() != ADDR_FAMILY_802:
        raise ValueError('Not a legal MAC address [%s // %s]: %s (%s)'
                         % (MACaddr, str(parsed), str(parsed.addrtype()), parsed.addrlen()))
    self.MACaddr = str(parsed)
def __init__(self, config, dispatch, io=None, encryption_required=True):
    '''Set up our I/O endpoint, join the CMA multicast group, and build
    the per-priority packet queues.

    Raises NameError if we cannot bind to the configured CMA init address.
    '''
    self.config = config
    self.encryption_required = encryption_required
    # Create a default reliable-UDP endpoint unless one was supplied
    self.io = pyReliableUDP(config, pyPacketDecoder()) if io is None else io
    dispatch.setconfig(self.io, config)
    if not self.io.bindaddr(config[CONFIGNAME_CMAINIT]):
        raise NameError('Cannot bind to address %s' % (str(config[CONFIGNAME_CMAINIT])))
    if not self.io.mcastjoin(pyNetAddr(CMAADDR)):
        CMAdb.log.warning('Failed to join multicast at %s' % CMAADDR)
    self.io.setblockio(False)   # we poll from the main loop; never block
    self.dispatcher = dispatch
    self.iowatch = None
    self.mainloop = glib.MainLoop()
    # One FIFO queue per priority level; lower index == higher priority
    self.prio_queues = [[] for _ in range(PacketListener.LOWEST_PRIO + 1)]
    self.queue_addrs = {}  # Indexed by IP addresses - which queue is this IP in?
def __init__(self, domain, ipaddr, port=None, protocol='tcp'):
    '''Construct an IPtcpportNode - validating our parameters.

    The port defaults to the one carried by the address; it must be in
    1..65535.  The address is canonicalized to IPv6 with port zero.
    '''
    GraphNode.__init__(self, domain=domain)
    if isinstance(ipaddr, (str, unicode)):
        ipaddr = pyNetAddr(str(ipaddr))
    if not isinstance(ipaddr, pyNetAddr):
        raise ValueError('Invalid address type for constructor: %s type(%s)'
                         % (str(ipaddr), type(ipaddr)))
    if port is None:
        port = ipaddr.port()    # take the port from the address itself
    if port <= 0 or port >= 65536:
        raise ValueError('Invalid port for constructor: %s' % str(port))
    famtype = ipaddr.addrtype()
    if famtype == ADDR_FAMILY_IPV4:
        ipaddr = ipaddr.toIPv6()
    elif famtype != ADDR_FAMILY_IPV6:
        raise ValueError('Invalid network address type [%s] for constructor: %s'
                         % (famtype, str(ipaddr)))
    ipaddr.setport(0)           # port is kept separately in self.port
    self.ipaddr = unicode(str(ipaddr))
    self.port = port
    self.protocol = protocol
    self.ipport = self.format_ipport()
def _add_serveripportnodes(self, drone, ip, port, processnode, allourips):
    '''We create tcpipports objects that correspond to the given json object in
    the context of the set of IP addresses that we support -
    including support for the ANY ipv4 and ipv6 addresses.

    Raises ValueError when a specific (non-ANY) address is not among allourips.
    '''
    netaddr = pyNetAddr(str(ip)).toIPv6()
    if netaddr.islocal():
        self.log.warning('add_serveripportnodes("%s"): address is local' % netaddr)
        return
    addr = str(netaddr)
    # Were we given the ANY address?
    anyaddr = netaddr.isanyaddr()
    for ouraddr in allourips:
        if not anyaddr and str(ouraddr.ipaddr) != addr:
            continue
        ip_port = self.store.load_or_create(IPtcpportNode, domain=drone.domain,
                                            ipaddr=ouraddr.ipaddr, port=port)
        assert hasattr(ip_port, '_Store__store_node')
        self.store.relate_new(processnode, CMAconsts.REL_tcpservice, ip_port)
        assert hasattr(ouraddr, '_Store__store_node')
        self.store.relate_new(ip_port, CMAconsts.REL_baseip, ouraddr)
        if not anyaddr:
            return              # a specific address matches at most once
    if not anyaddr:
        # Fell off the loop without finding the specific address
        print >> sys.stderr, (
            'LOOKING FOR %s in: %s' % (netaddr, [str(ip.ipaddr) for ip in allourips]))
        raise ValueError(
            'IP Address mismatch for Drone %s - could not find address %s' % (drone, addr))
def __init__(self, domain, ipaddr, port=None, protocol='tcp'):
    '''Construct an IPtcpportNode - validating our parameters.

    Strings are parsed into pyNetAddr; other types raise ValueError.
    The port (explicit or taken from the address) must lie in 1..65535.
    '''
    GraphNode.__init__(self, domain=domain)
    if isinstance(ipaddr, (str, unicode)):
        ipaddr = pyNetAddr(str(ipaddr))
    if not isinstance(ipaddr, pyNetAddr):
        raise ValueError('Invalid address type for constructor: %s type(%s)'
                         % (str(ipaddr), type(ipaddr)))
    port = ipaddr.port() if port is None else port
    if port <= 0 or port >= 65536:
        raise ValueError('Invalid port for constructor: %s' % str(port))
    family = ipaddr.addrtype()
    if family == ADDR_FAMILY_IPV4:
        ipaddr = ipaddr.toIPv6()        # canonicalize to IPv6
    elif family != ADDR_FAMILY_IPV6:
        raise ValueError('Invalid network address type [%s] for constructor: %s'
                         % (family, str(ipaddr)))
    ipaddr.setport(0)                   # the port lives in self.port instead
    self.ipaddr = unicode(str(ipaddr))
    self.port = port
    self.protocol = protocol
    self.ipport = self.format_ipport()
def _add_serveripportnodes(self, drone, ip, port, processnode, allourips):
    '''We create tcpipports objects that correspond to the given json object in
    the context of the set of IP addresses that we support -
    including support for the ANY ipv4 and ipv6 addresses.
    '''
    netaddr = pyNetAddr(str(ip)).toIPv6()
    if netaddr.islocal():
        self.log.warning('add_serveripportnodes("%s"): address is local' % netaddr)
        return
    addr = str(netaddr)
    anyaddr = netaddr.isanyaddr()   # Were we given the ANY address?
    for known in allourips:
        # For a specific address, skip every non-matching entry
        if not anyaddr and str(known.ipaddr) != addr:
            continue
        ip_port = self.store.load_or_create(IPtcpportNode, domain=drone.domain,
                                            ipaddr=known.ipaddr, port=port)
        assert hasattr(ip_port, '_Store__store_node')
        self.store.relate_new(processnode, CMAconsts.REL_tcpservice, ip_port)
        assert hasattr(known, '_Store__store_node')
        self.store.relate_new(ip_port, CMAconsts.REL_baseip, known)
        if not anyaddr:
            return
    if not anyaddr:
        # Specific address was not found among our known IPs: complain loudly
        print >> sys.stderr, ('LOOKING FOR %s in: %s'
                              % (netaddr, [str(ip.ipaddr) for ip in allourips]))
        raise ValueError('IP Address mismatch for Drone %s - could not find address %s'
                         % (drone, addr))
def _add_serveripportnodes(self, drone, ip, port, processnode, allourips):
    '''We create tcpipports objects that correspond to the given json object in
    the context of the set of IP addresses that we support -
    including support for the ANY ipv4 and ipv6 addresses.

    If a specific address is not yet known, we create a placeholder
    IPaddrNode for it and retry (discovery should later fill in details).
    '''
    netaddr = pyNetAddr(str(ip)).toIPv6()
    if netaddr.islocal():
        self.log.warning('add_serveripportnodes("%s"): address is local' % netaddr)
        return
    addr = str(netaddr)
    anyaddr = netaddr.isanyaddr()   # Were we given the ANY address?
    for known in allourips:
        if not anyaddr and str(known.ipaddr) != addr:
            continue
        ip_port = self.store.load_or_create(IPtcpportNode, domain=drone.domain,
                                            ipaddr=known.ipaddr, port=port)
        assert hasattr(ip_port, '_Store__store_node')
        self.store.relate_new(processnode, CMAconsts.REL_tcpservice, ip_port)
        assert hasattr(known, '_Store__store_node')
        self.store.relate_new(ip_port, CMAconsts.REL_baseip, known)
        if not anyaddr:
            return
    if not anyaddr:
        print >> sys.stderr, ('LOOKING FOR %s (%s, %s) in: %s'
                              % (netaddr, type(ip), type(netaddr),
                                 [str(ip.ipaddr) for ip in allourips]))
        #raise ValueError('IP Address mismatch for Drone %s - could not find address %s'
        #% (drone, addr))
        # Must not have been discovered yet.  Hopefully discovery will come along and
        # fill in the cidrmask, and create the NIC relationship ;-)
        ipnode = self.store.load_or_create(IPaddrNode, domain=drone.domain, ipaddr=addr)
        allourips.append(ipnode)
        self._add_serveripportnodes(drone, addr, port, processnode, allourips)
def start_heartbeat(self, ring, partner1, partner2=None):
    '''Start heartbeating to the given partners.
    We insert ourselves between partner1 and partner2.
    We only use forward links - because we can follow them in both directions in Neo4J.
    So, we need to create a forward link from partner1 to us and from us to partner2 (if any)
    '''
    ouraddr = pyNetAddr(self.select_ip(), port=self.port)
    partner1addr = pyNetAddr(partner1.select_ip(ring), port=partner1.port)
    # partner2 is optional - a ring of two has only one neighbor
    partner2addr = (None if partner2 is None
                    else pyNetAddr(partner2.select_ip(ring), port=partner2.port))
    if CMAdb.debug:
        CMAdb.log.debug('STARTING heartbeat(s) from %s [%s] to %s [%s] and %s [%s]'
                        % (self, ouraddr, partner1, partner1addr, partner2, partner2addr))
    self.send_hbmsg(ouraddr, FrameSetTypes.SENDEXPECTHB, (partner1addr, partner2addr))
def request_discovery(self, args):   ##< A vector of arguments containing
    '''Send our drone a request to perform discovery
    We send a DISCNAME frame with the instance name
    then an optional DISCINTERVAL frame with the repeat interval
    then a DISCJSON frame with the JSON data for the discovery operation.

    Our argument is a vector of pyConfigContext objects with values for
        'instance'  Name of this discovery instance
        'interval'  How often to repeat this discovery action
        'timeout'   How long to wait before considering this discovery failed...
    '''
    frames = []
    # Initialize so the debug log below cannot hit unbound names when
    # args is empty or no argument carries a 'repeat' value.
    instance = None
    interval = None
    for arg in args:
        agent_params = ConfigFile.agent_params(CMAdb.io.config, 'discovery',
                                               arg[CONFIGNAME_TYPE], self.designation)
        # BUG FIX: the original tuple was ('repeat', 'warn' 'timeout', 'nice') --
        # the missing comma concatenated 'warn' and 'timeout' into 'warntimeout',
        # so neither default was ever applied.  Also the default lookup used
        # agent_params[arg] (indexing by the dict) instead of agent_params[key].
        for key in ('repeat', 'warn', 'timeout', 'nice'):
            if key in agent_params and key not in arg:
                arg[key] = agent_params[key]
        instance = arg['instance']
        frames.append({'frametype': FrameTypes.DISCNAME, 'framevalue': instance})
        if 'repeat' in arg:
            interval = int(arg['repeat'])
            frames.append({'frametype': FrameTypes.DISCINTERVAL, 'framevalue': int(interval)})
        frames.append({'frametype': FrameTypes.DISCJSON, 'framevalue': str(arg)})
    # This doesn't work if the client has bound to a VIP
    ourip = pyNetAddr(self.select_ip())     # meaning select our primary IP
    if ourip.port() == 0:
        ourip.setport(self.port)
    if CMAdb.debug:
        CMAdb.log.debug('Sending Discovery request(%s, %s) to %s Frames: %s'
                        % (instance, str(interval), str(ourip), str(frames)))
    CMAdb.transaction.add_packet(ourip, FrameSetTypes.DODISCOVER, frames)
def __init__(self, config, dispatch, io=None, encryption_required=True):
    '''Bind our reliable-UDP endpoint, join the CMA multicast address,
    and set up the priority packet queues used by the listener loop.
    '''
    self.config = config
    self.encryption_required = encryption_required
    if io is None:
        self.io = pyReliableUDP(config, pyPacketDecoder())
    else:
        self.io = io
    dispatch.setconfig(self.io, config)
    if not self.io.bindaddr(config[CONFIGNAME_CMAINIT]):
        raise NameError("Cannot bind to address %s" % (str(config[CONFIGNAME_CMAINIT])))
    if not self.io.mcastjoin(pyNetAddr(CMAADDR)):
        CMAdb.log.warning("Failed to join multicast at %s" % CMAADDR)
    # Non-blocking I/O: the glib main loop drives reads
    self.io.setblockio(False)
    self.dispatcher = dispatch
    self.iowatch = None
    self.mainloop = glib.MainLoop()
    # One list per priority; index 0 is the most urgent
    self.prio_queues = [[] for _ in range(PacketListener.LOWEST_PRIO + 1)]
    self.queue_addrs = {}  # Indexed by IP addresses - which queue is this IP in?
def stop_heartbeat(self, ring, partner1, partner2=None):
    '''Stop heartbeating to the given partners.
    We don't know which node is our forward link and which our back link,
    but we need to remove them either way ;-).
    '''
    ouraddr = pyNetAddr(self.select_ip(), port=self.port)
    partner1addr = pyNetAddr(partner1.select_ip(ring), port=partner1.port)
    partner2addr = (None if partner2 is None
                    else pyNetAddr(partner2.select_ip(ring), port=partner2.port))
    # Stop sending the heartbeat messages between these (former) peers
    if CMAdb.debug:
        CMAdb.log.debug('STOPPING heartbeat(s) from %s [%s] to %s [%s] and %s [%s]'
                        % (self, ouraddr, partner1, partner1addr, partner2, partner2addr))
    self.send_hbmsg(ouraddr, FrameSetTypes.STOPSENDEXPECTHB, (partner1addr, partner2addr))
def validate_source_ip(sysname, origaddr, jsobj, listenaddr):
    '''
    This chunk of code is kinda stupid...
    There is a docker/NAT bug where it screws up the source address of multicast packets
    This code detects that that has happened and works around it...

    Returns (address, is_nat) where address may be rewritten to the drone's
    primary IP when NATting was detected.
    '''
    # Local addresses aren't NATted, but the code below will think so...
    if origaddr.islocal():
        return origaddr, False
    found = False
    isNAT = False
    jsdata = jsobj['data']
    canonorig = str(pyNetAddr(origaddr).toIPv6())
    primaryip = None
    for ifname in jsdata:
        for ip_netmask in jsdata[ifname]['ipaddrs']:
            ip = ip_netmask.split('/')[0]
            canonip = pyNetAddr(ip, origaddr.port()).toIPv6()
            if str(canonip) == canonorig:
                found = True
                break
            # Remember the IP on the default-gateway interface as "primary"
            ipinfo = jsdata[ifname]['ipaddrs'][ip_netmask]
            if 'default_gw' in jsdata[ifname] and ipinfo.get('name') == ifname:
                primaryip = canonip
    # FIXME: This currently is set up to work around gratuitous NATting in Docker (bug!)
    # It should evolve to do the right things for real NAT configurations...
    if not found:
        CMAdb.log.warning('Drone %s sent STARTUP packet with NATted source address (%s)'
                          % (sysname, origaddr))
        isNAT = True
        if primaryip is not None:
            if CMAdb.running_under_docker():
                CMAdb.log.warning('Drone %s STARTUP orig address assumed to be (%s)'
                                  % (sysname, primaryip))
                CMAdb.log.warning('Presumed to be due to a known Docker bug.')
                origaddr = primaryip
                if listenaddr is not None and primaryip.port() != listenaddr.port():
                    CMAdb.log.warning('Drone %s STARTUP port is NATted: Assumed to be (%s)'
                                      % (sysname, listenaddr.port()))
                    origaddr = pyNetAddr(origaddr, port=listenaddr.port())
    return origaddr, isNAT
def __init__(self, MACaddr):
    '''Construct the node after checking that MACaddr really is a MAC address.'''
    GraphNode.__init__(self, domain='global')
    mac = pyNetAddr(MACaddr)
    # Anything that isn't an 802-family address is rejected outright
    if mac is None or mac.addrtype() != ADDR_FAMILY_802:
        raise ValueError(
            'Not a legal MAC address [%s // %s]: %s (%s)'
            % (MACaddr, str(mac), str(mac.addrtype()), mac.addrlen()))
    self.MACaddr = str(mac)
def _add_clientipportnode(self, drone, ipaddr, servport, processnode):
    '''Add the information for a single client IPtcpportNode to the database.'''
    canonical = str(pyNetAddr(ipaddr).toIPv6())
    servip = self.store.load_or_create(IPaddrNode, domain=drone.domain,
                                       ipaddr=canonical)
    ip_port = self.store.load_or_create(IPtcpportNode, domain=drone.domain,
                                        ipaddr=canonical, port=servport)
    # Tie the ip:port to its base IP and mark it as a client of this process
    self.store.relate_new(ip_port, CMAconsts.REL_baseip, servip)
    self.store.relate_new(ip_port, CMAconsts.REL_tcpclient, processnode)
def __init__(self, domain, macaddr, ifname=None):
    '''Construct the node from a MAC address and optional interface name.'''
    GraphNode.__init__(self, domain=domain)
    parsed = pyNetAddr(macaddr)
    if parsed is None or parsed.addrtype() != ADDR_FAMILY_802:
        raise ValueError('Not a legal MAC address [%s]' % macaddr)
    self.macaddr = str(parsed)
    if ifname is not None:
        self.ifname = ifname
def _validate_macaddr(name, paraminfo, value):
    '''Validate an MAC address value.

    paraminfo is accepted for signature compatibility with the other
    validators but is not consulted here.  Returns the canonical string
    form, or raises ValueError.
    '''
    addr = pyNetAddr(value)
    if addr is None:
        raise ValueError('value of %s [%s] not a valid MAC address' % (name, value))
    if addr.addrtype() != ADDR_FAMILY_802:
        raise ValueError('Value of %s [%s] not a MAC address' % (name, value))
    return str(addr)
def _regexmatch(key):
    '''Handy internal function to pull out the IP and port into a pyNetAddr
    Note that the format is the format used in the discovery information
    which in turn is the format used by netstat.
    This is not a "standard" format, but it's what netstat uses - so it's
    what we use.
    '''
    mobj = ipportregex.match(key)
    if mobj is None:
        return None
    ip, port = mobj.groups()
    ipport = pyNetAddr(ip, port=int(port))
    if ipport.isanyaddr():
        # ANY addresses are mapped to the matching loopback address
        loopback = '127.0.0.1' if ipport.addrtype() == ADDR_FAMILY_IPV4 else '::1'
        ipport = pyNetAddr(loopback, port=ipport.port())
    return ipport
def regexmatch(key):
    '''Handy internal function to pull out the IP and port into a pyNetAddr
    Note that the format is the format used in the discovery information
    which in turn is the format used by netstat.
    This is not a "standard" format, but it's what netstat uses - so it's
    what we use.
    '''
    matched = ipportregex.match(key)
    if matched is None:
        return None
    (ip, port) = matched.groups()
    result = pyNetAddr(ip, port=int(port))
    if result.isanyaddr():
        # Replace the ANY address with loopback of the same family/port
        if result.addrtype() == ADDR_FAMILY_IPV4:
            result = pyNetAddr('127.0.0.1', port=result.port())
        else:
            result = pyNetAddr('::1', port=result.port())
    return result
def _commit_network_trans(self, io):
    '''
    Commit the network portion of our transaction - that is, send the packets!
    One interesting thing - we should probably not consider this transaction fully
    completed until we decide each destination is dead, or until its packets are all ACKed.

    @TODO: We don't yet cover with CMA crashing before all packets are received versus sent
    -- That is, if they get lost between sending by the CMA and receiving by the nanoprobes.
    This argues for doing the network portion of the transaction first - presuming we do
    the db and network portions sequentially
    -- Of course, no transaction can start until the previous one is finished.
    '''
    for packet in self.tree['packets']:
        dest = packet['destaddr']
        fs = pyFrameSet(packet['action'])
        if packet['action'] == FrameSetTypes.STARTUP:
            raise ValueError('Packet is a STARTUP packet %s to %s' % (str(packet), dest))
        for frame in packet['frames']:
            ftype = frame['frametype']
            fvalue = frame['framevalue']
            # The number of cases below will have to grow over time,
            # but this dispatch is pretty simple so far...
            if ftype == FrameTypes.IPPORT:
                if isinstance(fvalue, (str, unicode)):
                    fvalue = pyNetAddr(fvalue)
                fs.append(pyIpPortFrame(ftype, fvalue))
            elif ftype in (FrameTypes.DISCNAME, FrameTypes.DISCJSON,
                           FrameTypes.CONFIGJSON, FrameTypes.RSCJSON):
                sframe = pyCstringFrame(ftype)
                sframe.setvalue(str(fvalue))
                fs.append(sframe)
            elif ftype == FrameTypes.DISCINTERVAL:
                fs.append(pyIntFrame(ftype, intbytes=4, initval=int(fvalue)))
            else:
                raise ValueError('Unrecognized frame type [%s]: %s' % (ftype, frame))
        # In theory we could optimize multiple FrameSets in a row being sent to the
        # same address, but we can always do that later...
        io.sendreliablefs(dest, (fs, ))
        if False:   # disabled debug logging of SETCONFIG connections
            if packet['action'] == FrameSetTypes.SETCONFIG:
                print >> sys.stderr, (
                    "LOGGING SETCONFIG CONNECTION TO %s" % str(dest))
                if hasattr(io, 'log_conn'):  # Some of our test code doesn't have this
                    io.log_conn(dest)
def _validate_ipaddr(name, paraminfo, value):
    '''Validate an IP address value.

    paraminfo is accepted for signature compatibility but unused here.
    Returns the canonical IPv6 string form (port stripped), or raises
    ValueError for non-IP values.
    '''
    ip = pyNetAddr(value)
    if ip is None:
        raise ValueError('Value of %s [%s] not a valid IP address' % (name, value))
    ip.setport(0)   # ports are never part of a bare IP value
    family = ip.addrtype()
    if family == ADDR_FAMILY_IPV6:
        return str(ip)
    if family == ADDR_FAMILY_IPV4:
        return str(ip.toIPv6())
    raise ValueError('Value of %s [%s] not an IP address' % (name, value))
def send_frames(self, framesettype, frames):
    '''Send messages to our real concrete Drone system...'''
    # This doesn't work if the client has bound to a VIP
    ourip = pyNetAddr(self.select_ip())     # meaning select our primary IP
    if ourip.port() == 0:
        ourip.setport(self.port)            # fall back to our configured port
    if CMAdb.debug:
        CMAdb.log.debug('Sending request to %s Frames: %s'
                        % (str(ourip), str(frames)))
    CMAdb.transaction.add_packet(ourip, framesettype, frames)
def validate_source_ip(sysname, origaddr, jsobj, listenaddr):
    '''
    This chunk of code is kinda stupid...
    There is a docker/NAT bug where it screws up the source address of multicast packets
    This code detects that that has happened and works around it...
    '''
    # Local addresses aren't NATted, but the code below will think so...
    if origaddr.islocal():
        return origaddr, False
    match = False
    isNAT = False
    jsdata = jsobj['data']
    canonorig = str(pyNetAddr(origaddr).toIPv6())
    primaryip = None
    # Scan every interface/address pair reported by discovery
    for ifname in jsdata:
        for ip_netmask in jsdata[ifname]['ipaddrs']:
            ip = ip_netmask.split('/')[0]
            canonip = pyNetAddr(ip, origaddr.port()).toIPv6()
            if str(canonip) == canonorig:
                match = True
                break
            ipinfo = jsdata[ifname]['ipaddrs'][ip_netmask]
            # The address on the default-gateway interface is our "primary" IP
            if 'default_gw' in jsdata[ifname] and ipinfo.get('name') == ifname:
                primaryip = canonip
    # FIXME: This currently is set up to work around gratuitous NATting in Docker (bug!)
    # It should evolve to do the right things for real NAT configurations...
    if not match:
        CMAdb.log.warning('Drone %s sent STARTUP packet with NATted source address (%s)'
                          % (sysname, origaddr))
        isNAT = True
        if primaryip is not None:
            if CMAdb.running_under_docker():
                CMAdb.log.warning('Drone %s STARTUP orig address assumed to be (%s)'
                                  % (sysname, primaryip))
                CMAdb.log.warning('Presumed to be due to a known Docker bug.')
                origaddr = primaryip
                if listenaddr is not None and primaryip.port() != listenaddr.port():
                    CMAdb.log.warning('Drone %s STARTUP port is NATted: Assumed to be (%s)'
                                      % (sysname, listenaddr.port()))
                    origaddr = pyNetAddr(origaddr, port=listenaddr.port())
    return origaddr, isNAT
def testme():
    '''Smoke test: build a transaction with two heartbeat packets,
    print its JSON form, and commit (send) it.'''
    from AssimCtypes import CONFIGNAME_OUTSIG
    from AssimCclasses import pyReliableUDP, pyPacketDecoder, pySignFrame
    config = pyConfigContext(init={CONFIGNAME_OUTSIG: pySignFrame(1)})
    io = pyReliableUDP(config, pyPacketDecoder())
    trans = Transaction(encryption_required=False)
    destaddr = pyNetAddr('10.10.10.1:1984')
    addresses = (pyNetAddr('10.10.10.5:1984'), pyNetAddr('10.10.10.6:1984'))
    trans.add_packet(destaddr, FrameSetTypes.SENDEXPECTHB, addresses,
                     frametype=FrameTypes.IPPORT)
    trans.add_packet(pyNetAddr('10.10.10.1:1984'),
                     FrameSetTypes.SENDEXPECTHB,
                     (pyNetAddr('10.10.10.5:1984'), pyNetAddr('10.10.10.6:1984')),
                     frametype=FrameTypes.IPPORT)
    print >> sys.stderr, 'JSON: %s\n' % str(trans)
    # Round-trip the JSON through pyConfigContext as a sanity check
    print >> sys.stderr, 'JSON: %s\n' % str(pyConfigContext(str(trans)))
    trans.commit_trans(io)
def testme():
    '''Smoke test: queue two heartbeat packets, verify the queue length
    after each add, print the JSON, commit, and verify the queue empties.'''
    from AssimCtypes import CONFIGNAME_OUTSIG
    from AssimCclasses import pyReliableUDP, pyPacketDecoder, pySignFrame
    config = pyConfigContext(init={CONFIGNAME_OUTSIG: pySignFrame(1)})
    io = pyReliableUDP(config, pyPacketDecoder())
    trans = Transaction(encryption_required=False)
    destaddr = pyNetAddr('10.10.10.1:1984')
    addresses = (pyNetAddr('10.10.10.5:1984'), pyNetAddr('10.10.10.6:1984'))
    trans.add_packet(destaddr, FrameSetTypes.SENDEXPECTHB, addresses,
                     frametype=FrameTypes.IPPORT)
    assert len(trans.tree['packets']) == 1
    trans.add_packet(pyNetAddr('10.10.10.1:1984'),
                     FrameSetTypes.SENDEXPECTHB,
                     (pyNetAddr('10.10.10.5:1984'), pyNetAddr('10.10.10.6:1984')),
                     frametype=FrameTypes.IPPORT)
    assert len(trans.tree['packets']) == 2
    print >> sys.stderr, 'JSON: %s\n' % str(trans)
    print >> sys.stderr, 'JSON: %s\n' % str(pyConfigContext(str(trans)))
    trans.commit_trans(io)
    # Committing must drain the packet queue
    assert len(trans.tree['packets']) == 0
def _commit_network_trans(self, io):
    '''
    Commit the network portion of our transaction - that is, send the packets!
    One interesting thing - we should probably not consider this transaction fully
    completed until we decide each destination is dead, or until its packets are all ACKed.

    @TODO: We don't yet cover with CMA crashing before all packets are received versus sent
    -- That is, if they get lost between sending by the CMA and receiving by the nanoprobes.
    This argues for doing the network portion of the transaction first - presuming we do
    the db and network portions sequentially
    -- Of course, no transaction can start until the previous one is finished.
    '''
    for packet in self.tree['packets']:
        dest = packet['destaddr']
        fs = pyFrameSet(packet['action'])
        # STARTUP packets are never legal in an outbound transaction
        if packet['action'] == FrameSetTypes.STARTUP:
            raise ValueError('Packet is a STARTUP packet %s to %s' % (str(packet), dest))
        for frame in packet['frames']:
            ftype = frame['frametype']
            fvalue = frame['framevalue']
            # The number of cases below will have to grow over time,
            # but this code is pretty simple so far...
            if ftype == FrameTypes.IPPORT:
                if isinstance(fvalue, (str, unicode)):
                    fvalue = pyNetAddr(fvalue)
                fs.append(pyIpPortFrame(ftype, fvalue))
            elif ftype in (FrameTypes.DISCNAME, FrameTypes.DISCJSON,
                           FrameTypes.CONFIGJSON, FrameTypes.RSCJSON):
                sframe = pyCstringFrame(ftype)
                sframe.setvalue(str(fvalue))
                fs.append(sframe)
            elif ftype == FrameTypes.DISCINTERVAL:
                fs.append(pyIntFrame(ftype, intbytes=4, initval=int(fvalue)))
            else:
                raise ValueError('Unrecognized frame type [%s]: %s' % (ftype, frame))
        # In theory we could optimize multiple FrameSets in a row being sent to the
        # same address, but we can always do that later...
        io.sendreliablefs(dest, (fs, ))
        if False:   # disabled: debug logging of SETCONFIG connections
            if packet['action'] == FrameSetTypes.SETCONFIG:
                print >> sys.stderr, ("LOGGING SETCONFIG CONNECTION TO %s" % str(dest))
                if hasattr(io, 'log_conn'):  # Some of our test code doesn't have this
                    io.log_conn(dest)
def _process_mgmt_addr(self, switch, chassisid, attrs):
    '''Process the ManagementAddress field in the LLDP packet.

    Creates/links the switch's admin NIC and management IP nodes when the
    management address is an IP, or a NIC node when it is a MAC address.
    '''
    # FIXME - not sure if I know how I should do this now - no MAC address for mgmtaddr?
    mgmtaddr = attrs['ManagementAddress']
    mgmtnetaddr = pyNetAddr(mgmtaddr)
    atype = mgmtnetaddr.addrtype()
    if atype == ADDR_FAMILY_IPV4 or atype == ADDR_FAMILY_IPV6:
        # MAC addresses are permitted, but IP addresses are preferred
        chassisaddr = pyNetAddr(chassisid)
        chassistype = chassisaddr.addrtype()
        if chassistype == ADDR_FAMILY_802:
            # It might be an IP address instead
            adminnic = self.store.load_or_create(
                NICNode, domain=switch.domain, macaddr=chassisid, ifname='(adminNIC)')
            mgmtip = self.store.load_or_create(
                IPaddrNode, domain=switch.domain, cidrmask='unknown', ipaddr=mgmtaddr)
            # Relate only when at least one endpoint is newly created (abstract)
            if Store.is_abstract(adminnic) or Store.is_abstract(switch):
                self.store.relate(switch, CMAconsts.REL_nicowner, adminnic)
            if Store.is_abstract(mgmtip) or Store.is_abstract(adminnic):
                self.store.relate(adminnic, CMAconsts.REL_ipowner, mgmtip)
        else:
            self.log.info('LLDP ATTRS: %s' % str(attrs))
            if mgmtnetaddr != chassisaddr:
                # Not really sure what I should be doing in this case...
                self.log.warning(
                    'Chassis ID [%s] not a MAC addr and not the same as mgmt addr [%s]'
                    % (chassisid, mgmtaddr))
                self.log.warning('Chassis ID [%s] != mgmt addr [%s]'
                                 % (str(mgmtnetaddr), str(chassisaddr)))
    elif atype == ADDR_FAMILY_802:
        mgmtnic = self.store.load_or_create(
            NICNode, domain=switch.domain, macaddr=mgmtaddr, ifname='(ManagementAddress)')
        if Store.is_abstract(mgmtnic) or Store.is_abstract(switch):
            self.store.relate(switch, CMAconsts.REL_nicowner, mgmtnic)
def __init__(self, domain, macaddr, ifname=None, json=None):
    '''Construct a NICNode, optionally populating attributes from discovery JSON.

    NOTE(review): the original was whitespace-collapsed; the OUI block is
    reconstructed at top level (not nested under the json branch) -- confirm
    against the project history.
    '''
    GraphNode.__init__(self, domain=domain)
    parsed = pyNetAddr(macaddr)
    if parsed is None or parsed.addrtype() != ADDR_FAMILY_802:
        raise ValueError('Not a legal MAC address [%s]' % macaddr)
    self.macaddr = str(parsed)
    if ifname is not None:
        self.ifname = ifname
    if json is not None:
        self.json = json
        self._json = pyConfigContext(json)
        # Copy the interesting link-level attributes out of the JSON blob
        for attr in ('carrier', 'duplex', 'MTU', 'operstate', 'speed'):
            if attr in self._json:
                setattr(self, attr, self._json[attr])
    if not hasattr(self, 'OUI'):
        # Derive the manufacturer (OUI) from the MAC address when possible
        oui = self.mac_to_oui(self.macaddr)
        if oui is not None:
            self.OUI = oui
def __init__(self, domain, ipaddr, cidrmask='unknown'):
    '''Construct an IPaddrNode - validating our parameters.

    Strings are parsed into pyNetAddr; IPv4 is promoted to IPv6; the port
    is zeroed; the canonical text form is stored as unicode.
    '''
    GraphNode.__init__(self, domain=domain)
    if isinstance(ipaddr, (str, unicode)):
        ipaddr = pyNetAddr(str(ipaddr))
    if not isinstance(ipaddr, pyNetAddr):
        raise ValueError('Invalid address type for IPaddrNode constructor: %s type(%s)'
                         % (str(ipaddr), type(ipaddr)))
    family = ipaddr.addrtype()
    if family == ADDR_FAMILY_IPV4:
        ipaddr = ipaddr.toIPv6()
    elif family != ADDR_FAMILY_IPV6:
        raise ValueError('Invalid network address type for IPaddrNode constructor: %s'
                         % str(ipaddr))
    ipaddr.setport(0)   # IPaddrNodes never carry a port
    self.ipaddr = unicode(str(ipaddr))
    self.cidrmask = cidrmask
def __init__(self, domain, ipaddr, cidrmask='unknown'):
    '''Construct an IPaddrNode - validating our parameters.

    Only IPv4/IPv6 pyNetAddr values (or parseable strings) are accepted;
    the stored form is the port-less unicode IPv6 representation.
    '''
    GraphNode.__init__(self, domain=domain)
    addr = pyNetAddr(str(ipaddr)) if isinstance(ipaddr, (str, unicode)) else ipaddr
    if not isinstance(addr, pyNetAddr):
        raise ValueError(
            'Invalid address type for IPaddrNode constructor: %s type(%s)'
            % (str(ipaddr), type(ipaddr)))
    famtype = addr.addrtype()
    if famtype == ADDR_FAMILY_IPV4:
        addr = addr.toIPv6()        # canonical storage form is IPv6
    elif famtype != ADDR_FAMILY_IPV6:
        raise ValueError(
            'Invalid network address type for IPaddrNode constructor: %s'
            % str(addr))
    addr.setport(0)                 # bare addresses never keep a port
    self.ipaddr = unicode(str(addr))
    self.cidrmask = cidrmask
def _fixup(value):
    '''Fix up our values for printing neatly in minimal space.'''
    if isinstance(value, unicode):
        # '::'-prefixed strings look like IPv6 addresses; pyNetAddr's repr
        # prints them more compactly when they parse
        if value.startswith('::'):
            try:
                value = repr(pyNetAddr(value))
            except ValueError:
                pass
        return str(value).strip()
    if isinstance(value, str):
        return value
    if hasattr(value, '__iter__'):
        # Render iterables as a bracketed, comma-separated list
        return '[' + ', '.join(str(item).strip() for item in value) + ']'
    return str(value)
def _fixup(value):
    '''Fix up our values for printing neatly in minimal space.'''
    if isinstance(value, unicode):
        if value.startswith('::'):
            # Likely an IPv6 address: compress via pyNetAddr if it parses
            try:
                value = repr(pyNetAddr(value))
            except ValueError:
                pass
        return str(value).strip()
    elif isinstance(value, str):
        return value
    elif hasattr(value, '__iter__'):
        # Bracketed, comma-separated rendering of any iterable
        pieces = [str(item).strip() for item in value]
        return '[%s]' % ', '.join(pieces)
    return str(value)
def death_report(self, status, reason, fromaddr, frameset):
    'Process a death/shutdown report for us. RIP us.'
    from hbring import HbRing  # local import: avoids a circular import at module load
    frameset = frameset # We don't use the frameset at this point in time
    # Log the transition unless it's a graceful shutdown or we already recorded it
    if reason != 'HBSHUTDOWN':
        if self.status != status or self.reason != reason:
            CMAdb.log.info(
                'Node %s has been reported as %s by address %s. Reason: %s'
                % (self.designation, status, str(fromaddr), reason))
    oldstatus = self.status
    self.status = status
    self.reason = reason
    self.monitors_activated = False
    # Record when the status changed, both machine- and human-readable
    self.time_status_ms = int(round(time.time() * 1000))
    self.time_status_iso8601 = time.strftime('%Y-%m-%d %H:%M:%S')
    if status == oldstatus:
        # He was already dead, Jim.
        return
    # There is a need for us to be a little more sophisticated
    # in terms of the number of peers this particular drone had
    # It's here in this place that we will eventually add the ability
    # to distinguish death of a switch or subnet or site from death of a single drone
    for mightbering in CMAdb.store.load_in_related(self, None, nodeconstructor):
        if isinstance(mightbering, HbRing):
            mightbering.leave(self)
    deadip = pyNetAddr(self.select_ip(), port=self.port)
    if CMAdb.debug:
        CMAdb.log.debug('Closing connection to %s/%d' % (deadip, DEFAULT_FSP_QID))
    #
    # So, if this is a death report from another system we could shut down ungracefully
    # and it would be OK.
    #
    # But if it's a graceful shutdown, we need to not screw up the comm shutdown in progress
    # If it's broken, our tests and the real world will eventually show that up :-D.
    #
    if reason != 'HBSHUTDOWN':
        self._io.closeconn(DEFAULT_FSP_QID, deadip)
    AssimEvent(self, AssimEvent.OBJDOWN)
def death_report(self, status, reason, fromaddr, frameset):
    'Process a death/shutdown report for us. RIP us.'
    # Deferred import breaks the hbring <-> droneinfo import cycle
    from hbring import HbRing
    frameset = frameset # We don't use the frameset at this point in time
    if reason != 'HBSHUTDOWN':
        # Only log genuinely-new status/reason combinations
        if self.status != status or self.reason != reason:
            CMAdb.log.info('Node %s has been reported as %s by address %s. Reason: %s'
            %       (self.designation, status, str(fromaddr), reason))
    oldstatus = self.status
    self.status = status
    self.reason = reason
    self.monitors_activated = False
    # Timestamp the transition (epoch ms + human-readable string)
    self.time_status_ms = int(round(time.time() * 1000))
    self.time_status_iso8601 = time.strftime('%Y-%m-%d %H:%M:%S')
    if status == oldstatus:
        # He was already dead, Jim.
        return
    # There is a need for us to be a little more sophisticated
    # in terms of the number of peers this particular drone had
    # It's here in this place that we will eventually add the ability
    # to distinguish death of a switch or subnet or site from death of a single drone
    for mightbering in CMAdb.store.load_in_related(self, None, nodeconstructor):
        if isinstance(mightbering, HbRing):
            mightbering.leave(self)
    deadip = pyNetAddr(self.select_ip(), port=self.port)
    if CMAdb.debug:
        CMAdb.log.debug('Closing connection to %s/%d' % (deadip, DEFAULT_FSP_QID))
    #
    # So, if this is a death report from another system we could shut down ungracefully
    # and it would be OK.
    #
    # But if it's a graceful shutdown, we need to not screw up the comm shutdown in progress
    # If it's broken, our tests and the real world will eventually show that up :-D.
    #
    if reason != 'HBSHUTDOWN':
        self._io.closeconn(DEFAULT_FSP_QID, deadip)
    AssimEvent(self, AssimEvent.OBJDOWN)
def dispatch(self, origaddr, frameset):
    '''Dispatch a STARTUP FrameSet from a nanoprobe: register the drone,
    validate/record its crypto identity, send it our configuration, join it
    to TheOneRing and kick off initial discovery.'''
    json = None
    addrstr = repr(origaddr)
    fstype = frameset.get_framesettype()
    localtime = None
    listenaddr = None
    keyid = None
    pubkey = None
    keysize = None
    #print >> sys.stderr, ("DispatchSTARTUP: received [%s] FrameSet from [%s]"
    #% (FrameSetTypes.get(fstype)[0], addrstr))
    if CMAdb.debug:
        CMAdb.log.debug(
            "DispatchSTARTUP: received [%s] FrameSet from [%s]"
            % (FrameSetTypes.get(fstype)[0], addrstr))
    if not self.io.connactive(origaddr):
        # Stale half-open connection: reset it and queue an ACKSTARTUP reply
        self.io.closeconn(DEFAULT_FSP_QID, origaddr)
        CMAdb.transaction.post_transaction_packets.append(
            FrameSetTypes.ACKSTARTUP)
    # Pull the fields we care about out of the individual frames
    # NOTE(review): 'sysname' is only bound by a HOSTNAME frame; a STARTUP
    # without one would raise NameError below -- presumably the protocol
    # guarantees its presence; confirm.
    for frame in frameset.iter():
        frametype = frame.frametype()
        if frametype == FrameTypes.WALLCLOCK:
            localtime = str(frame.getint())
        elif frametype == FrameTypes.IPPORT:
            listenaddr = frame.getnetaddr()
        elif frametype == FrameTypes.HOSTNAME:
            sysname = frame.getstr()
            if sysname == CMAdb.nodename:
                # The CMA's own nanoprobe: alias loopback addresses to its real one
                if origaddr.islocal():
                    CMAdb.log.info(
                        "Received STARTUP from local system (%s)" % addrstr)
                else:
                    addresses = ['127.0.0.1', '::ffff:127.0.0.1', '::1']
                    for address in addresses:
                        localhost = pyNetAddr(address)
                        self.io.addalias(localhost, origaddr)
                        CMAdb.log.info("Aliasing %s to %s" % (localhost, origaddr))
        elif frametype == FrameTypes.JSDISCOVER:
            json = frame.getstr()
            #print >> sys.stderr, 'GOT JSDISCOVER JSON: [%s] (strlen:%s,framelen:%s)' \
            #% (json, len(json), frame.framelen())
        elif frametype == FrameTypes.KEYID:
            keyid = frame.getstr()
        elif frametype == FrameTypes.PUBKEYCURVE25519:
            pubkey = frame.framevalue()
            keysize = frame.framelen()
    joininfo = pyConfigContext(init=json)
    # validate_source_ip may rewrite origaddr (e.g. for NATed drones)
    origaddr, isNAT = self.validate_source_ip(sysname, origaddr, joininfo, listenaddr)
    CMAdb.log.info(
        'Drone %s registered from address %s (%s) port %s, key_id %s'
        % (sysname, origaddr, addrstr, origaddr.port(), keyid))
    drone = self.droneinfo.add(sysname, 'STARTUP packet', port=origaddr.port(),
                               primary_ip_addr=str(origaddr))
    drone.listenaddr = str(listenaddr)  # Seems good to hang onto this...
    drone.isNAT = isNAT  # ditto...
    if CMAdb.debug:
        CMAdb.log.debug('DRONE select_ip() result: %s' % (drone.select_ip()))
        CMAdb.log.debug('DRONE listenaddr: %s' % (drone.listenaddr))
        CMAdb.log.debug('DRONE port: %s (%s)' % (drone.port, type(drone.port)))
    # Did they give us the crypto info we need?
    if keyid is None or pubkey is None:
        if CMAdb.debug:
            CMAdb.log.debug(
                'Drone %s registered with keyid %s and pubkey provided: %s'
                % (self, keyid, pubkey is not None))
    else:
        if drone.key_id == '':
            # First registration for this drone: accept and persist its public key
            if not keyid.startswith(sysname + "@@"):
                CMAdb.log.warning(
                    "Drone %s wants to register with key_id %s -- permitted.",
                    sysname, keyid)
            if not cryptcurve25519_save_public_key(keyid, pubkey, keysize):
                raise ValueError(
                    "Drone %s public key (key_id %s, %d bytes) is invalid."
                    % (sysname, keyid, keysize))
        elif drone.key_id != keyid:
            # Key mismatch: refuse re-registration under a different identity
            raise ValueError(
                "Drone %s tried to register with key_id %s instead of %s."
                % (sysname, keyid, drone.key_id))
        drone.set_crypto_identity(keyid=keyid)
        pyCryptFrame.dest_set_key_id(origaddr, keyid)
    #
    # THIS IS HERE BECAUSE OF A PROTOCOL BUG...
    # @FIXME Protocol bug when starting up a connection if our first (this) packet gets lost,
    # then the protocol doesn't retransmit it.
    # More specifically, it seems to clear it out of the queue.
    # This might be CMA bug or a protocol bug. It's not clear...
    # The packet goes into the queue, but if that packet is lost in transmission, then when
    # we come back around here, it's not in the queue any more, even though it
    # definitely wasn't ACKed.
    # Once this is fixed, this "add_packet" call needs to go *after* the 'if' statement below.
    #
    CMAdb.transaction.add_packet(origaddr, FrameSetTypes.SETCONFIG, (str(self.config), ),
                                 FrameTypes.CONFIGJSON)
    if (localtime is not None):
        if (drone.lastjoin == localtime):
            # Same wall-clock timestamp => retransmitted STARTUP; don't re-register
            CMAdb.log.warning('Drone %s [%s] sent duplicate STARTUP' % (sysname, origaddr))
            if CMAdb.debug:
                self.io.log_conn(origaddr)
            return
        drone.lastjoin = localtime
    #print >> sys.stderr, 'DRONE from find: ', drone, type(drone), drone.port
    drone.startaddr = str(origaddr)
    if json is not None:
        drone.logjson(origaddr, json)
    if CMAdb.debug:
        CMAdb.log.debug('Joining TheOneRing: %s / %s / %s' % (drone, type(drone), drone.port))
    CMAdb.cdb.TheOneRing.join(drone)
    if CMAdb.debug:
        CMAdb.log.debug('Requesting Discovery from %s' % str(drone))
    # Build the list of initial discovery requests for this drone
    discovery_params = []
    for agent in self.config['initial_discovery']:
        params = ConfigFile.agent_params(self.config, 'discovery', agent, sysname)
        params['agent'] = agent
        params['instance'] = '_init_%s' % agent
        discovery_params.append(params)
    # Discover the permissions of all the lists of files we're configured to ask about
    # Note that there are several lists to keep the amount of data in any one list
    # down to a somewhat more reasonable level. 'fileattrs' output is really verbose
    for pathlist_name in self.config['perm_discovery_lists']:
        paths = self.config[pathlist_name]
        params = ConfigFile.agent_params(self.config, 'discovery', 'fileattrs', sysname)
        params['agent'] = 'fileattrs'
        params['instance'] = pathlist_name
        params['parameters'] = {'ASSIM_filelist': paths}
        discovery_params.append(params)
    if CMAdb.debug:
        CMAdb.log.debug('Discovery details: %s' % str(discovery_params))
        for item in discovery_params:
            CMAdb.log.debug('Discovery item details: %s' % str(item))
    drone.request_discovery(discovery_params)
    AssimEvent(drone, AssimEvent.OBJUP)
def processpkt(self, drone, _srcaddr, jsonobj, discoverychanged):
    '''Save away the network configuration data we got from netconfig JSON discovery.
    This includes all our NICs, their MAC addresses, all our IP addresses and so on
    for any (non-loopback) interface. Whee!
    This code is more complicated than I'd like but it's not obvious how to simplify it...
    '''
    assert self.store.has_node(drone)
    if not discoverychanged:
        # Nothing new to record; skip the graph update entirely
        return
    data = jsonobj['data'] # The data portion of the JSON message
    currmacs = {}
    # Currmacs is a list of current NICNode objects belonging to this host
    # indexed by MAC address
    # Get our current list of NICs
    iflist = self.store.load_related(drone, CMAconsts.REL_nicowner, NICNode)
    for nic in iflist:
        currmacs[nic.macaddr] = nic
    primaryifname = None
    newmacs = {}
    # Newmacs is a list of NICNode objects found/created by this discovery
    # They are indexed by MAC address
    for ifname in data.keys(): # List of interfaces just below the data section
        ifinfo = data[ifname]
        if 'address' not in ifinfo:
            continue
        macaddr = str(ifinfo['address'])
        newnic = self.store.load_or_create(NICNode, domain=drone.domain,
                                           macaddr=macaddr, ifname=ifname, json=str(ifinfo))
        newmacs[macaddr] = newnic
        # First interface with a default gateway is treated as "primary"
        if 'default_gw' in ifinfo and primaryifname is None:
            primaryifname = ifname
    # Now compare the two sets of MAC addresses (old and new) and update the "old" MAC
    # address with info from the new discovery and deleting any MAC addresses that
    # we don't have any more...
    for macaddr in currmacs.keys():
        currmac = currmacs[macaddr]
        if macaddr in newmacs:
            # This MAC may need updating
            newmacs[macaddr] = currmac.update_attributes(newmacs[macaddr])
        else:
            # This MAC has disappeared
            # NOTE(review): separating a NIC with REL_ipowner looks inconsistent --
            # NICs are attached via REL_nicowner above; confirm which was intended.
            self.store.separate(drone, CMAconsts.REL_ipowner, currmac)
            #self.store.separate(drone, CMAconsts.REL_causes, currmac)
            # @TODO Needs to be a 'careful, complete' reference count deletion...
            self.store.delete(currmac)
            del currmacs[macaddr]
    currmacs = None
    # Create REL_nicowner relationships for any newly created NIC nodes
    for macaddr in newmacs.keys():
        nic = newmacs[macaddr]
        self.store.relate_new(drone, CMAconsts.REL_nicowner, nic, {'causes': True})
        #self.store.relate(drone, CMAconsts.REL_causes, nic)
    # Now newmacs contains all the updated info about our current NICs
    # Let's figure out what's happening with our IP addresses...
    primaryip = None
    for macaddr in newmacs.keys():
        mac = newmacs[macaddr]
        ifname = mac.ifname
        #print >> sys.stderr, 'MAC IS', str(mac)
        #print >> sys.stderr, 'DATA IS:', str(data)
        #print >> sys.stderr, 'IFNAME IS', str(ifname)
        iptable = data[str(ifname)]['ipaddrs']
        currips = {}
        iplist = self.store.load_related(mac, CMAconsts.REL_ipowner, IPaddrNode)
        for ip in iplist:
            currips[ip.ipaddr] = ip
        newips = {}
        for ip in iptable.keys(): # keys are 'ip/mask' in CIDR format
            ipname = ':::INVALID:::'
            ipinfo = iptable[ip]
            if 'name' in ipinfo:
                ipname = ipinfo['name']
            if ipinfo['scope'] != 'global':
                # Only global-scope addresses are recorded in the graph
                continue
            iponly, cidrmask = ip.split('/')
            netaddr = pyNetAddr(iponly).toIPv6()
            if netaddr.islocal():
                # We ignore loopback addresses - might be wrong...
                continue
            ipnode = self.store.load_or_create(
                IPaddrNode, domain=drone.domain, ipaddr=str(netaddr), cidrmask=cidrmask)
            ## FIXME: Not an ideal way to determine primary (preferred) IP address...
            ## it's a bit idiosyncratic to Linux...
            ## A better way would be to use their 'startaddr' (w/o the port)
            ## This uses the IP address they used to talk to us.
            if ifname == primaryifname and primaryip is None and ipname == ifname:
                primaryip = ipnode
                drone.primary_ip_addr = str(primaryip.ipaddr)
            newips[str(netaddr)] = ipnode
        # compare the two sets of IP addresses (old and new)
        for ipaddr in currips.keys():
            currip = currips[ipaddr]
            if ipaddr in newips:
                # Existing address: merge discovery attributes into the old node
                newips[ipaddr] = currip.update_attributes(newips[ipaddr])
            else:
                #print >> sys.stderr, 'Deleting address %s from MAC %s' % (currip, macaddr)
                #print >> sys.stderr, 'currip:%s, currips:%s' % (str(currip), str(currips))
                self.log.debug('Deleting address %s from MAC %s' % (currip, macaddr))
                self.log.debug('currip:%s, currips:%s' % (str(currip), str(currips)))
                self.store.separate(mac, rel_type=CMAconsts.REL_ipowner, obj=currip)
                # @TODO Needs to be a 'careful, complete' reference count deletion...
                self.store.delete(currip)
                del currips[ipaddr]
        # Create REL_ipowner relationships for all the newly created IP nodes
        for ipaddr in newips.keys():
            ip = newips[ipaddr]
            self.store.relate_new(mac, CMAconsts.REL_ipowner, ip, {'causes': True})
def dispatch(self, origaddr, frameset):
    '''Handle a STARTUP FrameSet: register the sending drone, check its crypto
    credentials, push our configuration to it, add it to TheOneRing, and
    request its initial discovery data.'''
    json = None
    addrstr = repr(origaddr)
    fstype = frameset.get_framesettype()
    localtime = None
    listenaddr = None
    keyid = None
    pubkey = None
    keysize = None
    #print >> sys.stderr, ("DispatchSTARTUP: received [%s] FrameSet from [%s]"
    #% (FrameSetTypes.get(fstype)[0], addrstr))
    if CMAdb.debug:
        CMAdb.log.debug("DispatchSTARTUP: received [%s] FrameSet from [%s]"
                        % (FrameSetTypes.get(fstype)[0], addrstr))
    if not self.io.connactive(origaddr):
        # Connection is no longer active: reset it and queue an ACKSTARTUP
        self.io.closeconn(DEFAULT_FSP_QID, origaddr)
        CMAdb.transaction.post_transaction_packets.append(FrameSetTypes.ACKSTARTUP)
    # Walk the frames, extracting the fields we need
    # NOTE(review): 'sysname' is only assigned by a HOSTNAME frame -- a packet
    # lacking one would hit a NameError below; presumably guaranteed by protocol.
    for frame in frameset.iter():
        frametype = frame.frametype()
        if frametype == FrameTypes.WALLCLOCK:
            localtime = str(frame.getint())
        elif frametype == FrameTypes.IPPORT:
            listenaddr = frame.getnetaddr()
        elif frametype == FrameTypes.HOSTNAME:
            sysname = frame.getstr()
            if sysname == CMAdb.nodename:
                # This is our own local nanoprobe; alias loopbacks to its address
                if origaddr.islocal():
                    CMAdb.log.info("Received STARTUP from local system (%s)" % addrstr)
                else:
                    addresses = ['127.0.0.1', '::ffff:127.0.0.1', '::1' ]
                    for address in addresses:
                        localhost = pyNetAddr(address)
                        self.io.addalias(localhost, origaddr)
                        CMAdb.log.info("Aliasing %s to %s" % (localhost, origaddr))
        elif frametype == FrameTypes.JSDISCOVER:
            json = frame.getstr()
            #print >> sys.stderr, 'GOT JSDISCOVER JSON: [%s] (strlen:%s,framelen:%s)' \
            #% (json, len(json), frame.framelen())
        elif frametype == FrameTypes.KEYID:
            keyid = frame.getstr()
        elif frametype == FrameTypes.PUBKEYCURVE25519:
            pubkey = frame.framevalue()
            keysize = frame.framelen()
    joininfo = pyConfigContext(init=json)
    # May substitute a different origaddr (e.g. when the drone is NATed)
    origaddr, isNAT = self.validate_source_ip(sysname, origaddr, joininfo, listenaddr)
    CMAdb.log.info('Drone %s registered from address %s (%s) port %s, key_id %s'
                   % (sysname, origaddr, addrstr, origaddr.port(), keyid))
    drone = self.droneinfo.add(sysname, 'STARTUP packet', port=origaddr.port()
                               , primary_ip_addr=str(origaddr))
    drone.listenaddr = str(listenaddr)  # Seems good to hang onto this...
    drone.isNAT = isNAT  # ditto...
    # Did they give us the crypto info we need?
    if keyid is None or pubkey is None:
        if CMAdb.debug:
            CMAdb.log.debug('Drone %s registered with keyid %s and pubkey provided: %s'
                            % (self, keyid, pubkey is not None))
    else:
        if drone.key_id == '':
            # New drone: record its public key (warn on unconventional key_id)
            if not keyid.startswith(sysname + "@@"):
                CMAdb.log.warning("Drone %s wants to register with key_id %s -- permitted."
                                  , sysname, keyid)
            if not cryptcurve25519_save_public_key(keyid, pubkey, keysize):
                raise ValueError("Drone %s public key (key_id %s, %d bytes) is invalid."
                                 % (sysname, keyid, keysize))
        elif drone.key_id != keyid:
            # Identity change attempts are rejected outright
            raise ValueError("Drone %s tried to register with key_id %s instead of %s."
                             % (sysname, keyid, drone.key_id))
        drone.set_crypto_identity(keyid=keyid)
        pyCryptFrame.dest_set_key_id(origaddr, keyid)
    #
    # THIS IS HERE BECAUSE OF A PROTOCOL BUG...
    # @FIXME Protocol bug when starting up a connection if our first (this) packet gets lost,
    # then the protocol doesn't retransmit it.
    # More specifically, it seems to clear it out of the queue.
    # This might be CMA bug or a protocol bug. It's not clear...
    # The packet goes into the queue, but if that packet is lost in transmission, then when
    # we come back around here, it's not in the queue any more, even though it
    # definitely wasn't ACKed.
    # Once this is fixed, this "add_packet" call needs to go *after* the 'if' statement below.
    #
    CMAdb.transaction.add_packet(origaddr, FrameSetTypes.SETCONFIG, (str(self.config), )
                                 , FrameTypes.CONFIGJSON)
    if (localtime is not None):
        if (drone.lastjoin == localtime):
            # Identical wall-clock stamp means this STARTUP is a retransmission
            CMAdb.log.warning('Drone %s [%s] sent duplicate STARTUP' % (sysname, origaddr))
            if CMAdb.debug:
                self.io.log_conn(origaddr)
            return
        drone.lastjoin = localtime
    #print >> sys.stderr, 'DRONE from find: ', drone, type(drone), drone.port
    drone.startaddr = str(origaddr)
    if json is not None:
        drone.logjson(origaddr, json)
    if CMAdb.debug:
        CMAdb.log.debug('Joining TheOneRing: %s / %s / %s' % (drone, type(drone), drone.port))
    CMAdb.cdb.TheOneRing.join(drone)
    if CMAdb.debug:
        CMAdb.log.debug('Requesting Discovery from %s' % str(drone))
    # Assemble the initial discovery requests for this drone
    discovery_params = []
    for agent in self.config['initial_discovery']:
        params = ConfigFile.agent_params(self.config, 'discovery', agent, sysname)
        params['agent'] = agent
        params['instance'] = '_init_%s' % agent
        discovery_params.append(params)
    # Discover the permissions of all the lists of files we're configured to ask about
    # Note that there are several lists to keep the amount of data in any one list
    # down to a somewhat more reasonable level. 'fileattrs' output is really verbose
    for pathlist_name in self.config['perm_discovery_lists']:
        paths = self.config[pathlist_name]
        params = ConfigFile.agent_params(self.config, 'discovery', 'fileattrs', sysname)
        params['agent'] = 'fileattrs'
        params['instance'] = pathlist_name
        params['parameters'] = {'ASSIM_filelist': paths}
        discovery_params.append(params)
    if CMAdb.debug:
        CMAdb.log.debug('Discovery details: %s' % str(discovery_params))
    drone.request_discovery(discovery_params)
    AssimEvent(drone, AssimEvent.OBJUP)
def destaddr(self, ring=None):
    '''Return the "primary" IP for this host as a pyNetAddr with port'''
    primary_ip = self.select_ip(ring=ring)
    return pyNetAddr(primary_ip, port=self.port)
def main():
    '''Main program for the CMA (Collective Management Authority).

    Parses command-line options, handles --status/--kill shortcuts, sets up
    crypto keys, daemonizes, builds the network configuration, initializes
    the database, and finally runs the packet listener loop.
    Returns a process exit code (0 on success).
    '''
    py2neo_major_version = int(PY2NEO_VERSION.partition('.')[0])
    if py2neo_major_version not in SUPPORTED_PY2NEO_VERSIONS:
        raise EnvironmentError('py2neo version %s not supported' % PY2NEO_VERSION)
    DefaultPort = 1984
    # VERY Linux-specific - but useful and apparently correct ;-)
    PrimaryIPcmd = \
        "ip address show primary scope global | grep '^ *inet' | sed -e 's%^ *inet *%%' -e 's%/.*%%'"
    ipfd = os.popen(PrimaryIPcmd, 'r')
    OurAddrStr = ('%s:%d' % (ipfd.readline().rstrip(), DefaultPort))
    ipfd.close()
    parser = optparse.OptionParser(
        prog='CMA', version=AssimCtypes.VERSION_STRING,
        description='Collective Management Authority for the Assimilation System',
        usage='cma.py [--bind address:port]')
    parser.add_option('-b', '--bind', action='store', default=None, dest='bind',
                      metavar='address:port-to-bind-to',
                      help='Address:port to listen to - for nanoprobes to connect to')
    parser.add_option('-d', '--debug', action='store', default=0, dest='debug',
                      help='enable debug for CMA and libraries - value is debug level for C libraries.')
    parser.add_option('-s', '--status', action='store_true', default=False, dest='status',
                      help='Return status of running CMA')
    parser.add_option('-k', '--kill', action='store_true', default=False, dest='kill',
                      help='Shut down running CMA.')
    parser.add_option('-e', '--erasedb', action='store_true', default=False, dest='erasedb',
                      help='Erase Neo4J before starting')
    parser.add_option('-f', '--foreground', action='store_true', default=False,
                      dest='foreground',
                      help='keep the CMA from going into the background')
    parser.add_option('-p', '--pidfile', action='store', default='/var/run/assimilation/cma',
                      dest='pidfile', metavar='pidfile-pathname',
                      help='full pathname of where to locate our pid file')
    parser.add_option('-T', '--trace', action='store_true', default=False, dest='doTrace',
                      help='Trace CMA execution')
    parser.add_option('-u', '--user', action='store', default=CMAUSERID, dest='userid',
                      metavar='userid', help='userid to run the CMA as')
    opt = parser.parse_args()[0]
    from AssimCtypes import daemonize_me, assimilation_openlog, are_we_already_running, \
        kill_pid_service, pidrunningstat_to_status, remove_pid_file, rmpid_and_exit_on_signal
    if opt.status:
        # Just report whether a CMA is already running
        rc = pidrunningstat_to_status(are_we_already_running(opt.pidfile, None))
        return rc
    if opt.kill:
        if kill_pid_service(opt.pidfile, 15) < 0:
            print >> sys.stderr, "Unable to stop CMA."
            return 1
        return 0
    opt.debug = int(opt.debug)
    # This doesn't seem to work no matter where I invoke it...
    # But if we don't fork in daemonize_me() ('C' code), it works great...
    #    def cleanup():
    #        remove_pid_file(opt.pidfile)
    #    atexit.register(cleanup)
    #    signal.signal(signal.SIGTERM, lambda sig, stack: sys.exit(0))
    #    signal.signal(signal.SIGINT, lambda sig, stack: sys.exit(0))
    from cmadb import CMAdb
    CMAdb.running_under_docker()
    make_pid_dir(opt.pidfile, opt.userid)
    make_key_dir(CRYPTKEYDIR, opt.userid)
    cryptwarnings = pyCryptCurve25519.initkeys()
    for warn in cryptwarnings:
        print >> sys.stderr, ("WARNING: %s" % warn)
    #print >> sys.stderr, 'All known key ids:'
    keyids = pyCryptFrame.get_key_ids()
    keyids.sort()
    for keyid in keyids:
        if not keyid.startswith(CMA_KEY_PREFIX):
            try:
                # @FIXME This is not an ideal way to associate identities with hosts
                # in a multi-tenant environment
                # @FIXME - don't think I need to do the associate_identity at all any more...
                hostname, notused_post = keyid.split('@@', 1)
                notused_post = notused_post
                pyCryptFrame.associate_identity(hostname, keyid)
            except ValueError:
                pass
        #print >> sys.stderr, '>    %s/%s' % (keyid, pyCryptFrame.get_identity(keyid))
    daemonize_me(opt.foreground, '/', opt.pidfile, 20)
    rmpid_and_exit_on_signal(opt.pidfile, signal.SIGTERM)
    # Next statement can't appear before daemonize_me() or bind() fails -- not quite sure why...
    assimilation_openlog("cma")
    from packetlistener import PacketListener
    from messagedispatcher import MessageDispatcher
    from dispatchtarget import DispatchTarget
    from monitoring import MonitoringRule
    from AssimCclasses import pyNetAddr, pySignFrame, pyReliableUDP, \
        pyPacketDecoder
    from AssimCtypes import CONFIGNAME_CMAINIT, CONFIGNAME_CMAADDR, CONFIGNAME_CMADISCOVER, \
        CONFIGNAME_CMAFAIL, CONFIGNAME_CMAPORT, CONFIGNAME_OUTSIG, CONFIGNAME_COMPRESSTYPE, \
        CONFIGNAME_COMPRESS, proj_class_incr_debug, LONG_LICENSE_STRING, MONRULEINSTALL_DIR
    if opt.debug:
        print >> sys.stderr, ('Setting debug to %s' % opt.debug)
    for debug in range(opt.debug):
        debug = debug
        print >> sys.stderr, ('Incrementing C-level debug by one.')
        proj_class_incr_debug(None)
    # Input our monitoring rule templates
    # They only exist in flat files and in memory - they aren't in the database
    MonitoringRule.load_tree(MONRULEINSTALL_DIR)
    print >> sys.stderr, ('Monitoring rules loaded from %s' % MONRULEINSTALL_DIR)
    execobserver_constraints = {
        'nodetype': ['Drone', 'IPaddrNode', 'MonitorAction', 'NICNode',
                     'ProcessNode', 'SystemNode', ]
    }
    ForkExecObserver(constraints=execobserver_constraints, scriptdir=NOTIFICATION_SCRIPT_DIR)
    print >> sys.stderr, ('Fork/Event observer dispatching from %s' % NOTIFICATION_SCRIPT_DIR)
    if opt.bind is not None:
        OurAddrStr = opt.bind
    OurAddr = pyNetAddr(OurAddrStr)
    if OurAddr.port() == 0:
        OurAddr.setport(DefaultPort)
    try:
        configinfo = ConfigFile(filename=CMAINITFILE)
    except IOError:
        configinfo = ConfigFile()
    if opt.bind is not None:
        bindaddr = pyNetAddr(opt.bind)
        if bindaddr.port() == 0:
            # BUG FIX: previously subscripted the ConfigFile *class*
            # (ConfigFile[CONFIGNAME_CMAPORT]) which raises TypeError;
            # the port lives in the configinfo instance.
            bindaddr.setport(configinfo[CONFIGNAME_CMAPORT])
        configinfo[CONFIGNAME_CMAINIT] = bindaddr
    configinfo[CONFIGNAME_CMADISCOVER] = OurAddr
    configinfo[CONFIGNAME_CMAFAIL] = OurAddr
    configinfo[CONFIGNAME_CMAADDR] = OurAddr
    if (CONFIGNAME_COMPRESSTYPE in configinfo):
        configinfo[CONFIGNAME_COMPRESS] \
            = pyCompressFrame(compression_method=configinfo[CONFIGNAME_COMPRESSTYPE])
    configinfo[CONFIGNAME_OUTSIG] = pySignFrame(1)
    config = configinfo.complete_config()
    addr = config[CONFIGNAME_CMAINIT]
    # pylint is confused: addr is a pyNetAddr, not a pyConfigContext
    # pylint: disable=E1101
    if addr.port() == 0:
        addr.setport(DefaultPort)
    ourport = addr.port()
    # Ensure all the CMA addresses we advertise carry the same port
    for elem in (CONFIGNAME_CMAINIT, CONFIGNAME_CMAADDR, CONFIGNAME_CMADISCOVER,
                 CONFIGNAME_CMAFAIL):
        if elem in config:
            config[elem] = pyNetAddr(str(config[elem]), port=ourport)
    io = pyReliableUDP(config, pyPacketDecoder())
    # No harm in asking - it will get us the best we can get...
    io.setrcvbufsize(10 * 1024 * 1024)
    # Most of the traffic volume is inbound from discovery
    io.setsendbufsize(1024 * 1024)
    drop_privileges_permanently(opt.userid)
    try:
        cmainit.CMAinit(io, cleanoutdb=opt.erasedb, debug=(opt.debug > 0))
    except RuntimeError:
        remove_pid_file(opt.pidfile)
        raise
    for warn in cryptwarnings:
        CMAdb.log.warning(warn)
    neovers = CMAdb.cdb.db.neo4j_version
    # Render the Neo4j version once; beta versions carry a 4th component,
    # so format only the components that exist (a fixed '%s.%s.%s' against a
    # 4-tuple - or '%s.%s.%s%s' against 3 values - raises TypeError).
    neoversstring = (('%s.%s.%s' % tuple(neovers[0:3])) if len(neovers) == 3
                     else ('%s.%s.%s%s' % tuple(neovers[0:4])))
    if neovers[0] not in SUPPORTED_NEO4J_VERSIONS:
        # BUG FIX: was '%s.%s.%s' % neo4j_version - TypeError for 4-part versions
        raise EnvironmentError('Neo4j version %s not supported' % neoversstring)
    CMAdb.log.info('Listening on: %s' % str(config[CONFIGNAME_CMAINIT]))
    CMAdb.log.info('Requesting return packets sent to: %s' % str(OurAddr))
    CMAdb.log.info('Socket input buffer size:  %d' % io.getrcvbufsize())
    CMAdb.log.info('Socket output buffer size: %d' % io.getsendbufsize())
    keyids = pyCryptFrame.get_key_ids()
    keyids.sort()
    for keyid in keyids:
        CMAdb.log.info('KeyId %s Identity %s' % (keyid, pyCryptFrame.get_identity(keyid)))
    if CMAdb.debug:
        CMAdb.log.debug('C-library Debug was set to %s' % opt.debug)
        CMAdb.log.debug('TheOneRing created - id = %s' % CMAdb.TheOneRing)
        CMAdb.log.debug('Config Object sent to nanoprobes: %s' % config)
    jvmfd = os.popen('java -version 2>&1')
    jvers = jvmfd.readline()
    jvmfd.close()
    disp = MessageDispatcher(DispatchTarget.dispatchtable)
    CMAdb.log.info('Starting CMA version %s - licensed under %s'
                   % (AssimCtypes.VERSION_STRING, LONG_LICENSE_STRING))
    # BUG FIX: was '%s.%s.%s' % neo4j_version - TypeError for 4-part versions;
    # use the already-computed neoversstring (identical output for 3-part versions)
    CMAdb.log.info('Neo4j version %s // py2neo version %s // Python version %s // %s'
                   % (neoversstring, str(py2neo.__version__),
                      ('%s.%s.%s' % sys.version_info[0:3]), jvers))
    if opt.foreground:
        print >> sys.stderr, ('Starting CMA version %s - licensed under %s'
                              % (AssimCtypes.VERSION_STRING, LONG_LICENSE_STRING))
        print >> sys.stderr, ('Neo4j version %s // py2neo version %s // Python version %s // %s'
                              % (neoversstring, PY2NEO_VERSION,
                                 ('%s.%s.%s' % sys.version_info[0:3]), jvers))
    if len(neovers) > 3:
        CMAdb.log.warning('Neo4j version %s is beta code - results not guaranteed.'
                          % str(neovers))
    # Important to note that we don't want PacketListener to create its own 'io' object
    # or it will screw up the ReliableUDP protocol...
    listener = PacketListener(config, disp, io=io)
    mandatory_modules = ['discoverylistener']
    for mandatory in mandatory_modules:
        importlib.import_module(mandatory)
    #pylint is confused here...
    # pylint: disable=E1133
    for optional in config['optional_modules']:
        importlib.import_module(optional)
    if opt.doTrace:
        import trace
        tracer = trace.Trace(count=False, trace=True)
        if CMAdb.debug:
            CMAdb.log.debug('Starting up traced listener.listen(); debug=%d' % opt.debug)
        if opt.foreground:
            print >> sys.stderr, ('cma: Starting up traced listener.listen() in foreground; debug=%d'
                                  % opt.debug)
        tracer.run('listener.listen()')
    else:
        if CMAdb.debug:
            CMAdb.log.debug('Starting up untraced listener.listen(); debug=%d' % opt.debug)
        if opt.foreground:
            print >> sys.stderr, ('cma: Starting up untraced listener.listen() in foreground; debug=%d'
                                  % opt.debug)
        # This is kind of a kludge, we should really look again at
        # at initializition and so on.
        # This module *ought* to be optional.
        # that would involve adding some Drone callbacks for creation of new Drones
        BestPractices(config, io, CMAdb.store, CMAdb.log, opt.debug)
        listener.listen()
    return 0
def default_defaults():
    '''This is our default - for defaults
    Sounds kinda weird, but it makes sense - and is handy for our tests to not
    have to have the current defaults updated all the time...
    '''
    return {
        'OUI': {
            # Addendum of locally-known OUIs - feel free to contribute ones you find...
            # Python includes lots of them, but is missing newer ones.
            # Note that they have to be in lower case with '-' separators.
            # You can find the latest data here:
            #      http://standards.ieee.org/cgi-bin/ouisearch
            '18-0c-ac': 'Canon, Inc.',
            '28-d2-44': 'LCFC(HeFei) Electronics Technology Co., Ltd.',
            '84-7a-88': 'HTC Corporation',
            'b0-79-3c': 'Revolv, Inc.',
            'b8-ee-65': 'Liteon Technology Corporation',
            'bc-ee-7b': 'ASUSTek Computer, Inc.',
            'c8-b5-b7': 'Apple.',
            'cc-3a-61': 'SAMSUNG ELECTRO MECHANICS CO., LTD.',
            'd8-50-e6': 'ASUSTek COMPUTER INC.',
            'e8-ab-fa': 'Shenzhen Reecam Tech.Ltd.',
        },
        #
        # Below is the set of modules that we import before starting up
        # Each of them triggers different kinds of conditional discovery
        # as per its design...
        'optional_modules': [        # List of optional modules to be included
            'linkdiscovery',         # Perform CDP/LLDP monitoring
            'checksumdiscovery',     # Perform tripwire-like checksum monitoring
            'monitoringdiscovery',   # Initiates monitoring based on service
                                     # discovery
            'arpdiscovery',          # Listen for ARP packets: IPs and MACs
            'procsysdiscovery',      # Discovers content of /proc/sys
        ],
        'contrib_modules': [],       # List of contrib modules to be imported
        #
        # Always start these discovery plugins below when a Drone comes online
        #
        'initial_discovery': ['os',  # OS properties
            'cpu',                   # CPU properties
            'packages',              # What packages are installed?
            'monitoringagents',      # What monitoring agents are installed?
            'ulimit',                # What are current ulimit values?
            'commands',              # Discovers installed commands
            'nsswitch',              # Discovers nsswitch configuration (Linux)
            'findmnt',               # Discovers mounted filesystems (Linux)
            'sshd',                  # Discovers sshd configuration
            'tcpdiscovery'           # Discover services
        ],
        'cmaport': 1984,                        # Our listening port
        'cmainit': pyNetAddr("0.0.0.0:1984"),   # Our listening address
        'compression_threshold': 20000,         # Compress packets >= 20 kbytes
        'compression_method': "zlib",           # Compression method
        'discovery': {
            'repeat': 15*60,    # Default repeat interval in seconds
            'warn': 120,        # Default slow discovery warning time
            'timeout': 300,     # Default timeout interval in seconds
            'agents': {
                # Configuration information for individual agent types,
                # optionally including machine
                "checksumdiscovery": {'repeat': 3600*8, 'timeout': 10*60, 'warn': 300},
                "os": {'repeat': 0, 'timeout': 60, 'warn': 5},
                "cpu": {'repeat': 0, 'timeout': 60, 'warn': 5}
                # "arpdiscovery/servidor": {'repeat': 60},
            },
        },
        'monitoring': {
            'repeat': 120,      # Default repeat interval in seconds
            'warn': 60,         # Default slow monitoring warning time
            'timeout': 180,     # Default repeat interval in seconds
            'agents': {
                # Configuration information for individual agent types,
                # optionally including machine
                # "lsb::ssh": {'repeat': int, 'timeout': int},
                # "ocf::Neo4j/servidor": {'repeat': int, 'timeout': int},
                # "nagios::check_load": {'repeat': 60, 'timeout': 30,
                #    # I would really rather have a pure run queue length
                #    # but there's no agent for that. Sigh...
                #    # -r == scale load average by by number of CPUs
                #    # -w == floating point warning load averages
                #    #       (1,5,15 minute values)
                #    # -c == floating point critical load averages
                #    #       (1,5,15 minute values)
                #    'args': {'__ARGS__': ['-r', '-w', '4,3,2', '-c', '4,3,2']}},
            },
            'nagiospath': [                 # places to look for Nagios agents
                "/usr/lib/nagios/plugins",
                "/usr/local/nagios/libexec",
                "/usr/nagios/libexec",
                "/opt/nrpe/libexec"
            ],
        },
        'heartbeats': {
            'repeat': 1,    # how frequently to heartbeat - in seconds
            'warn': 5,      # How long to wait when issuing a late heartbeat warning
            'timeout': 30,  # How long to wait before declaring a system dead
        },
    }   # End of return value
def main():
    '''Main program for the CMA (Collective Management Authority).

    Parses the command line, handles --status/--kill requests immediately,
    then daemonizes, initializes crypto keys, constructs the configuration
    sent to nanoprobes, initializes the database and reliable-UDP I/O,
    and finally runs the packet listener loop.  Returns a process exit code.
    '''
    py2neo_major_version = int(PY2NEO_VERSION.partition('.')[0])
    if py2neo_major_version not in SUPPORTED_PY2NEO_VERSIONS:
        raise EnvironmentError('py2neo version %s not supported' % PY2NEO_VERSION)
    DefaultPort = 1984
    # VERY Linux-specific - but useful and apparently correct ;-)
    PrimaryIPcmd = \
        "ip address show primary scope global | grep '^ *inet' | sed -e 's%^ *inet *%%' -e 's%/.*%%'"
    ipfd = os.popen(PrimaryIPcmd, 'r')
    OurAddrStr = ('%s:%d' % (ipfd.readline().rstrip(), DefaultPort))
    ipfd.close()

    parser = optparse.OptionParser(prog='CMA', version=AssimCtypes.VERSION_STRING,
        description='Collective Management Authority for the Assimilation System',
        usage='cma.py [--bind address:port]')
    parser.add_option('-b', '--bind', action='store', default=None, dest='bind',
        metavar='address:port-to-bind-to',
        help='Address:port to listen to - for nanoprobes to connect to')
    parser.add_option('-d', '--debug', action='store', default=0, dest='debug',
        help='enable debug for CMA and libraries - value is debug level for C libraries.')
    parser.add_option('-s', '--status', action='store_true', default=False, dest='status',
        help='Return status of running CMA')
    parser.add_option('-k', '--kill', action='store_true', default=False, dest='kill',
        help='Shut down running CMA.')
    parser.add_option('-e', '--erasedb', action='store_true', default=False, dest='erasedb',
        help='Erase Neo4J before starting')
    parser.add_option('-f', '--foreground', action='store_true', default=False, dest='foreground',
        help='keep the CMA from going into the background')
    parser.add_option('-p', '--pidfile', action='store', default='/var/run/assimilation/cma',
        dest='pidfile', metavar='pidfile-pathname',
        help='full pathname of where to locate our pid file')
    parser.add_option('-T', '--trace', action='store_true', default=False, dest='doTrace',
        help='Trace CMA execution')
    parser.add_option('-u', '--user', action='store', default=CMAUSERID, dest='userid',
        metavar='userid',
        help='userid to run the CMA as')

    opt = parser.parse_args()[0]

    from AssimCtypes import daemonize_me, assimilation_openlog, are_we_already_running, \
        kill_pid_service, pidrunningstat_to_status, remove_pid_file, rmpid_and_exit_on_signal

    # --status and --kill act on an already-running CMA and return at once.
    if opt.status:
        rc = pidrunningstat_to_status(are_we_already_running(opt.pidfile, None))
        return rc
    if opt.kill:
        if kill_pid_service(opt.pidfile, 15) < 0:
            print >> sys.stderr, "Unable to stop CMA."
            return 1
        return 0
    opt.debug = int(opt.debug)

    # This doesn't seem to work no matter where I invoke it...
    # But if we don't fork in daemonize_me() ('C' code), it works great...
    #   def cleanup():
    #       remove_pid_file(opt.pidfile)
    #   atexit.register(cleanup)
    #   signal.signal(signal.SIGTERM, lambda sig, stack: sys.exit(0))
    #   signal.signal(signal.SIGINT, lambda sig, stack: sys.exit(0))

    from cmadb import CMAdb
    CMAdb.running_under_docker()
    make_pid_dir(opt.pidfile, opt.userid)
    make_key_dir(CRYPTKEYDIR, opt.userid)
    cryptwarnings = pyCryptCurve25519.initkeys()
    for warn in cryptwarnings:
        print >> sys.stderr, ("WARNING: %s" % warn)
    #print >> sys.stderr, 'All known key ids:'
    keyids = pyCryptFrame.get_key_ids()
    keyids.sort()
    for keyid in keyids:
        if not keyid.startswith(CMA_KEY_PREFIX):
            try:
                # @FIXME This is not an ideal way to associate identities with hosts
                # in a multi-tenant environment
                # @FIXME - don't think I need to do the associate_identity at all any more...
                hostname, notused_post = keyid.split('@@', 1)
                notused_post = notused_post
                pyCryptFrame.associate_identity(hostname, keyid)
            except ValueError:
                pass
        #print >> sys.stderr, '> %s/%s' % (keyid, pyCryptFrame.get_identity(keyid))

    daemonize_me(opt.foreground, '/', opt.pidfile, 20)
    rmpid_and_exit_on_signal(opt.pidfile, signal.SIGTERM)
    # Next statement can't appear before daemonize_me() or bind() fails -- not quite sure why...
    assimilation_openlog("cma")

    from packetlistener import PacketListener
    from messagedispatcher import MessageDispatcher
    from dispatchtarget import DispatchTarget
    from monitoring import MonitoringRule
    from AssimCclasses import pyNetAddr, pySignFrame, pyReliableUDP, \
        pyPacketDecoder
    from AssimCtypes import CONFIGNAME_CMAINIT, CONFIGNAME_CMAADDR, CONFIGNAME_CMADISCOVER, \
        CONFIGNAME_CMAFAIL, CONFIGNAME_CMAPORT, CONFIGNAME_OUTSIG, CONFIGNAME_COMPRESSTYPE, \
        CONFIGNAME_COMPRESS, proj_class_incr_debug, LONG_LICENSE_STRING, MONRULEINSTALL_DIR

    if opt.debug:
        print >> sys.stderr, ('Setting debug to %s' % opt.debug)
    # Bump the C-library debug level once per requested debug level.
    for debug in range(opt.debug):
        debug = debug
        print >> sys.stderr, ('Incrementing C-level debug by one.')
        proj_class_incr_debug(None)

    # Input our monitoring rule templates
    # They only exist in flat files and in memory - they aren't in the database
    MonitoringRule.load_tree(MONRULEINSTALL_DIR)
    print >> sys.stderr, ('Monitoring rules loaded from %s' % MONRULEINSTALL_DIR)

    execobserver_constraints = {
        'nodetype': ['Drone', 'IPaddrNode', 'MonitorAction', 'NICNode',
                     'ProcessNode', 'SystemNode',
                     ]
    }
    ForkExecObserver(constraints=execobserver_constraints, scriptdir=NOTIFICATION_SCRIPT_DIR)
    print >> sys.stderr, ('Fork/Event observer dispatching from %s' % NOTIFICATION_SCRIPT_DIR)

    if opt.bind is not None:
        OurAddrStr = opt.bind
    OurAddr = pyNetAddr(OurAddrStr)
    if OurAddr.port() == 0:
        OurAddr.setport(DefaultPort)
    try:
        configinfo = ConfigFile(filename=CMAINITFILE)
    except IOError:
        configinfo = ConfigFile()
    if opt.bind is not None:
        bindaddr = pyNetAddr(opt.bind)
        if bindaddr.port() == 0:
            # BUG FIX: this previously read ConfigFile[CONFIGNAME_CMAPORT],
            # subscripting the ConfigFile *class* (a TypeError at runtime).
            # The configured port lives in the 'configinfo' instance.
            bindaddr.setport(configinfo[CONFIGNAME_CMAPORT])
        configinfo[CONFIGNAME_CMAINIT] = bindaddr
    configinfo[CONFIGNAME_CMADISCOVER] = OurAddr
    configinfo[CONFIGNAME_CMAFAIL] = OurAddr
    configinfo[CONFIGNAME_CMAADDR] = OurAddr
    if (CONFIGNAME_COMPRESSTYPE in configinfo):
        configinfo[CONFIGNAME_COMPRESS] \
            = pyCompressFrame(compression_method=configinfo[CONFIGNAME_COMPRESSTYPE])
    configinfo[CONFIGNAME_OUTSIG] = pySignFrame(1)

    config = configinfo.complete_config()
    addr = config[CONFIGNAME_CMAINIT]
    # pylint is confused: addr is a pyNetAddr, not a pyConfigContext
    # pylint: disable=E1101
    if addr.port() == 0:
        addr.setport(DefaultPort)
    ourport = addr.port()
    # Force all our advertised addresses to carry our listening port.
    for elem in (CONFIGNAME_CMAINIT, CONFIGNAME_CMAADDR,
                 CONFIGNAME_CMADISCOVER, CONFIGNAME_CMAFAIL):
        if elem in config:
            config[elem] = pyNetAddr(str(config[elem]), port=ourport)

    io = pyReliableUDP(config, pyPacketDecoder())
    io.setrcvbufsize(10*1024*1024)  # No harm in asking - it will get us the best we can get...
    io.setsendbufsize(1024*1024)    # Most of the traffic volume is inbound from discovery
    drop_privileges_permanently(opt.userid)
    try:
        cmainit.CMAinit(io, cleanoutdb=opt.erasedb, debug=(opt.debug > 0))
    except RuntimeError:
        remove_pid_file(opt.pidfile)
        raise
    for warn in cryptwarnings:
        CMAdb.log.warning(warn)
    if CMAdb.cdb.db.neo4j_version[0] not in SUPPORTED_NEO4J_VERSIONS:
        # BUG FIX: format only the first three components; a 4-component
        # (beta) version tuple used to overflow the format's argument count.
        raise EnvironmentError('Neo4j version %s.%s.%s not supported'
                               % tuple(CMAdb.cdb.db.neo4j_version[0:3]))
    CMAdb.log.info('Listening on: %s' % str(config[CONFIGNAME_CMAINIT]))
    CMAdb.log.info('Requesting return packets sent to: %s' % str(OurAddr))
    CMAdb.log.info('Socket input buffer size:  %d' % io.getrcvbufsize())
    CMAdb.log.info('Socket output buffer size: %d' % io.getsendbufsize())
    keyids = pyCryptFrame.get_key_ids()
    keyids.sort()
    for keyid in keyids:
        CMAdb.log.info('KeyId %s Identity %s' % (keyid, pyCryptFrame.get_identity(keyid)))
    if CMAdb.debug:
        CMAdb.log.debug('C-library Debug was set to %s' % opt.debug)
        CMAdb.log.debug('TheOneRing created - id = %s' % CMAdb.TheOneRing)
        CMAdb.log.debug('Config Object sent to nanoprobes: %s' % config)

    jvmfd = os.popen('java -version 2>&1')
    jvers = jvmfd.readline()
    jvmfd.close()
    disp = MessageDispatcher(DispatchTarget.dispatchtable)
    neovers = CMAdb.cdb.db.neo4j_version
    # BUG FIX: the old one-liner applied a 4-placeholder format string to a
    # 3-element slice for beta (4-component) versions, raising TypeError.
    if len(neovers) == 3:
        neoversstring = '%s.%s.%s' % tuple(neovers[0:3])
    else:
        neoversstring = '%s.%s.%s%s' % tuple(neovers[0:4])
    CMAdb.log.info('Starting CMA version %s - licensed under %s'
                   % (AssimCtypes.VERSION_STRING, LONG_LICENSE_STRING))
    CMAdb.log.info('Neo4j version %s // py2neo version %s // Python version %s // %s'
                   % (neoversstring
                      , str(py2neo.__version__)
                      , ('%s.%s.%s' % sys.version_info[0:3])
                      , jvers))
    if opt.foreground:
        print >> sys.stderr, ('Starting CMA version %s - licensed under %s'
                              % (AssimCtypes.VERSION_STRING, LONG_LICENSE_STRING))
        print >> sys.stderr, ('Neo4j version %s // py2neo version %s // Python version %s // %s'
                              % (neoversstring
                                 , PY2NEO_VERSION
                                 , ('%s.%s.%s' % sys.version_info[0:3])
                                 , jvers))
    if len(neovers) > 3:
        CMAdb.log.warning('Neo4j version %s is beta code - results not guaranteed.'
                          % str(neovers))

    # Important to note that we don't want PacketListener to create its own 'io' object
    # or it will screw up the ReliableUDP protocol...
    listener = PacketListener(config, disp, io=io)
    mandatory_modules = ['discoverylistener']
    for mandatory in mandatory_modules:
        importlib.import_module(mandatory)
    #pylint is confused here...
    # pylint: disable=E1133
    for optional in config['optional_modules']:
        importlib.import_module(optional)

    if opt.doTrace:
        import trace
        tracer = trace.Trace(count=False, trace=True)
        if CMAdb.debug:
            CMAdb.log.debug('Starting up traced listener.listen(); debug=%d' % opt.debug)
        if opt.foreground:
            print >> sys.stderr, (
                'cma: Starting up traced listener.listen() in foreground; debug=%d' % opt.debug)
        tracer.run('listener.listen()')
    else:
        if CMAdb.debug:
            CMAdb.log.debug('Starting up untraced listener.listen(); debug=%d' % opt.debug)
        if opt.foreground:
            print >> sys.stderr, (
                'cma: Starting up untraced listener.listen() in foreground; debug=%d' % opt.debug)
        # This is kind of a kludge, we should really look again at
        # at initializition and so on.
        # This module *ought* to be optional.
        # that would involve adding some Drone callbacks for creation of new Drones
        BestPractices(config, io, CMAdb.store, CMAdb.log, opt.debug)
        listener.listen()
    return 0
def default_defaults():
    '''Return the compiled-in default configuration dictionary.

    "Defaults for the defaults" - admittedly odd-sounding, but this keeps
    the tests from having to track every change to the shipped defaults.
    '''
    # Locally-known OUI additions - Python ships many, but misses newer ones.
    # Keys must be lower case with '-' separators.  Latest data:
    # http://standards.ieee.org/cgi-bin/ouisearch
    oui_additions = {
        '18-0c-ac': 'Canon, Inc.',
        '28-d2-44': 'LCFC(HeFei) Electronics Technology Co., Ltd.',
        '84-7a-88': 'HTC Corporation',
        'b0-79-3c': 'Revolv, Inc.',
        'b8-ee-65': 'Liteon Technology Corporation',
        'bc-ee-7b': 'ASUSTek Computer, Inc.',
        'c8-b5-b7': 'Apple.',
        'cc-3a-61': 'SAMSUNG ELECTRO MECHANICS CO., LTD.',
        'd8-50-e6': 'ASUSTek COMPUTER INC.',
        'e8-ab-fa': 'Shenzhen Reecam Tech.Ltd.',
    }

    # Modules imported before startup; each triggers its own kind of
    # conditional discovery as per its design.
    optional_module_list = [
        'linkdiscovery',        # Perform CDP/LLDP monitoring
        'checksumdiscovery',    # Perform tripwire-like checksum monitoring
        'monitoringdiscovery',  # Initiates monitoring based on service discovery
        'arpdiscovery',         # Listen for ARP packets: IPs and MACs
        'procsysdiscovery',     # Discovers content of /proc/sys
    ]

    # Discovery plugins always started when a Drone comes online.
    startup_discovery = [
        'os',                   # OS properties
        'cpu',                  # CPU properties
        'packages',             # What packages are installed?
        'commands',             # Discovers installed commands
        'monitoringagents',     # What monitoring agents are installed?
        'login_defs',           # /etc/login.defs configuration
        'pam',                  # PAM configuration
        'sudoers',              # Discovers /etc/sudoers configuration
        'ulimit',               # What are current ulimit values?
        'nsswitch',             # Discovers nsswitch configuration (Linux)
        'findmnt',              # Discovers mounted filesystems (Linux)
        'sshd',                 # Discovers sshd configuration
        'tcpdiscovery',         # Discover services
    ]

    discovery_defaults = {
        'repeat': 60,       # Default repeat interval in seconds
        'warn': 120,        # Default slow discovery warning time
        'timeout': 300,     # Default timeout interval in seconds
        # Per-agent-type overrides, optionally including machine
        'agents': {
            "checksumdiscovery": {'repeat': 3600*8, 'timeout': 10*60, 'warn': 300},
            "os": {'repeat': 0, 'timeout': 60, 'warn': 5},
            "cpu": {'repeat': 0, 'timeout': 60, 'warn': 5},
        },
    }

    monitoring_defaults = {
        'repeat': 15,       # Default repeat interval in seconds
        'warn': 60,         # Default slow monitoring warning time
        'timeout': 180,     # Default repeat interval in seconds
        # Per-agent-type overrides, optionally including machine, e.g.:
        #   "lsb::ssh": {'repeat': int, 'timeout': int},
        #   "ocf::Neo4j/servidor": {'repeat': int, 'timeout': int},
        #   "nagios::check_load": {'repeat': 60, 'timeout': 30,
        #       # I would really rather have a pure run queue length
        #       # but there's no agent for that. Sigh...
        #       # -r == scale load average by number of CPUs
        #       # -w == floating point warning load averages (1,5,15 minute values)
        #       # -c == floating point critical load averages (1,5,15 minute values)
        #       'argv': ['-r', '-w', '4,3,2', '-c', '4,3,2']},
        'agents': {},
        # Places to look for Nagios agents
        'nagiospath': [
            "/usr/lib/nagios/plugins",
            "/usr/local/nagios/libexec",
            "/usr/nagios/libexec",
            "/opt/nrpe/libexec",
        ],
    }

    heartbeat_defaults = {
        'repeat': 1,        # how frequently to heartbeat - in seconds
        'warn': 5,          # How long to wait when issuing a late heartbeat warning
        'timeout': 30,      # How long to wait before declaring a system dead
    }

    # Prioritized list of checksum commands - we use the first one installed.
    checksum_commands = [
        '/usr/bin/sha256sum', '/usr/bin/sha224sum',
        '/usr/bin/sha384sum', '/usr/bin/sha512sum',
        '/usr/bin/sha1sum', '/usr/bin/md5sum',
        '/usr/bin/cksum', '/usr/bin/crc32',
    ]

    # Files/directories to always get the permissions of.
    # Directories ending in / also have their contained files checked.
    # (Commented-out entries were deliberately disabled, not forgotten.)
    permission_targets = [
        #'/',
        #'/bin',
        #'/dev',
        #'/etc',
        '/etc/audit/',
        '/etc/bash.bashrc',
        '/etc/bashrc',
        '/etc/bash_completion',
        '/etc/bash_completion.d/',
        #'/etc/grub.conf/',
        #'/etc/grub.d/',
        '/etc/group',
        '/etc/gshadow',
        '/etc/init.d/',
        '/etc/login.defs',
        '/etc/passwd',
        '/etc/profile',
        '/etc/profile.d/',
        '/etc/csh.cshrc',
        '/etc/selinux/',
        '/etc/shadow',
        '/lib/',
        '/lib64/',
        '/lib/modules/',
        #'/run',
        #'/run/lock',
        #'/run/user',
        '/sbin/',
        '/usr/',
        #'/usr/bin/',
        #'/usr/lib/',
        #'/usr/lib64/',
        '/usr/local/',
        #'/usr/local/bin/',
        #'/usr/local/sbin/',
        #'/usr/sbin',
        #'/var/',
        #'/var/log/',
    ]

    return {
        'OUI': oui_additions,
        'optional_modules': optional_module_list,
        'contrib_modules': [],                      # contrib modules to import
        'initial_discovery': startup_discovery,
        'cmaport': 1984,                            # Our listening port
        'cmainit': pyNetAddr("0.0.0.0:1984"),       # Our listening address
        'compression_threshold': 20000,             # Compress packets >= 20 kbytes
        'compression_method': "zlib",               # Compression method
        'discovery': discovery_defaults,
        'monitoring': monitoring_defaults,
        'heartbeats': heartbeat_defaults,
        # Default best practice rule sets by domain.
        # The global domain defaults to the base rule set; override the
        # global domain (or any other) in your local config file.
        # I suspect these default rules will turn out to be a bit
        # harsh for many sites.
        'bprulesbydomain': {
            CMAconsts.globaldomain: CMAconsts.BASERULESETNAME,
        },
        # List of all the known best practice discovery types
        'allbpdiscoverytypes': ['login_defs', 'pam', 'proc_sys', 'sshd'],
        'checksum_cmds': checksum_commands,
        # Files we *always* checksum
        'checksum_files': ['/bin/sh', '/bin/bash', '/bin/login', '/usr/bin/passwd', ],
        'permission_files': permission_targets,
    }
def processpkt(self, drone, unused_srcaddr, jsonobj):
    '''Save away the network configuration data we got from netconfig
    JSON discovery.

    This includes all our NICs, their MAC addresses, and all our IP
    addresses for every (non-loopback) interface.  For each of old/new
    NICs and old/new IPs we reconcile the graph: update nodes that are
    still present, delete the ones that went away, and relate the new
    ones.  This code is more complicated than I'd like but it's not
    obvious how to simplify it...
    '''
    unused_srcaddr = unused_srcaddr
    assert self.store.has_node(drone)
    data = jsonobj['data']  # The data portion of the JSON message

    # Get our current (graph) list of NICs, indexed by MAC address
    currmacs = {}
    iflist = self.store.load_related(drone, CMAconsts.REL_nicowner, NICNode)
    for nic in iflist:
        currmacs[nic.macaddr] = nic

    primaryifname = None
    newmacs = {}
    for ifname in data.keys():  # List of interfaces just below the data section
        ifinfo = data[ifname]
        if 'address' not in ifinfo:
            continue
        macaddr = str(ifinfo['address'])
        # Skip interfaces with all-zero-prefixed (pseudo) MAC addresses
        if macaddr.startswith('00:00:00:'):
            continue
        #print >> sys.stderr, 'CREATING NIC: MAC(%s) IF(%s)' % (str(macaddr), str(ifname))
        newnic = self.store.load_or_create(NICNode, domain=drone.domain
        ,                                  macaddr=macaddr, ifname=ifname)
        newnic.ifname = ifname      # @FIXME SHOULD THIS BE FIXED IN OBJECT CREATION??
        newnic.domain = drone.domain  # @FIXME SHOULD THIS BE FIXED IN OBJECT CREATION??
        #print >> sys.stderr, 'NIC CREATED: %s' % (str(newnic))
        newmacs[macaddr] = newnic
        # First interface with a default gateway is considered primary
        if 'default_gw' in ifinfo and primaryifname is None:
            primaryifname = ifname

    # Now compare the two sets of MAC addresses (old and new)
    for macaddr in currmacs.keys():
        currmac = currmacs[macaddr]
        if macaddr in newmacs:
            newmacs[macaddr] = currmac.update_attributes(newmacs[macaddr])
        else:
            # BUG FIX: this NIC is attached to the drone via REL_nicowner
            # (see load_related/relate_new above), not REL_ipowner -
            # separating the wrong relationship type left stale edges.
            self.store.separate(drone, CMAconsts.REL_nicowner, currmac)
            #self.store.separate(drone, CMAconsts.REL_causes, currmac)
            # @TODO Needs to be a 'careful, complete' reference count deletion...
            self.store.delete(currmac)
            del currmacs[macaddr]
    currmacs = None

    # Create REL_nicowner relationships for the newly created NIC nodes
    for macaddr in newmacs.keys():
        nic = newmacs[macaddr]
        self.store.relate_new(drone, CMAconsts.REL_nicowner, nic, {'causes': True})
        #self.store.relate(drone, CMAconsts.REL_causes, nic)

    # Now newmacs contains all the current info about our NICs - old and new...
    # Let's figure out what's happening with our IP addresses...
    primaryip = None
    for macaddr in newmacs.keys():
        mac = newmacs[macaddr]
        ifname = mac.ifname
        #print >> sys.stderr, 'MAC IS', str(mac)
        #print >> sys.stderr, 'DATA IS:', str(data)
        #print >> sys.stderr, 'IFNAME IS', str(ifname)
        iptable = data[str(ifname)]['ipaddrs']

        # Current (graph) IP addresses on this NIC, indexed by address
        currips = {}
        iplist = self.store.load_related(mac, CMAconsts.REL_ipowner, IPaddrNode)
        for ip in iplist:
            currips[ip.ipaddr] = ip

        newips = {}
        for ip in iptable.keys():   # keys are 'ip/mask' in CIDR format
            ipname = ':::INVALID:::'
            ipinfo = iptable[ip]
            if 'name' in ipinfo:
                ipname = ipinfo['name']
            if ipinfo['scope'] != 'global':
                continue
            iponly, cidrmask = ip.split('/')
            netaddr = pyNetAddr(iponly).toIPv6()
            if netaddr.islocal():
                # We ignore loopback addresses - might be wrong...
                continue
            ipnode = self.store.load_or_create(IPaddrNode
            ,       domain=drone.domain, ipaddr=str(netaddr), cidrmask=cidrmask)
            ## FIXME: Not an ideal way to determine primary (preferred) IP address...
            ## it's a bit idiosyncratic to Linux...
            ## A better way would be to use their 'startaddr' (w/o the port)
            ## This uses the IP address they used to talk to us.
            if ifname == primaryifname and primaryip is None and ipname == ifname:
                primaryip = ipnode
                drone.primary_ip_addr = str(primaryip.ipaddr)
            newips[str(netaddr)] = ipnode

        # compare the two sets of IP addresses (old and new)
        for ipaddr in currips.keys():
            currip = currips[ipaddr]
            if ipaddr in newips:
                newips[ipaddr] = currip.update_attributes(newips[ipaddr])
            else:
                print >> sys.stderr, 'Deleting address %s from MAC %s' % (currip, macaddr)
                print >> sys.stderr, 'currip:%s, currips:%s' % (str(currip), str(currips))
                self.log.debug('Deleting address %s from MAC %s' % (currip, macaddr))
                self.log.debug('currip:%s, currips:%s' % (str(currip), str(currips)))
                # BUG FIX: arguments were swapped (mac, currip, REL_ipowner);
                # separate() takes (node, relationship-type, other-node) as
                # shown by the REL_nicowner call above.
                self.store.separate(mac, CMAconsts.REL_ipowner, currip)
                # @TODO Needs to be a 'careful, complete' reference count deletion...
                self.store.delete(currip)
                del currips[ipaddr]

        # Create REL_ipowner relationships for all the newly created IP nodes
        for ipaddr in newips.keys():
            ip = newips[ipaddr]
            self.store.relate_new(mac, CMAconsts.REL_ipowner, ip, {'causes': True})
def default_defaults():
    '''Our default values - for the defaults themselves.

    Sounds kinda weird, but it makes sense - and it means the tests don't
    need to chase every change to the shipped configuration defaults.
    '''
    # Locally-known OUI additions: Python has lots, but misses newer ones.
    # Lower case with '-' separators; latest data at
    # http://standards.ieee.org/cgi-bin/ouisearch
    known_ouis = {
        '18-0c-ac': 'Canon, Inc.',
        '28-d2-44': 'LCFC(HeFei) Electronics Technology Co., Ltd.',
        '84-7a-88': 'HTC Corporation',
        'b0-79-3c': 'Revolv, Inc.',
        'b8-ee-65': 'Liteon Technology Corporation',
        'bc-ee-7b': 'ASUSTek Computer, Inc.',
        'c8-b5-b7': 'Apple.',
        'cc-3a-61': 'SAMSUNG ELECTRO MECHANICS CO., LTD.',
        'd8-50-e6': 'ASUSTek COMPUTER INC.',
        'e8-ab-fa': 'Shenzhen Reecam Tech.Ltd.',
    }

    # Modules imported before starting up; each one triggers its own kind
    # of conditional discovery as per its design.
    modules_to_load = [
        'linkdiscovery',        # Perform CDP/LLDP monitoring
        'checksumdiscovery',    # Perform tripwire-like checksum monitoring
        'monitoringdiscovery',  # Initiates monitoring based on service discovery
        'arpdiscovery',         # Listen for ARP packets: IPs and MACs
        'procsysdiscovery',     # Discovers content of /proc/sys
    ]

    # Discovery plugins always started when a Drone comes online.
    always_discover = [
        'os',                   # OS properties
        'cpu',                  # CPU properties
        'packages',             # What packages are installed?
        'monitoringagents',     # What monitoring agents are installed?
        'ulimit',               # What are current ulimit values?
        'commands',             # Discovers installed commands
        'nsswitch',             # Discovers nsswitch configuration (Linux)
        'findmnt',              # Discovers mounted filesystems (Linux)
        'sshd',                 # Discovers sshd configuration
        'tcpdiscovery',         # Discover services
    ]

    discovery_cfg = {
        'repeat': 60,           # Default repeat interval in seconds
        'warn': 120,            # Default slow discovery warning time
        'timeout': 300,         # Default timeout interval in seconds
        # Per-agent-type configuration, optionally including machine
        'agents': {
            "checksumdiscovery": {'repeat': 3600*8, 'timeout': 10*60, 'warn': 300},
            "os": {'repeat': 0, 'timeout': 60, 'warn': 5},
            "cpu": {'repeat': 0, 'timeout': 60, 'warn': 5},
            # "arpdiscovery/servidor": {'repeat': 60},
        },
    }

    monitoring_cfg = {
        'repeat': 15,           # Default repeat interval in seconds
        'warn': 60,             # Default slow monitoring warning time
        'timeout': 180,         # Default repeat interval in seconds
        # Per-agent-type configuration, optionally including machine, e.g.:
        #   "lsb::ssh": {'repeat': int, 'timeout': int},
        #   "ocf::Neo4j/servidor": {'repeat': int, 'timeout': int},
        #   "nagios::check_load": {'repeat': 60, 'timeout': 30,
        #       # I would really rather have a pure run queue length
        #       # but there's no agent for that. Sigh...
        #       # -r == scale load average by number of CPUs
        #       # -w == floating point warning load averages (1,5,15 minute values)
        #       # -c == floating point critical load averages (1,5,15 minute values)
        #       'argv': ['-r', '-w', '4,3,2', '-c', '4,3,2']},
        'agents': {},
        # Places to look for Nagios agents
        'nagiospath': [
            "/usr/lib/nagios/plugins",
            "/usr/local/nagios/libexec",
            "/usr/nagios/libexec",
            "/opt/nrpe/libexec",
        ],
    }

    heartbeat_cfg = {
        'repeat': 1,            # how frequently to heartbeat - in seconds
        'warn': 5,              # How long to wait when issuing a late heartbeat warning
        'timeout': 30,          # How long to wait before declaring a system dead
    }

    # Prioritized checksum commands; we use the first one that's installed.
    checksum_programs = [
        '/usr/bin/sha256sum', '/usr/bin/sha224sum',
        '/usr/bin/sha384sum', '/usr/bin/sha512sum',
        '/usr/bin/sha1sum', '/usr/bin/md5sum',
        '/usr/bin/cksum', '/usr/bin/crc32',
    ]

    # Files/directories to always get the permissions of.
    # Directories ending in / also have their contained files checked.
    # (Commented-out entries were deliberately disabled, not forgotten.)
    permission_watchlist = [
        #'/',
        #'/bin',
        #'/dev',
        #'/etc',
        '/etc/audit/',
        '/etc/bash.bashrc',
        '/etc/bashrc',
        '/etc/bash_completion',
        '/etc/bash_completion.d/',
        #'/etc/grub.conf/',
        #'/etc/grub.d/',
        '/etc/group',
        '/etc/gshadow',
        '/etc/init.d/',
        '/etc/login.defs',
        '/etc/passwd',
        '/etc/profile',
        '/etc/profile.d/',
        '/etc/csh.cshrc',
        '/etc/selinux/',
        '/etc/shadow',
        '/lib/',
        '/lib64/',
        '/lib/modules/',
        #'/run',
        #'/run/lock',
        #'/run/user',
        '/sbin/',
        '/usr/',
        #'/usr/bin/',
        #'/usr/lib/',
        #'/usr/lib64/',
        '/usr/local/',
        #'/usr/local/bin/',
        #'/usr/local/sbin/',
        #'/usr/sbin',
        #'/var/',
        #'/var/log/',
    ]

    return {
        'OUI': known_ouis,
        'optional_modules': modules_to_load,
        'contrib_modules': [],                      # contrib modules to import
        'initial_discovery': always_discover,
        'cmaport': 1984,                            # Our listening port
        'cmainit': pyNetAddr("0.0.0.0:1984"),       # Our listening address
        'compression_threshold': 20000,             # Compress packets >= 20 kbytes
        'compression_method': "zlib",               # Compression method
        'discovery': discovery_cfg,
        'monitoring': monitoring_cfg,
        'heartbeats': heartbeat_cfg,
        # Default best practice rule sets by domain.
        # The global domain defaults to the base rule set; override the
        # global domain (or any other) in your local config file.
        # I suspect these default rules will turn out to be a bit
        # harsh for many sites.
        'bprulesbydomain': {
            CMAconsts.globaldomain: CMAconsts.BASERULESETNAME,
        },
        # List of all the known best practice discovery types
        'allbpdiscoverytypes': ['proc_sys'],
        'checksum_cmds': checksum_programs,
        # Files we *always* checksum
        'checksum_files': ['/bin/sh', '/bin/bash', '/bin/login', '/usr/bin/passwd', ],
        'permission_files': permission_watchlist,
    }