def start(self):
    global auditlog
    global tracelog
    tracelog = log.Logger('trace')
    auditlog = log.Logger('audit')
    self.tlsserver = eventlet.spawn(
        _tlshandler, self.bind_host, self.bind_port)
    self.unixdomainserver = eventlet.spawn(_unixdomainhandler)

def start(self):
    global auditlog
    global tracelog
    tracelog = log.Logger('trace')
    auditlog = log.Logger('audit')
    self.tlsserver = None
    if self.should_run_remoteapi():
        self.start_remoteapi()
    else:
        eventlet.spawn_n(self.watch_for_cert)
    self.unixdomainserver = eventlet.spawn(_unixdomainhandler)

def missing_host_key(self, client, hostname, key):
    fingerprint = 'sha512$' + hashlib.sha512(key.asbytes()).hexdigest()
    cfg = self.cfm.get_node_attributes(
        self.node, ('pubkeys.ssh', 'pubkeys.addpolicy'))
    if 'pubkeys.ssh' not in cfg[self.node]:
        if ('pubkeys.addpolicy' in cfg[self.node]
                and cfg[self.node]['pubkeys.addpolicy']
                and cfg[self.node]['pubkeys.addpolicy']['value'] == 'manual'):
            raise cexc.PubkeyInvalid('New ssh key detected', key.asbytes(),
                                     fingerprint, 'pubkeys.ssh', 'newkey')
        auditlog = log.Logger('audit')
        auditlog.log({
            'node': self.node,
            'event': 'sshautoadd',
            'fingerprint': fingerprint
        })
        self.cfm.set_node_attributes(
            {self.node: {'pubkeys.ssh': fingerprint}})
        return True
    elif cfg[self.node]['pubkeys.ssh']['value'] == fingerprint:
        return True
    raise cexc.PubkeyInvalid('Mismatched SSH host key detected',
                             key.asbytes(), fingerprint, 'pubkeys.ssh',
                             'mismatch')

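# Hedged usage sketch (not from the source): missing_host_key() above matches
# paramiko's MissingHostKeyPolicy interface, so a policy like it is attached via
# SSHClient.set_missing_host_key_policy() before connecting. The simplified
# policy below only pins a single known fingerprint; the class name and the
# 'expected' parameter are illustrative assumptions, not confluent API.
import hashlib
import paramiko


class PinnedKeyPolicy(paramiko.client.MissingHostKeyPolicy):
    def __init__(self, expected):
        self.expected = expected  # e.g. 'sha512$<hexdigest>' as stored in pubkeys.ssh

    def missing_host_key(self, client, hostname, key):
        # recompute the fingerprint the same way the method above does
        fingerprint = 'sha512$' + hashlib.sha512(key.asbytes()).hexdigest()
        if fingerprint != self.expected:
            raise paramiko.SSHException('Host key mismatch for ' + hostname)


# client = paramiko.SSHClient()
# client.set_missing_host_key_policy(PinnedKeyPolicy('sha512$...'))
# client.connect('n1', username='admin')
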
def verify_cert(self, certificate):
    storedprint = self.cfm.get_node_attributes(self.node, (self.fieldname,))
    if (self.fieldname not in storedprint[self.node] or
            storedprint[self.node][self.fieldname]['value'] == ''):
        # no stored value, check policy for next action
        newpolicy = self.cfm.get_node_attributes(self.node,
                                                 ('pubkeys.addpolicy',))
        if ('pubkeys.addpolicy' in newpolicy[self.node] and
                'value' in newpolicy[self.node]['pubkeys.addpolicy'] and
                newpolicy[self.node]['pubkeys.addpolicy']['value'] == 'manual'):
            # manual policy means always raise unless a match is set
            # manually
            fingerprint = get_fingerprint(certificate, 'sha256')
            raise cexc.PubkeyInvalid('New certificate detected', certificate,
                                     fingerprint, self.fieldname, 'newkey')
        # since the policy is not manual, go ahead and add new key
        # after logging to audit log
        fingerprint = get_fingerprint(certificate, 'sha256')
        auditlog = log.Logger('audit')
        auditlog.log({'node': self.node, 'event': 'certautoadd',
                      'fingerprint': fingerprint})
        self.cfm.set_node_attributes(
            {self.node: {self.fieldname: fingerprint}})
        return True
    elif cert_matches(storedprint[self.node][self.fieldname]['value'],
                      certificate):
        return True
    fingerprint = get_fingerprint(certificate, 'sha256')
    raise cexc.PubkeyInvalid(
        'Mismatched certificate detected', certificate, fingerprint,
        self.fieldname, 'mismatch')

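# Hedged sketch (assumption, not confluent's actual helpers): verify_cert()
# above relies on get_fingerprint(certificate, 'sha256') and cert_matches().
# A minimal stand-in over raw DER certificate bytes could look like this; the
# 'sha256$<hex>' format mirrors the 'sha512$' prefix used for SSH keys above,
# and both function names here are hypothetical.
import hashlib


def get_fingerprint_sketch(der_bytes, algo='sha256'):
    # hash the DER-encoded certificate and prefix with the algorithm name
    return algo + '$' + hashlib.new(algo, der_bytes).hexdigest()


def cert_matches_sketch(stored, der_bytes):
    # recompute with the algorithm named in the stored fingerprint and compare
    algo = stored.partition('$')[0]
    return stored == get_fingerprint_sketch(der_bytes, algo)
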
def _daemonize():
    if not 'fork' in os.__dict__:
        # platform without fork() support; stay in the foreground
        return
    thispid = os.fork()
    if thispid > 0:
        # original parent waits for the intermediate child, then exits
        os.waitpid(thispid, 0)
        os._exit(0)
    os.setsid()  # detach from the controlling terminal
    thispid = os.fork()
    if thispid > 0:
        print 'confluent server starting as pid %d' % thispid
        os._exit(0)
    # close inherited stdio and point fds 0-2 at /dev/null
    os.closerange(0, 2)
    os.umask(63)  # 0o077: created files are accessible by the owner only
    os.open(os.devnull, os.O_RDWR)
    os.dup2(0, 1)
    os.dup2(0, 2)
    sys.stdout = log.Logger('stdout')
    sys.stderr = log.Logger('stderr')

def __init__(self, node, configmanager, width=80, height=24):
    self.clearpending = False
    self.clearerror = False
    self.initsize = (width, height)
    self._dologging = True
    self._is_local = True
    self._isondemand = False
    self.error = None
    self._retrytime = 0
    self.cfgmgr = configmanager
    self.node = node
    self.connectstate = 'unconnected'
    self._isalive = True
    self.buffer = pyte.Screen(100, 31)
    self.termstream = pyte.ByteStream()
    self.termstream.attach(self.buffer)
    self.livesessions = set([])
    self.utf8decoder = codecs.getincrementaldecoder('utf-8')()
    if self._logtobuffer:
        self.logger = log.Logger(node, console=True,
                                 tenant=configmanager.tenant)
    (text, termstate, timestamp) = (b'', 0, False)
    # when reading from log file, we will use wall clock
    # it should usually match walltime.
    self.lasttime = 0
    if timestamp:
        timediff = time.time() - timestamp
        if timediff > 0:
            self.lasttime = util.monotonic_time() - timediff
        else:
            # wall clock has gone backwards, use current time as best
            # guess
            self.lasttime = util.monotonic_time()
    self.clearbuffer()
    self.appmodedetected = False
    self.shiftin = None
    self.reconnect = None
    if termstate & 1:
        self.appmodedetected = True
    if termstate & 2:
        self.shiftin = b'0'
    self.users = {}
    self._attribwatcher = None
    self._console = None
    self.connectionthread = None
    self.send_break = None
    if self._genwatchattribs:
        self._attribwatcher = self.cfgmgr.watch_attributes(
            (self.node,), self._genwatchattribs, self._attribschanged)
    self.check_isondemand()
    if not self._isondemand:
        self.connectstate = 'connecting'
        eventlet.spawn(self._connect)

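# Hedged sketch (not from the source): illustrates why the __init__ above keeps
# an incremental UTF-8 decoder around. Console reads can split a multibyte
# character across chunks; the incremental decoder buffers the partial sequence
# instead of raising, unlike a plain bytes.decode() call.
import codecs

decoder = codecs.getincrementaldecoder('utf-8')()
first = decoder.decode(b'\xe2\x94')   # first bytes of U+2500, yields ''
second = decoder.decode(b'\x80')      # completing byte arrives, yields the character
assert first + second == '\u2500'
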
def _daemonize():
    if not 'fork' in os.__dict__:
        return
    thispid = os.fork()
    if thispid > 0:
        os.waitpid(thispid, 0)
        os._exit(0)
    os.setsid()
    thispid = os.fork()
    if thispid > 0:
        print('confluent server starting as pid {0}'.format(thispid))
        os._exit(0)
    os.closerange(0, 2)
    os.umask(63)
    os.open(os.devnull, os.O_RDWR)
    os.dup2(0, 1)
    os.dup2(0, 2)
    sys.stdout = log.Logger('stdout', buffered=False)
    sys.stderr = log.Logger('stderr', buffered=False)
    log.daemonized = True

def initialize():
    global _tracelog
    global _bufferdaemon
    global _bufferlock
    _bufferlock = semaphore.Semaphore()
    _tracelog = log.Logger('trace')
    _bufferdaemon = subprocess.Popen(
        ['/opt/confluent/bin/vtbufferd'], bufsize=0,
        stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    fl = fcntl.fcntl(_bufferdaemon.stdout.fileno(), fcntl.F_GETFL)
    fcntl.fcntl(_bufferdaemon.stdout.fileno(), fcntl.F_SETFL,
                fl | os.O_NONBLOCK)

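# Hedged sketch (not from the source): one way a non-blocking pipe like the one
# configured above can be drained without stalling the caller, using only
# select() and os.read() from the standard library. The helper name and the
# 60-second timeout are assumptions, not confluent behavior.
import os
import select


def drain_output(proc, timeout=60):
    fd = proc.stdout.fileno()
    chunks = []
    while True:
        ready, _, _ = select.select((fd,), (), (), timeout)
        if not ready:
            break                      # nothing arrived within the timeout
        try:
            data = os.read(fd, 65536)  # O_NONBLOCK: returns only what is buffered
        except BlockingIOError:
            break                      # pipe emptied between select() and read()
        if not data:
            break                      # EOF, child closed its stdout
        chunks.append(data)
        timeout = 0                    # data in hand; only grab what is still pending
    return b''.join(chunks)
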
def __init__(self, node, configmanager):
    self._dologging = True
    self._isondemand = False
    self.error = None
    self.cfgmgr = configmanager
    self.node = node
    self.connectstate = 'unconnected'
    self._isalive = True
    self.buffer = bytearray()
    self.livesessions = set([])
    if self._logtobuffer:
        self.logger = log.Logger(node, console=True,
                                 tenant=configmanager.tenant)
        (text, termstate, timestamp) = self.logger.read_recent_text(8192)
    else:
        (text, termstate, timestamp) = ('', 0, False)
    # when reading from log file, we will use wall clock
    # it should usually match walltime.
    self.lasttime = 0
    if timestamp:
        timediff = time.time() - timestamp
        if timediff > 0:
            self.lasttime = util.monotonic_time() - timediff
        else:
            # wall clock has gone backwards, use current time as best
            # guess
            self.lasttime = util.monotonic_time()
    self.buffer += text
    self.appmodedetected = False
    self.shiftin = None
    self.reconnect = None
    if termstate & 1:
        self.appmodedetected = True
    if termstate & 2:
        self.shiftin = '0'
    self.users = {}
    self._attribwatcher = None
    self._console = None
    self.connectionthread = None
    self.send_break = None
    if self._genwatchattribs:
        self._attribwatcher = self.cfgmgr.watch_attributes(
            (self.node,), self._genwatchattribs, self._attribschanged)
    self.check_isondemand()
    if not self._isondemand:
        self.connectstate = 'connecting'
        eventlet.spawn(self._connect)

def execupdate(handler, filename, updateobj, type, owner, node):
    global _tracelog
    if type != 'ffdc' and not os.path.exists(filename):
        errstr = '{0} does not appear to exist on {1}'.format(
            filename, socket.gethostname())
        updateobj.handle_progress({'phase': 'error', 'progress': 0.0,
                                   'detail': errstr})
        return
    if type == 'ffdc' and os.path.isdir(filename):
        filename += '/' + node
    try:
        if type == 'firmware':
            completion = handler(filename, progress=updateobj.handle_progress,
                                 bank=updateobj.bank)
        else:
            completion = handler(filename, progress=updateobj.handle_progress)
        if completion is None:
            completion = 'complete'
        if owner:
            pwent = pwd.getpwnam(owner)
            os.chown(filename, pwent.pw_uid, pwent.pw_gid)
        updateobj.handle_progress({'phase': completion, 'progress': 100.0})
    except exc.PubkeyInvalid as pi:
        errstr = 'Certificate mismatch detected, does not match value in ' \
                 'attribute {0}'.format(pi.attrname)
        updateobj.handle_progress({'phase': 'error', 'progress': 0.0,
                                   'detail': errstr})
    except Exception as e:
        if _tracelog is None:
            _tracelog = log.Logger('trace')
        _tracelog.log(traceback.format_exc(), ltype=log.DataTypes.event,
                      event=log.Events.stacktrace)
        updateobj.handle_progress({'phase': 'error', 'progress': 0.0,
                                   'detail': str(e)})

def snoop(handler, protocol=None):
    """Watch for SLP activity

    handler will be called with a dictionary of relevant attributes

    :param handler:
    :return:
    """
    tracelog = log.Logger('trace')
    try:
        active_scan(handler, protocol)
    except Exception as e:
        tracelog.log(traceback.format_exc(), ltype=log.DataTypes.event,
                     event=log.Events.stacktrace)
    net = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
    net.setsockopt(IPPROTO_IPV6, socket.IPV6_V6ONLY, 1)
    slpg = socket.inet_pton(socket.AF_INET6, 'ff01::123')
    slpg2 = socket.inet_pton(socket.AF_INET6, 'ff02::123')
    for i6idx in util.list_interface_indexes():
        mreq = slpg + struct.pack('=I', i6idx)
        net.setsockopt(IPPROTO_IPV6, socket.IPV6_JOIN_GROUP, mreq)
        mreq = slpg2 + struct.pack('=I', i6idx)
        net.setsockopt(IPPROTO_IPV6, socket.IPV6_JOIN_GROUP, mreq)
    net4 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    net.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    net4.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    for i4 in util.list_ips():
        if 'broadcast' not in i4:
            continue
        slpmcast = socket.inet_aton('239.255.255.253') + \
                   socket.inet_aton(i4['addr'])
        try:
            net4.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP,
                            slpmcast)
        except socket.error as e:
            if e.errno != 98:
                raise
            # socket in use can occur when aliased ipv4 are encountered
    net.bind(('', 427))
    net4.bind(('', 427))
    while True:
        try:
            newmacs = set([])
            r, _, _ = select.select((net, net4), (), (), 60)
            # clear known_peers and peerbymacaddress
            # to avoid stale info getting in...
            # rely upon the select(0.2) to catch rapid fire and aggregate ip
            # addresses that come close together
            # calling code needs to understand deeper context, as snoop
            # will now yield dupe info over time
            known_peers = set([])
            peerbymacaddress = {}
            while r:
                for s in r:
                    (rsp, peer) = s.recvfrom(9000)
                    ip = peer[0].partition('%')[0]
                    if peer in known_peers:
                        continue
                    if ip not in neighutil.neightable:
                        neighutil.update_neigh()
                    if ip not in neighutil.neightable:
                        continue
                    known_peers.add(peer)
                    mac = neighutil.neightable[ip]
                    if mac in peerbymacaddress:
                        peerbymacaddress[mac]['addresses'].append(peer)
                    else:
                        q = query_srvtypes(peer)
                        if not q or not q[0]:
                            # SLP might have started and not ready yet
                            # ignore for now
                            known_peers.discard(peer)
                            continue
                        # we want to prioritize the very well known services
                        svcs = []
                        for svc in q:
                            if svc in _slp_services:
                                svcs.insert(0, svc)
                            else:
                                svcs.append(svc)
                        peerbymacaddress[mac] = {
                            'services': svcs,
                            'addresses': [peer],
                        }
                    newmacs.add(mac)
                r, _, _ = select.select((net, net4), (), (), 0.2)
            for mac in newmacs:
                peerbymacaddress[mac]['xid'] = 1
                _add_attributes(peerbymacaddress[mac])
                peerbymacaddress[mac]['hwaddr'] = mac
                peerbymacaddress[mac]['protocol'] = protocol
                for srvurl in peerbymacaddress[mac].get('urls', ()):
                    if len(srvurl) > 4:
                        srvurl = srvurl[:-3]
                    if srvurl.endswith('://Athena:'):
                        continue
                if 'service:ipmi' in peerbymacaddress[mac]['services']:
                    continue
                if 'service:lightttpd' in peerbymacaddress[mac]['services']:
                    currinf = peerbymacaddress[mac]
                    curratt = currinf.get('attributes', {})
                    if curratt.get('System-Manufacturing', [None])[0] == 'Lenovo' \
                            and curratt.get('type', [None])[0] == 'LenovoThinkServer':
                        peerbymacaddress[mac]['services'] = [
                            'service:lenovo-tsm']
                    else:
                        continue
                handler(peerbymacaddress[mac])
        except Exception as e:
            tracelog.log(traceback.format_exc(), ltype=log.DataTypes.event,
                         event=log.Events.stacktrace)

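# Hedged sketch (not from the source): a minimal handler of the shape the SLP
# snoop() above calls. The keys 'hwaddr', 'services', 'addresses', 'protocol'
# and 'xid' are set by snoop() itself; 'attributes' and 'urls' come from
# _add_attributes() and are treated as optional here.
def slp_handler(peerinfo):
    print('SLP peer {0} via {1}'.format(
        peerinfo['hwaddr'],
        ', '.join(addr[0] for addr in peerinfo['addresses'])))
    for svc in peerinfo['services']:
        print('  service: ' + svc)


# snoop(slp_handler)  # blocks; typically run in its own green thread
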
def start(self):
    global auditlog
    global tracelog
    tracelog = log.Logger('trace')
    auditlog = log.Logger('audit')
    self.server = eventlet.spawn(serve, self.bind_host, self.bind_port)

def start_console_sessions():
    global _tracelog
    _tracelog = log.Logger('trace')
    configmodule.hook_new_configmanagers(_start_tenant_sessions)

def snoop(handler, byehandler=None, protocol=None, uuidlookup=None):
    """Watch for SSDP notify messages

    The handler shall be called on any service coming online.
    byehandler is called whenever a system advertises that it is departing.
    If no byehandler is specified, byebye messages are ignored. The handler
    is given (as possible), the mac address, a list of viable sockaddrs to
    reference the peer, and the notification type (e.g.
    'urn:dmtf-org:service:redfish-rest:1'

    :param handler: A handler for online notifications from network
    :param byehandler: Optional handler for devices going off the network
    """
    # Normally, I like using v6/v4 agnostic socket. However, since we are
    # dabbling in multicast wizardry here, such sockets can cause big problems,
    # so we will have two distinct sockets
    tracelog = log.Logger('trace')
    known_peers = set([])
    net6 = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
    net6.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 1)
    for ifidx in util.list_interface_indexes():
        v6grp = ssdp6mcast + struct.pack('=I', ifidx)
        net6.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_JOIN_GROUP, v6grp)
    net6.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    net4 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    for i4 in util.list_ips():
        ssdp4mcast = socket.inet_pton(socket.AF_INET, mcastv4addr) + \
                     socket.inet_aton(i4['addr'])
        try:
            net4.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP,
                            ssdp4mcast)
        except socket.error as e:
            if e.errno != 98:
                # errno 98 can happen if aliased, skip for now
                raise
    net4.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    net4.bind(('', 1900))
    net6.bind(('', 1900))
    peerbymacaddress = {}
    while True:
        try:
            newmacs = set([])
            machandlers = {}
            r, _, _ = select.select((net4, net6), (), (), 60)
            while r:
                for s in r:
                    (rsp, peer) = s.recvfrom(9000)
                    if rsp[:4] == b'PING':
                        continue
                    rsp = rsp.split(b'\r\n')
                    method, _, _ = rsp[0].split(b' ', 2)
                    if method == b'NOTIFY':
                        ip = peer[0].partition('%')[0]
                        if peer in known_peers:
                            continue
                        if ip not in neighutil.neightable:
                            neighutil.update_neigh()
                        if ip not in neighutil.neightable:
                            continue
                        mac = neighutil.neightable[ip]
                        known_peers.add(peer)
                        newmacs.add(mac)
                        if mac in peerbymacaddress:
                            peerbymacaddress[mac]['addresses'].append(peer)
                        else:
                            peerbymacaddress[mac] = {
                                'hwaddr': mac,
                                'addresses': [peer],
                            }
                        peerdata = peerbymacaddress[mac]
                        for headline in rsp[1:]:
                            if not headline:
                                continue
                            headline = util.stringify(headline)
                            header, _, value = headline.partition(':')
                            header = header.strip()
                            value = value.strip()
                            if header == 'NT':
                                peerdata['service'] = value
                            elif header == 'NTS':
                                if value == 'ssdp:byebye':
                                    machandlers[mac] = byehandler
                                elif value == 'ssdp:alive':
                                    machandlers[mac] = None  # handler
                    elif method == b'M-SEARCH':
                        if not uuidlookup:
                            continue
                        #ip = peer[0].partition('%')[0]
                        for headline in rsp[1:]:
                            if not headline:
                                continue
                            headline = util.stringify(headline)
                            headline = headline.partition(':')
                            if len(headline) < 3:
                                continue
                            if headline[0] == 'ST' and headline[-1].startswith(
                                    ' urn:xcat.org:service:confluent:'):
                                try:
                                    cfm.check_quorum()
                                except Exception:
                                    continue
                                for query in headline[-1].split('/'):
                                    if query.startswith('uuid='):
                                        curruuid = query.split('=', 1)[1].lower()
                                        node = uuidlookup(curruuid)
                                        if not node:
                                            break
                                        # Do not bother replying to a node that
                                        # we have no deployment activity
                                        # planned for
                                        cfg = cfm.ConfigManager(None)
                                        cfd = cfg.get_node_attributes(
                                            node, ['deployment.pendingprofile',
                                                   'collective.managercandidates'])
                                        if not cfd.get(node, {}).get(
                                                'deployment.pendingprofile',
                                                {}).get('value', None):
                                            break
                                        candmgrs = cfd.get(node, {}).get(
                                            'collective.managercandidates',
                                            {}).get('value', None)
                                        if candmgrs:
                                            candmgrs = noderange.NodeRange(
                                                candmgrs, cfg).nodes
                                            if collective.get_myname() not in candmgrs:
                                                break
                                        currtime = time.time()
                                        seconds = int(currtime)
                                        msecs = int(currtime * 1000 % 1000)
                                        reply = 'HTTP/1.1 200 OK\r\nNODENAME: {0}\r\nCURRTIME: {1}\r\nCURRMSECS: {2}\r\n'.format(
                                            node, seconds, msecs)
                                        if '%' in peer[0]:
                                            iface = peer[0].split('%', 1)[1]
                                            reply += 'MGTIFACE: {0}\r\n'.format(
                                                peer[0].split('%', 1)[1])
                                            ncfg = netutil.get_nic_config(
                                                cfg, node, ifidx=iface)
                                            if ncfg.get('matchesnodename', None):
                                                reply += 'DEFAULTNET: 1\r\n'
                                        elif not netutil.address_is_local(peer[0]):
                                            continue
                                        if not isinstance(reply, bytes):
                                            reply = reply.encode('utf8')
                                        s.sendto(reply, peer)
                r, _, _ = select.select((net4, net6), (), (), 0.2)
            for mac in newmacs:
                thehandler = machandlers.get(mac, None)
                if thehandler:
                    thehandler(peerbymacaddress[mac])
        except Exception:
            tracelog.log(traceback.format_exc(), ltype=log.DataTypes.event,
                         event=log.Events.stacktrace)

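# Hedged sketch (not from the source): callables of the shape the SSDP snoop()
# above dispatches through machandlers. Each peer dict carries 'hwaddr',
# 'addresses' and, when a NOTIFY carried an NT header, 'service'. Note that in
# this revision ssdp:alive maps to None, so only the byebye path actually fires;
# the print statements are purely illustrative.
def ssdp_online(peerinfo):
    print('SSDP alive: {0} ({1})'.format(
        peerinfo['hwaddr'], peerinfo.get('service', 'unknown service')))


def ssdp_bye(peerinfo):
    print('SSDP byebye: ' + peerinfo['hwaddr'])


# snoop(ssdp_online, byehandler=ssdp_bye)  # blocks; run in its own green thread
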
def execupdate(handler, filename, updateobj, type, owner, node):
    global _tracelog
    if type != 'ffdc':
        errstr = False
        if not os.path.exists(filename):
            errstr = '{0} does not appear to exist on {1}, or is in a directory with permissions forbidding confluent user/group access'.format(
                filename, socket.gethostname())
        elif not os.access(filename, os.R_OK):
            errstr = '{0} is not readable by confluent on {1} (ensure confluent user or group can access file and parent directories)'.format(
                filename, socket.gethostname())
        if errstr:
            updateobj.handle_progress({'phase': 'error', 'progress': 0.0,
                                       'detail': errstr})
            return
    if type == 'ffdc' and os.path.isdir(filename):
        filename += '/' + node
    if type == 'ffdc':
        errstr = False
        if os.path.exists(filename):
            errstr = '{0} already exists on {1}, cannot overwrite'.format(
                filename, socket.gethostname())
        elif not os.access(os.path.dirname(filename), os.W_OK):
            errstr = '{0} directory not writable by confluent user/group on {1}, check the directory and parent directory ownership and permissions'.format(
                filename, socket.gethostname())
        if errstr:
            updateobj.handle_progress({'phase': 'error', 'progress': 0.0,
                                       'detail': errstr})
            return
    try:
        if type == 'firmware':
            completion = handler(filename, progress=updateobj.handle_progress,
                                 bank=updateobj.bank)
        else:
            completion = handler(filename, progress=updateobj.handle_progress)
        if type == 'ffdc' and completion:
            filename = completion
            completion = None
        if completion is None:
            completion = 'complete'
        if owner:
            pwent = pwd.getpwnam(owner)
            os.chown(filename, pwent.pw_uid, pwent.pw_gid)
        updateobj.handle_progress({'phase': completion, 'progress': 100.0})
    except exc.PubkeyInvalid as pi:
        errstr = 'Certificate mismatch detected, does not match value in ' \
                 'attribute {0}'.format(pi.attrname)
        updateobj.handle_progress({'phase': 'error', 'progress': 0.0,
                                   'detail': errstr})
    except Exception as e:
        if _tracelog is None:
            _tracelog = log.Logger('trace')
        _tracelog.log(traceback.format_exc(), ltype=log.DataTypes.event,
                      event=log.Events.stacktrace)
        updateobj.handle_progress({'phase': 'error', 'progress': 0.0,
                                   'detail': str(e)})

def _redirectoutput():
    os.umask(63)
    sys.stdout = log.Logger('stdout', buffered=False)
    sys.stderr = log.Logger('stderr', buffered=False)