def await_notifications(callback, revocation_cert_path):
    """Subscribe to the verifier's 0mq revocation channel and process messages forever.

    Each received JSON body is handed to process_revocation() together with
    *callback* and the path of the certificate used for signature checks.
    Raises if the listener options are disabled or PyZMQ is missing.
    """
    # keep old typo "listen_notfications" around for a few versions
    assert config.getboolean(
        "cloud_agent", "listen_notifications", fallback=False) or config.getboolean(
        "cloud_agent", "listen_notfications", fallback=False)

    try:
        import zmq  # pylint: disable=import-outside-toplevel
    except ImportError as error:
        raise Exception(
            "install PyZMQ for 'listen_notifications' option") from error

    if revocation_cert_path is None:
        raise Exception("must specify revocation_cert_path")

    notify_ip = config.get('general', 'receive_revocation_ip')
    notify_port = config.getint('general', 'receive_revocation_port')

    zmq_context = zmq.Context()
    sub_sock = zmq_context.socket(zmq.SUB)
    sub_sock.setsockopt(zmq.SUBSCRIBE, b"")
    sub_sock.connect(f"tcp://{notify_ip}:{notify_port}")

    logger.info("Waiting for revocation messages on 0mq %s:%s",
                notify_ip, notify_port)

    while True:
        body = json.loads(sub_sock.recv())
        process_revocation(body, callback, revocation_cert_path)
def await_notifications(callback, revocation_cert_path):
    """Listen on the 0mq revocation channel and invoke *callback* for each
    revocation message whose signature verifies against the CV public key.

    The verification key is lazy-loaded from *revocation_cert_path* on the
    first message, so the listener can start before the CA material has been
    unzipped into place.  Messages without a valid signature are logged and
    dropped.
    """
    # keep old typo "listen_notfications" around for a few versions
    assert config.getboolean(
        "cloud_agent", "listen_notifications", fallback=False) or config.getboolean(
            "cloud_agent", "listen_notfications", fallback=False)
    try:
        import zmq  # pylint: disable=import-outside-toplevel
    except ImportError as error:
        raise Exception(
            "install PyZMQ for 'listen_notifications' option") from error

    global cert_key

    if revocation_cert_path is None:
        raise Exception("must specify revocation_cert_path")

    context = zmq.Context()
    mysock = context.socket(zmq.SUB)
    mysock.setsockopt(zmq.SUBSCRIBE, b"")
    mysock.connect(f"tcp://{config.get('general', 'receive_revocation_ip')}:"
                   f"{config.getint('general', 'receive_revocation_port')}")
    logger.info(
        "Waiting for revocation messages on 0mq %s:%s",
        config.get("general", "receive_revocation_ip"),
        config.getint("general", "receive_revocation_port"),
    )

    while True:
        rawbody = mysock.recv()
        body = json.loads(rawbody)

        if cert_key is None:
            # load up the CV signing public key
            if revocation_cert_path is not None and os.path.exists(
                    revocation_cert_path):
                logger.info("Lazy loading the revocation certificate from %s",
                            revocation_cert_path)
                with open(revocation_cert_path, "rb") as f:
                    certpem = f.read()
                cert_key = crypto.x509_import_pubkey(certpem)

        if cert_key is None:
            logger.warning(
                "Unable to check signature of revocation message: %s not available",
                revocation_cert_path)
        elif "signature" not in body or body["signature"] == "none":
            logger.warning("No signature on revocation message from server")
        elif not crypto.rsa_verify(cert_key, body["msg"].encode("utf-8"),
                                   body["signature"].encode("utf-8")):
            # bug fix: corrected "siganture" typo in the error message
            logger.error("Invalid revocation message signature %s", body)
        else:
            message = json.loads(body["msg"])
            logger.debug("Revocation signature validated for revocation: %s",
                         message)
            callback(message)
def notify_error(agent, msgtype='revocation'):
    """Build, optionally sign, and publish a revocation event for *agent* over 0mq."""
    if not config.getboolean('cloud_verifier', 'revocation_notifier'):
        return

    # prepare the revocation message:
    revocation = {
        'type': msgtype,
        'ip': agent['ip'],
        'agent_id': agent['agent_id'],
        'port': agent['port'],
        'tpm_policy': agent['tpm_policy'],
        'vtpm_policy': agent['vtpm_policy'],
        'meta_data': agent['meta_data'],
        'event_time': time.asctime(),
    }
    payload = {'msg': json.dumps(revocation).encode('utf-8')}

    # also need to load up private key for signing revocations
    if agent['revocation_key'] == "":
        payload['signature'] = "none"
    else:
        privkey = crypto.rsa_import_privkey(agent['revocation_key'])
        payload['signature'] = crypto.rsa_sign(privkey, payload['msg'])

    revocation_notifier.notify(payload)
def invalid(self) -> Failure:
    """Validate this IMA entry, returning a Failure describing every problem found.

    Checks, in order: the entry's PCR index, the template hash (ToMToU
    sentinel entries with an all-0xFF hash may be skipped), and finally the
    mode-specific data via the configured validator.
    """
    failure = Failure(Component.IMA, ["validation"])
    if self.pcr != str(config.IMA_PCR):
        logger.warning("IMA entry PCR does not match %s. It was: %s",
                       config.IMA_PCR, self.pcr)
        failure.add_event("ima_pcr",
                          {"message": "IMA PCR is not the configured one",
                           "expected": str(config.IMA_PCR),
                           "got": self.pcr}, True)

    # Ignore template hash for ToMToU errors
    if self.ima_template_hash == get_FF_HASH(self._ima_hash_alg):
        logger.warning("Skipped template_hash validation entry with FF_HASH")
        # By default ToMToU errors are not treated as a failure
        if config.getboolean("cloud_verifier", "tomtou_errors", fallback=False):
            failure.add_event("tomtou", "hash validation was skipped", True)
        return failure

    computed_hash = self._ima_hash_alg.hash(self._bytes)
    if self.ima_template_hash != computed_hash:
        # Bug fix: report the hash that was actually calculated and compared,
        # not the raw template data (self.mode.bytes()), as the "got" value.
        failure.add_event("ima_hash",
                          {"message": "IMA template hash does not match the calculated hash.",
                           "expected": str(self.ima_template_hash),
                           "got": str(computed_hash)}, True)
        return failure
    if self._validator is None:
        failure.add_event("no_validator", "No validator specified", True)
        return failure

    failure.merge(self.mode.is_data_valid(self._validator))
    return failure
def worker_webhook(tosend, url):
    """POST the revocation payload *tosend* to *url*, retrying with (optionally
    exponential) backoff up to the configured number of attempts."""
    retry_interval = config.getfloat('cloud_verifier', 'retry_interval')
    use_backoff = config.getboolean('cloud_verifier', 'exponential_backoff')
    max_retries = config.getint('cloud_verifier', 'max_retries')

    session = requests.session()
    logger.info("Sending revocation event via webhook...")

    for attempt in range(max_retries):
        next_retry = retry.retry_time(use_backoff, retry_interval, attempt, logger)
        try:
            response = session.post(url, json=tosend, timeout=5)
            if response.status_code in (200, 202):
                break  # delivered successfully
            logger.debug(
                "Unable to publish revocation message %d times via webhook, "
                "trying again in %d seconds. "
                "Server returned status code: %s",
                attempt, next_retry, response.status_code)
        except requests.exceptions.RequestException as err:
            logger.debug(
                "Unable to publish revocation message %d times via webhook, "
                "trying again in %d seconds: %s",
                attempt, next_retry, err)
        time.sleep(next_retry)
def get_tls_context():
    """Build an ssl.SSLContext for client connections using the configured CA
    and the module-level client certificate/key pair."""
    ca_cert = config.get('tenant', 'ca_cert')
    tls_dir = config.get('tenant', 'tls_dir')

    if tls_dir == 'default':
        ca_cert = 'cacert.crt'
        tls_dir = 'cv_ca'

    # this is relative path, convert to absolute in WORK_DIR
    if tls_dir[0] != '/':
        tls_dir = os.path.abspath(f"{config.WORK_DIR}/{tls_dir}")

    logger.info("Setting up client TLS in %s", tls_dir)

    ca_path = f"{tls_dir}/{ca_cert}"
    my_tls_cert = f"{tls_dir}/{my_cert}"
    my_tls_priv_key = f"{tls_dir}/{my_priv_key}"

    ctx = ssl.create_default_context()
    ctx.load_verify_locations(cafile=ca_path)
    ctx.load_cert_chain(certfile=my_tls_cert, keyfile=my_tls_priv_key)
    ctx.verify_mode = ssl.CERT_REQUIRED
    ctx.check_hostname = config.getboolean('general', 'tls_check_hostnames')
    return ctx
def __init__(self):
    """ Set up required values and TLS """
    self.nonce = None
    self.agent_ip = None
    self.agent_port = config.get('cloud_agent', 'cloudagent_port')

    self.verifier_ip = config.get('tenant', 'cloudverifier_ip')
    self.verifier_port = config.get('tenant', 'cloudverifier_port')

    self.registrar_ip = config.get('tenant', 'registrar_ip')
    self.registrar_port = config.get('tenant', 'registrar_port')

    self.webapp_port = config.getint('webapp', 'webapp_port')
    # unprivileged processes cannot bind ports below 1024; shift into the user range
    if not config.REQUIRE_ROOT and self.webapp_port < 1024:
        self.webapp_port += 2000
    self.webapp_ip = config.get('webapp', 'webapp_ip')

    self.my_cert, self.my_priv_key = self.get_tls_context()
    self.cert = (self.my_cert, self.my_priv_key)

    self.tls_enabled = config.getboolean('general', "enable_tls")
    if not self.tls_enabled:
        self.cert = ""
        logger.warning(
            "Warning: TLS is currently disabled, keys will be sent in the clear! This should only be used for testing."
        )
def check_ek(self, ek, ekcert, tpm):
    """Verify the endorsement key certificate when 'require_ek_cert' is set.

    Returns True when the certificate verifies or checking is skipped
    (stub TPM, vTPM, emulator, or checks disabled); False on a missing or
    invalid certificate.
    """
    if not config.getboolean('tenant', 'require_ek_cert'):
        return True
    if config.STUB_TPM:
        logger.debug("not checking ekcert due to STUB_TPM mode")
        return True
    if ekcert == 'virtual':
        logger.debug("not checking ekcert of VTPM")
        return True
    if ekcert == 'emulator' and config.DISABLE_EK_CERT_CHECK_EMULATOR:
        logger.debug("not checking ekcert of TPM emulator")
        return True
    if ekcert is None:
        logger.warning(
            "No EK cert provided, require_ek_cert option in config set to True")
        return False
    if not tpm.verify_ek(base64.b64decode(ekcert), ek):
        logger.warning("Invalid EK certificate")
        return False
    return True
def get_notifiers():
    """Return the set of enabled revocation notifier backends.

    Reads the 'revocation_notifiers' list and honours the two deprecated
    boolean options, warning when they are used.  Unknown names are dropped.
    """
    configured = config.get("cloud_verifier", "revocation_notifiers", fallback="")
    notifiers = set(configured.split(","))

    if "zeromq" not in notifiers and config.getboolean(
            "cloud_verifier", "revocation_notifier", fallback=False):
        logger.warning(
            "Warning: 'revocation_notifier' option is deprecated; use 'revocation_notifiers'"
        )
        notifiers.add("zeromq")

    if "webhook" not in notifiers and config.getboolean(
            "cloud_verifier", "revocation_notifier_webhook", fallback=False):
        logger.warning(
            "Warning: 'revocation_notifier_webhook' option is deprecated; use 'revocation_notifiers'"
        )
        notifiers.add("webhook")

    # keep only recognised backend names
    return notifiers & {"zeromq", "webhook", "agent"}
def main():
    """Entry point: optionally apply pending DB migrations, then start the verifier."""
    # if we are configured to auto-migrate the DB, check if there are any migrations to perform
    auto_migrate = config.has_option("cloud_verifier", "auto_migrate_db") and \
        config.getboolean("cloud_verifier", "auto_migrate_db")
    if auto_migrate:
        keylime.cmd.migrations_apply.apply("cloud_verifier")

    cloud_verifier_tornado.main()
def main(argv=sys.argv):
    """Entry point: optionally apply pending DB migrations, then start the registrar."""
    # if we are configured to auto-migrate the DB, check if there are any migrations to perform
    auto_migrate = config.has_option('registrar', 'auto_migrate_db') and \
        config.getboolean('registrar', 'auto_migrate_db')
    if auto_migrate:
        keylime.cmd.migrations_apply.apply('registrar')

    registrar_common.start(config.get('registrar', 'registrar_ip'),
                           config.getint('registrar', 'registrar_tls_port'),
                           config.getint('registrar', 'registrar_port'))
def main():
    """Entry point: optionally apply pending DB migrations, then start the registrar."""
    # if we are configured to auto-migrate the DB, check if there are any migrations to perform
    auto_migrate = config.has_option("registrar", "auto_migrate_db") and \
        config.getboolean("registrar", "auto_migrate_db")
    if auto_migrate:
        keylime.cmd.migrations_apply.apply("registrar")

    registrar_common.start(
        config.get("registrar", "registrar_ip"),
        config.getint("registrar", "registrar_tls_port"),
        config.getint("registrar", "registrar_port"),
    )
def init_client_tls(section):
    """Populate the module-level client TLS state (tls_cert_info, tls_enabled).

    Idempotent: returns immediately when tls_cert_info is already set or TLS
    is disabled in the configuration.
    """
    global tls_cert_info
    global tls_enabled

    # make this reentrant
    if tls_cert_info:
        return

    if not config.getboolean('general', "enable_tls"):
        logger.warning("TLS is currently disabled, AIKs may not be authentic.")
        return

    logger.warning("TLS is enabled.")
    tls_enabled = True
    logger.info("Setting up client TLS...")

    tls_dir = config.get(section, 'registrar_tls_dir')
    my_cert = config.get(section, 'registrar_my_cert')
    my_priv_key = config.get(section, 'registrar_private_key')
    # NOTE(review): the password option is read but never used below — confirm
    # whether this is intentional config validation or dead code.
    my_key_pw = config.get(section, 'registrar_private_key_pw')

    # both shortcut values select the bundled client cert/key pair
    if tls_dir in ('default', 'CV'):
        tls_dir = 'reg_ca' if tls_dir == 'default' else 'cv_ca'
        my_cert = 'client-cert.crt'
        my_priv_key = 'client-private.pem'

    # this is relative path, convert to absolute in WORK_DIR
    if tls_dir[0] != '/':
        tls_dir = os.path.abspath('%s/%s' % (config.WORK_DIR, tls_dir))

    ca_cert = config.get(section, 'registrar_ca_cert')
    if ca_cert == 'default':
        ca_path = "%s/cacert.crt" % (tls_dir)
    else:
        ca_path = "%s/%s" % (tls_dir, ca_cert)

    tls_cert = my_cert if os.path.isabs(my_cert) else "%s/%s" % (tls_dir, my_cert)
    tls_priv_key = my_priv_key if os.path.isabs(my_priv_key) else "%s/%s" % (tls_dir, my_priv_key)

    tls_cert_info = (tls_cert, tls_priv_key)
def init_client_tls(section):
    """Populate the module-level client TLS state (tls_cert_info, tls_enabled, ca_cert).

    Idempotent: returns immediately when tls_cert_info is already set or TLS
    is disabled in the configuration.
    """
    global tls_cert_info
    global tls_enabled
    global ca_cert

    # make this reentrant
    if tls_cert_info:
        return

    if not config.getboolean("general", "enable_tls"):
        logger.warning(
            "Warning: TLS is currently disabled, AIKs may not be authentic.")
        return

    logger.warning("TLS is enabled.")
    tls_enabled = True
    logger.info("Setting up client TLS...")

    tls_dir = config.get(section, "registrar_tls_dir")
    ca_cert = config.get(section, "registrar_ca_cert")
    my_cert = config.get(section, "registrar_my_cert")
    my_priv_key = config.get(section, "registrar_private_key")

    # both shortcut values select the bundled CA and client cert/key pair
    if tls_dir in ("default", "CV"):
        tls_dir = "reg_ca" if tls_dir == "default" else "cv_ca"
        ca_cert = "cacert.crt"
        my_cert = "client-cert.crt"
        my_priv_key = "client-private.pem"

    # this is relative path, convert to absolute in WORK_DIR
    if tls_dir[0] != "/":
        tls_dir = os.path.abspath(os.path.join(config.WORK_DIR, tls_dir))

    if not os.path.isabs(ca_cert):
        ca_cert = os.path.join(tls_dir, ca_cert)

    tls_cert = my_cert if os.path.isabs(my_cert) else os.path.join(tls_dir, my_cert)
    tls_priv_key = my_priv_key if os.path.isabs(my_priv_key) else os.path.join(tls_dir, my_priv_key)

    tls_cert_info = (tls_cert, tls_priv_key)
def revocation_listener():
    """
    This configures and starts the revocation listener. It is designed to be
    started in a separate process.
    """
    # honour both the current option name and the old typo'd one;
    # keep old typo "listen_notfications" around for a few versions
    for option, deprecated in (("listen_notifications", False),
                               ("listen_notfications", True)):
        if config.has_option("cloud_agent", option):
            if deprecated:
                logger.warning(
                    'Option typo "listen_notfications" is deprecated. Please use "listen_notifications" instead.'
                )
            if not config.getboolean("cloud_agent", option):
                return

    secdir = secure_mount.mount()

    cert_path = config.get("cloud_agent", "revocation_cert")
    if cert_path == "default":
        cert_path = os.path.join(secdir, "unzipped/RevocationNotifier-cert.crt")
    elif cert_path[0] != "/":
        # if it is a relative, convert to absolute in work_dir
        cert_path = os.path.abspath(os.path.join(config.WORK_DIR, cert_path))

    try:
        while True:
            try:
                revocation_notifier.await_notifications(
                    perform_actions, revocation_cert_path=cert_path)
            except Exception as e:
                logger.exception(e)
                logger.warning(
                    "No connection to revocation server, retrying in 10s...")
                time.sleep(10)
    except (KeyboardInterrupt, SystemExit):
        logger.info("Stopping revocation listener...")
def notify_error(agent, msgtype='revocation', event=None):
    """Build, optionally sign, and fan out a revocation event for *agent* to
    every configured notifier (0mq and/or webhook)."""
    send_mq = config.getboolean('cloud_verifier', 'revocation_notifier')
    send_webhook = config.getboolean('cloud_verifier',
                                     'revocation_notifier_webhook',
                                     fallback=False)
    if not send_mq and not send_webhook:
        return

    # prepare the revocation message:
    revocation = {
        'type': msgtype,
        'ip': agent['ip'],
        'agent_id': agent['agent_id'],
        'port': agent['port'],
        'tpm_policy': agent['tpm_policy'],
        'vtpm_policy': agent['vtpm_policy'],
        'meta_data': agent['meta_data'],
        'event_time': time.asctime(),
    }
    if event:
        revocation['event_id'] = event.event_id
        revocation['severity_label'] = event.severity_label.name
        revocation['context'] = event.context

    payload = {'msg': json.dumps(revocation).encode('utf-8')}

    # also need to load up private key for signing revocations
    if agent['revocation_key'] == "":
        payload['signature'] = "none"
    else:
        privkey = crypto.rsa_import_privkey(agent['revocation_key'])
        payload['signature'] = crypto.rsa_sign(privkey, payload['msg'])

    if send_mq:
        revocation_notifier.notify(payload)
    if send_webhook:
        revocation_notifier.notify_webhook(payload)
def notify_error(agent, msgtype="revocation", event=None):
    """Build, optionally sign, and fan out a revocation event for *agent* to
    every configured notifier (0mq and/or webhook)."""
    send_mq = config.getboolean("cloud_verifier", "revocation_notifier")
    send_webhook = config.getboolean("cloud_verifier",
                                     "revocation_notifier_webhook",
                                     fallback=False)
    if not send_mq and not send_webhook:
        return

    # prepare the revocation message:
    revocation = {
        "type": msgtype,
        "ip": agent["ip"],
        "agent_id": agent["agent_id"],
        "port": agent["port"],
        "tpm_policy": agent["tpm_policy"],
        "meta_data": agent["meta_data"],
        "event_time": time.asctime(),
    }
    if event:
        revocation["event_id"] = event.event_id
        revocation["severity_label"] = event.severity_label.name
        revocation["context"] = event.context

    payload = {"msg": json.dumps(revocation).encode("utf-8")}

    # also need to load up private key for signing revocations
    if agent["revocation_key"] == "":
        payload["signature"] = "none"
    else:
        privkey = crypto.rsa_import_privkey(agent["revocation_key"])
        payload["signature"] = crypto.rsa_sign(privkey, payload["msg"])

    if send_mq:
        revocation_notifier.notify(payload)
    if send_webhook:
        revocation_notifier.notify_webhook(payload)
def notify(tosend):
    """Publish a revocation event to the local 0mq broker from a background thread."""
    assert config.getboolean("cloud_verifier", "revocation_notifier")
    try:
        import zmq  # pylint: disable=import-outside-toplevel
    except ImportError as error:
        raise Exception(
            "install PyZMQ for 'revocation_notifier' option") from error

    # python-requests internally uses either simplejson (preferred) or
    # the built-in json module, and when it is using the built-in one,
    # it may encounter difficulties handling bytes instead of strings.
    # To avoid such issues, let's convert `tosend' to str beforehand.
    tosend = json.bytes_to_str(tosend)

    def publisher(payload):
        # connect to the broker's IPC endpoint and publish with retries
        zmq_ctx = zmq.Context()
        pub_sock = zmq_ctx.socket(zmq.PUB)
        pub_sock.connect(f"ipc://{_SOCKET_PATH}")
        # give the connect 200ms to complete before publishing
        time.sleep(0.2)
        # now send it out via 0mq
        logger.info("Sending revocation event to listening nodes...")
        for attempt in range(config.getint("cloud_verifier", "max_retries")):
            try:
                pub_sock.send_string(json.dumps(payload))
                break
            except Exception as e:
                interval = config.getfloat("cloud_verifier", "retry_interval")
                exponential_backoff = config.getboolean(
                    "cloud_verifier", "exponential_backoff")
                next_retry = retry.retry_time(exponential_backoff, interval,
                                              attempt, logger)
                logger.debug(
                    "Unable to publish revocation message %d times, trying again in %f seconds: %s",
                    attempt, next_retry, e)
                time.sleep(next_retry)
        pub_sock.close()

    threading.Thread(target=publisher, args=(tosend,)).start()
def start_broker():
    """Fork the 0mq forwarder process that relays revocation events from the
    local IPC socket to TCP subscribers."""
    assert config.getboolean("cloud_verifier", "revocation_notifier")
    try:
        import zmq  # pylint: disable=import-outside-toplevel
    except ImportError as error:
        raise Exception(
            "install PyZMQ for 'revocation_notifier' option") from error

    def forwarder():
        # do not receive signals form the parent process
        os.setpgrp()
        signal.signal(signal.SIGTERM, lambda *_: sys.exit(0))

        # the IPC socket directory must exist and be private to us
        dir_name = os.path.dirname(_SOCKET_PATH)
        if os.path.exists(dir_name):
            if os.stat(dir_name).st_mode & 0o777 != 0o700:
                msg = f"{dir_name} present with wrong permissions"
                logger.error(msg)
                raise Exception(msg)
        else:
            os.makedirs(dir_name, 0o700)

        context = zmq.Context(1)
        # socket facing the local notify() publishers
        frontend = context.socket(zmq.SUB)
        frontend.bind(f"ipc://{_SOCKET_PATH}")
        frontend.setsockopt(zmq.SUBSCRIBE, b"")

        # Socket facing services
        backend = context.socket(zmq.PUB)
        backend.bind(
            f"tcp://{config.get('cloud_verifier', 'revocation_notifier_ip')}:"
            f"{config.getint('cloud_verifier', 'revocation_notifier_port')}")

        try:
            zmq.device(zmq.FORWARDER, frontend, backend)
        except (KeyboardInterrupt, SystemExit):
            context.destroy()

    global broker_proc
    broker_proc = Process(target=forwarder, name="zeroMQ")
    broker_proc.start()
def check_ek(self, ekcert):
    """ Check the Entity Key

    Arguments:
        ekcert {str} -- The endorsement key, either None, "emulator", or base64 encoded der cert

    Returns:
        bool -- True when the certificate verifies or checking is skipped;
        False on a missing or invalid certificate.
    """
    if not config.getboolean('tenant', 'require_ek_cert'):
        return True
    if config.STUB_TPM:
        logger.debug("Not checking ekcert due to STUB_TPM mode")
        return True
    if ekcert == 'emulator' and config.DISABLE_EK_CERT_CHECK_EMULATOR:
        logger.info("Not checking ekcert of TPM emulator")
        return True
    if ekcert is None:
        logger.warning("No EK cert provided, require_ek_cert option in config set to True")
        return False
    if not self.tpm_instance.verify_ek(base64.b64decode(ekcert)):
        logger.warning("Invalid EK certificate")
        return False
    return True
def worker(tosend):
    """Publish *tosend* to the local 0mq revocation socket, retrying on failure."""
    zmq_ctx = zmq.Context()
    pub_sock = zmq_ctx.socket(zmq.PUB)
    pub_sock.connect(f"ipc://{_SOCKET_PATH}")
    # give the connect 200ms to complete before publishing
    time.sleep(0.2)
    # now send it out via 0mq
    logger.info("Sending revocation event to listening nodes...")
    for attempt in range(config.getint('cloud_verifier', 'max_retries')):
        try:
            pub_sock.send_string(json.dumps(tosend))
            break
        except Exception as e:
            interval = config.getfloat('cloud_verifier', 'retry_interval')
            exponential_backoff = config.getboolean(
                'cloud_verifier', 'exponential_backoff')
            next_retry = retry.retry_time(exponential_backoff, interval,
                                          attempt, logger)
            logger.debug(
                "Unable to publish revocation message %d times, trying again in %f seconds: %s",
                attempt, next_retry, e)
            time.sleep(next_retry)
    pub_sock.close()
def init_mtls(section='cloud_verifier', generatedir='cv_ca'):
    """Build the server-side mTLS SSLContext for *section*.

    Returns None when TLS is disabled.  When tls_dir is 'generate', a CA and
    client certificate are created on first use via ca_util; when it is 'CV'
    (registrar only), the verifier's CA directory is reused.
    """
    if not config.getboolean('general', "enable_tls"):
        logger.warning(
            "Warning: TLS is currently disabled, keys will be sent in the clear! This should only be used for testing."
        )
        return None

    logger.info("Setting up TLS...")
    my_cert = config.get(section, 'my_cert')
    ca_cert = config.get(section, 'ca_cert')
    my_priv_key = config.get(section, 'private_key')
    my_key_pw = config.get(section, 'private_key_pw')
    tls_dir = config.get(section, 'tls_dir')

    if tls_dir == 'generate':
        # self-generation requires all cert options to be left at their defaults
        if my_cert != 'default' or my_priv_key != 'default' or ca_cert != 'default':
            raise Exception(
                "To use tls_dir=generate, options ca_cert, my_cert, and private_key must all be set to 'default'"
            )

        if generatedir[0] != '/':
            generatedir = os.path.abspath('%s/%s' %
                                          (config.WORK_DIR, generatedir))
        tls_dir = generatedir

        ca_path = "%s/cacert.crt" % (tls_dir)
        if os.path.exists(ca_path):
            # a CA already exists from a previous run; do not regenerate
            logger.info(
                "Existing CA certificate found in %s, not generating a new one"
                % (tls_dir))
        else:
            logger.info(
                "Generating a new CA in %s and a client certificate for connecting"
                % tls_dir)
            logger.info("use keylime_ca -d %s to manage this CA" % tls_dir)
            if not os.path.exists(tls_dir):
                os.makedirs(tls_dir, 0o700)
            if my_key_pw == 'default':
                logger.warning(
                    "CAUTION: using default password for CA, please set private_key_pw to a strong password"
                )
            # order matters: the password must be set before the CA is
            # initialised and the certificates are minted
            ca_util.setpassword(my_key_pw)
            ca_util.cmd_init(tls_dir)
            ca_util.cmd_mkcert(tls_dir, socket.gethostname())
            ca_util.cmd_mkcert(tls_dir, 'client')

    if tls_dir == 'CV':
        # only the registrar may piggyback on the verifier's CA directory
        if section != 'registrar':
            raise Exception(
                "You only use the CV option to tls_dir for the registrar not %s"
                % section)
        tls_dir = os.path.abspath('%s/%s' % (config.WORK_DIR, 'cv_ca'))
        if not os.path.exists("%s/cacert.crt" % (tls_dir)):
            raise Exception(
                "It appears that the verifier has not yet created a CA and certificates, please run the verifier first"
            )

    # if it is relative path, convert to absolute in WORK_DIR
    if tls_dir[0] != '/':
        tls_dir = os.path.abspath('%s/%s' %
                                  (config.WORK_DIR, tls_dir))

    # resolve the CA path: 'default' uses the CA dir, relative paths are
    # anchored at tls_dir, absolute paths are used verbatim
    if ca_cert == 'default':
        ca_path = "%s/cacert.crt" % (tls_dir)
    elif not os.path.isabs(ca_cert):
        ca_path = "%s/%s" % (tls_dir, ca_cert)
    else:
        ca_path = ca_cert

    # same resolution scheme for our certificate ...
    if my_cert == 'default':
        my_cert = "%s/%s-cert.crt" % (tls_dir, socket.gethostname())
    elif not os.path.isabs(my_cert):
        my_cert = "%s/%s" % (tls_dir, my_cert)
    else:
        pass

    # ... and for the private key
    if my_priv_key == 'default':
        my_priv_key = "%s/%s-private.pem" % (tls_dir, socket.gethostname())
    elif not os.path.isabs(my_priv_key):
        my_priv_key = "%s/%s" % (tls_dir, my_priv_key)

    try:
        context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
        context.load_verify_locations(cafile=ca_path)
        context.load_cert_chain(certfile=my_cert,
                                keyfile=my_priv_key,
                                password=my_key_pw)
        if (config.has_option(section, 'check_client_cert')
                and config.getboolean(section, 'check_client_cert')):
            context.verify_mode = ssl.CERT_REQUIRED
    except ssl.SSLError as exc:
        # give a hint for the common weak-key failure mode before re-raising
        if exc.reason == 'EE_KEY_TOO_SMALL':
            logger.error('Higher key strength is required for keylime '
                         'running on this system. If keylime is responsible '
                         'to generate the certificate, please raise the value '
                         'of configuration option [ca]cert_bits, remove '
                         'generated certificate and re-run keylime service')
        raise exc

    return context
def main():
    """Main method of the Cloud Verifier Server.  This method is encapsulated
    in a function for packaging to allow it to be called as a function by an
    external program."""

    cloudverifier_port = config.get('cloud_verifier', 'cloudverifier_port')
    cloudverifier_host = config.get('cloud_verifier', 'cloudverifier_ip')

    # allow tornado's max upload size to be configurable
    max_upload_size = None
    if config.has_option('cloud_verifier', 'max_upload_size'):
        max_upload_size = int(config.get('cloud_verifier', 'max_upload_size'))

    # create tables on first run, then move agents left in a reactivatable
    # state by a previous shutdown back to START
    VerfierMain.metadata.create_all(engine, checkfirst=True)
    session = get_session()
    try:
        query_all = session.query(VerfierMain).all()
        for row in query_all:
            if row.operational_state in states.APPROVED_REACTIVATE_STATES:
                row.operational_state = states.START
        session.commit()
    except SQLAlchemyError as e:
        logger.error('SQLAlchemy Error: %s', e)

    num = session.query(VerfierMain.agent_id).count()
    if num > 0:
        agent_ids = session.query(VerfierMain.agent_id).all()
        logger.info("Agent ids in db loaded from file: %s", agent_ids)

    logger.info('Starting Cloud Verifier (tornado) on port %s, use <Ctrl-C> to stop',
                cloudverifier_port)

    app = tornado.web.Application([
        (r"/(?:v[0-9]/)?agents/.*", AgentsHandler),
        (r"/(?:v[0-9]/)?allowlists/.*", AllowlistHandler),
        (r".*", MainHandler),
    ])

    context = cloud_verifier_common.init_mtls()

    # after TLS is up, start revocation notifier
    if config.getboolean('cloud_verifier', 'revocation_notifier'):
        logger.info("Starting service for revocation notifications on port %s",
                    config.getint('cloud_verifier', 'revocation_notifier_port'))
        revocation_notifier.start_broker()

    # bind before forking so all workers share the listening sockets; each
    # forked worker then needs its own event loop
    sockets = tornado.netutil.bind_sockets(
        int(cloudverifier_port), address=cloudverifier_host)
    task_id = tornado.process.fork_processes(config.getint(
        'cloud_verifier', 'multiprocessing_pool_num_workers'))
    asyncio.set_event_loop(asyncio.new_event_loop())
    # Auto reactivate agent (only in the first worker to avoid duplicates)
    if task_id == 0:
        asyncio.ensure_future(activate_agents())

    server = tornado.httpserver.HTTPServer(app,
                                           ssl_options=context,
                                           max_buffer_size=max_upload_size)
    server.add_sockets(sockets)
    try:
        tornado.ioloop.IOLoop.instance().start()
    except KeyboardInterrupt:
        tornado.ioloop.IOLoop.instance().stop()
        if config.getboolean('cloud_verifier', 'revocation_notifier'):
            revocation_notifier.stop_broker()
def main():
    """Agent entry point: initialise the TPM, register with the registrar,
    activate the AIK, start the agent HTTP server and (optionally) the
    revocation listener loop."""
    # warn early if the measurement logs the tenant may request are unreadable
    for ML in [config.MEASUREDBOOT_ML, config.IMA_ML]:
        if not os.access(ML, os.F_OK):
            logger.warning(
                "Measurement list path %s not accessible by agent. Any attempt to instruct it to access this path - via \"keylime_tenant\" CLI - will result in agent process dying",
                ML)

    # dmidecode-derived UUIDs require root and the dmidecode binary
    if config.get('cloud_agent', 'agent_uuid') == 'dmidecode':
        if os.getuid() != 0:
            raise RuntimeError('agent_uuid is configured to use dmidecode, '
                               'but current process is not running as root.')
        cmd = ['which', 'dmidecode']
        ret = cmd_exec.run(cmd, raiseOnError=False)
        if ret['code'] != 0:
            raise RuntimeError('agent_uuid is configured to use dmidecode, '
                               'but it\'s is not found on the system.')

    # Instanitate TPM class
    instance_tpm = tpm()
    # get params for initialization
    registrar_ip = config.get('cloud_agent', 'registrar_ip')
    registrar_port = config.get('cloud_agent', 'registrar_port')

    # initialize the tmpfs partition to store keys if it isn't already available
    secdir = secure_mount.mount()

    # change dir to working dir
    config.ch_dir(config.WORK_DIR, logger)

    # initialize tpm
    (ekcert, ek_tpm, aik_tpm) = instance_tpm.tpm_init(
        self_activate=False,
        config_pw=config.get('cloud_agent', 'tpm_ownerpassword')
    )  # this tells initialize not to self activate the AIK
    virtual_agent = instance_tpm.is_vtpm()

    # try to get some TPM randomness into the system entropy pool
    instance_tpm.init_system_rand()

    # sentinel values used when no real EK certificate is available
    if ekcert is None:
        if virtual_agent:
            ekcert = 'virtual'
        elif instance_tpm.is_emulator():
            ekcert = 'emulator'

    # now we need the UUID
    try:
        agent_uuid = config.get('cloud_agent', 'agent_uuid')
    except configparser.NoOptionError:
        agent_uuid = None
    if agent_uuid == 'openstack':
        agent_uuid = openstack.get_openstack_uuid()
    elif agent_uuid == 'hash_ek':
        agent_uuid = hashlib.sha256(ek_tpm).hexdigest()
    elif agent_uuid == 'generate' or agent_uuid is None:
        agent_uuid = str(uuid.uuid4())
    elif agent_uuid == 'dmidecode':
        cmd = ['dmidecode', '-s', 'system-uuid']
        ret = cmd_exec.run(cmd)
        sys_uuid = ret['retout'].decode('utf-8')
        agent_uuid = sys_uuid.strip()
    elif agent_uuid == 'hostname':
        agent_uuid = socket.getfqdn()

    if config.STUB_VTPM and config.TPM_CANNED_VALUES is not None:
        # Use canned values for stubbing
        jsonIn = config.TPM_CANNED_VALUES
        if "add_vtpm_to_group" in jsonIn:
            # The value we're looking for has been canned!
            agent_uuid = jsonIn['add_vtpm_to_group']['retout']
        else:
            # Our command hasn't been canned!
            raise Exception("Command %s not found in canned json!" %
                            ("add_vtpm_to_group"))

    logger.info("Agent UUID: %s", agent_uuid)

    # register it and get back a blob
    keyblob = registrar_client.doRegisterAgent(
        registrar_ip, registrar_port, agent_uuid, ek_tpm, ekcert, aik_tpm)

    if keyblob is None:
        instance_tpm.flush_keys()
        raise Exception("Registration failed")

    # get the ephemeral registrar key
    key = instance_tpm.activate_identity(keyblob)

    if key is None:
        instance_tpm.flush_keys()
        raise Exception("Activation failed")

    # tell the registrar server we know the key
    retval = False
    retval = registrar_client.doActivateAgent(
        registrar_ip, registrar_port, agent_uuid, key)

    if not retval:
        instance_tpm.flush_keys()
        raise Exception("Registration failed on activate")

    # start the agent's own HTTP server on a background thread
    serveraddr = (config.get('cloud_agent', 'cloudagent_ip'),
                  config.getint('cloud_agent', 'cloudagent_port'))
    server = CloudAgentHTTPServer(serveraddr, Handler, agent_uuid)
    serverthread = threading.Thread(target=server.serve_forever)

    logger.info("Starting Cloud Agent on %s:%s use <Ctrl-C> to stop",
                serveraddr[0], serveraddr[1])
    serverthread.start()

    # want to listen for revocations?
    if config.getboolean('cloud_agent', 'listen_notfications'):
        cert_path = config.get('cloud_agent', 'revocation_cert')
        if cert_path == "default":
            cert_path = '%s/unzipped/RevocationNotifier-cert.crt' % (secdir)
        elif cert_path[0] != '/':
            # if it is a relative, convert to absolute in work_dir
            cert_path = os.path.abspath('%s/%s' %
                                        (config.WORK_DIR, cert_path))

        def perform_actions(revocation):
            # run every configured revocation action against this revocation
            actionlist = []

            # load the actions from inside the keylime module
            actionlisttxt = config.get('cloud_agent', 'revocation_actions')
            if actionlisttxt.strip() != "":
                actionlist = actionlisttxt.split(',')
                actionlist = ["revocation_actions.%s" % i for i in actionlist]

            # load actions from unzipped
            if os.path.exists("%s/unzipped/action_list" % secdir):
                with open("%s/unzipped/action_list" % secdir, 'r') as f:
                    actionlisttxt = f.read()
                if actionlisttxt.strip() != "":
                    localactions = actionlisttxt.strip().split(',')
                    for action in localactions:
                        # only local_action_* modules may come from the payload
                        if not action.startswith('local_action_'):
                            logger.warning(
                                "Invalid local action: %s. Must start with local_action_",
                                action)
                        else:
                            actionlist.append(action)

                    # make the unzipped payload importable for local actions
                    uzpath = "%s/unzipped" % secdir
                    if uzpath not in sys.path:
                        sys.path.append(uzpath)

            for action in actionlist:
                logger.info("Executing revocation action %s", action)
                try:
                    module = importlib.import_module(action)
                    execute = getattr(module, 'execute')
                    asyncio.get_event_loop().run_until_complete(
                        execute(revocation))
                except Exception as e:
                    # a failing action must not stop the remaining ones
                    logger.warning(
                        "Exception during execution of revocation action %s: %s",
                        action, e)
        try:
            while True:
                try:
                    revocation_notifier.await_notifications(
                        perform_actions, revocation_cert_path=cert_path)
                except Exception as e:
                    logger.exception(e)
                    logger.warning(
                        "No connection to revocation server, retrying in 10s..."
                    )
                    time.sleep(10)
        except KeyboardInterrupt:
            logger.info("TERM Signal received, shutting down...")
            instance_tpm.flush_keys()
            server.shutdown()
    else:
        # no revocation listener: just idle until interrupted
        try:
            while True:
                time.sleep(1)
        except KeyboardInterrupt:
            logger.info("TERM Signal received, shutting down...")
            instance_tpm.flush_keys()
            server.shutdown()
# Module-level setup for the tenant webapp: shared Tenant instance, client
# TLS material, and base URLs for the verifier and registrar services.
import simplejson as json
import tornado.ioloop
import tornado.web

from keylime.requests_client import RequestsClient
from keylime.common import states
from keylime import config
from keylime import keylime_logging
from keylime import tenant

logger = keylime_logging.init_logging('tenant_webapp')

# one Tenant instance is reused by all handlers for its TLS material
tenant_templ = tenant.Tenant()
my_cert, my_priv_key = tenant_templ.get_tls_context()
cert = (my_cert, my_priv_key)  # (certfile, keyfile) pair for requests
if config.getboolean('general', "enable_tls"):
    tls_enabled = True
else:
    tls_enabled = False
    cert = ""
    logger.warning(
        "Warning: TLS is currently disabled, keys will be sent in the clear! This should only be used for testing."
    )

# host:port base URLs for the backend services
verifier_ip = config.get('cloud_verifier', 'cloudverifier_ip')
verifier_port = config.get('cloud_verifier', 'cloudverifier_port')
verifier_base_url = f'{verifier_ip}:{verifier_port}'

registrar_ip = config.get('registrar', 'registrar_ip')
registrar_tls_port = config.get('registrar', 'registrar_tls_port')
registrar_base_tls_url = f'{registrar_ip}:{registrar_tls_port}'
def main():
    """Initialize and run the Keylime cloud agent.

    Checks measurement-log accessibility, mounts the secure tmpfs, drops
    privileges when configured, initializes the TPM, determines the agent
    UUID, registers and activates the agent with the registrar, starts the
    HTTP server (optionally wrapped in mTLS) plus a revocation-listener
    process, and installs signal handlers for clean shutdown.
    """
    # Warn early if the measurement logs are not readable by this process.
    for ML in [config.MEASUREDBOOT_ML, config.IMA_ML]:
        if not os.access(ML, os.F_OK):
            logger.warning(
                'Measurement list path %s not accessible by agent. Any attempt to instruct it to access this path - via "keylime_tenant" CLI - will result in agent process dying',
                ML,
            )

    # Keep the IMA log open for the server's lifetime (handed to the server below).
    ima_log_file = None
    if os.path.exists(config.IMA_ML):
        ima_log_file = open(config.IMA_ML, "r", encoding="utf-8")  # pylint: disable=consider-using-with

    # Measured-boot log is read once and held base64-encoded in memory.
    tpm_log_file_data = None
    if os.path.exists(config.MEASUREDBOOT_ML):
        with open(config.MEASUREDBOOT_ML, "rb") as tpm_log_file:
            tpm_log_file_data = base64.b64encode(tpm_log_file.read())

    # dmidecode-based UUIDs require root and the dmidecode binary on PATH.
    if config.get("cloud_agent", "agent_uuid") == "dmidecode":
        if os.getuid() != 0:
            raise RuntimeError(
                "agent_uuid is configured to use dmidecode, but current process is not running as root."
            )
        cmd = ["which", "dmidecode"]
        ret = cmd_exec.run(cmd, raiseOnError=False)
        if ret["code"] != 0:
            raise RuntimeError(
                "agent_uuid is configured to use dmidecode, but it's is not found on the system."
            )

    # initialize the tmpfs partition to store keys if it isn't already available
    secdir = secure_mount.mount()

    # Now that operations requiring root privileges are done, drop privileges
    # if 'run_as' is available in the configuration.
    if os.getuid() == 0:
        run_as = config.get("cloud_agent", "run_as", fallback="")
        if run_as != "":
            user_utils.chown(secdir, run_as)
            user_utils.change_uidgid(run_as)
            logger.info("Dropped privileges to %s", run_as)
        else:
            logger.warning(
                "Cannot drop privileges since 'run_as' is empty or missing in keylime.conf agent section."
            )

    # Instanitate TPM class
    instance_tpm = tpm()
    # get params for initialization
    registrar_ip = config.get("cloud_agent", "registrar_ip")
    registrar_port = config.get("cloud_agent", "registrar_port")

    # get params for the verifier to contact the agent; environment variables
    # take precedence over the configuration file.
    contact_ip = os.getenv("KEYLIME_AGENT_CONTACT_IP", None)
    if contact_ip is None and config.has_option("cloud_agent", "agent_contact_ip"):
        contact_ip = config.get("cloud_agent", "agent_contact_ip")
    contact_port = os.getenv("KEYLIME_AGENT_CONTACT_PORT", None)
    if contact_port is None and config.has_option("cloud_agent", "agent_contact_port"):
        contact_port = config.get("cloud_agent", "agent_contact_port", fallback="invalid")

    # change dir to working dir
    fs_util.ch_dir(config.WORK_DIR)

    # set a conservative general umask
    os.umask(0o077)

    # initialize tpm
    (ekcert, ek_tpm, aik_tpm) = instance_tpm.tpm_init(
        self_activate=False, config_pw=config.get("cloud_agent", "tpm_ownerpassword")
    )  # this tells initialize not to self activate the AIK

    # Warn if kernel version is <5.10 and another algorithm than SHA1 is used,
    # because otherwise IMA will not work
    kernel_version = tuple(platform.release().split("-")[0].split("."))
    if tuple(map(int, kernel_version)) < (
            5, 10, 0) and instance_tpm.defaults["hash"] != algorithms.Hash.SHA1:
        logger.warning(
            "IMA attestation only works on kernel versions <5.10 with SHA1 as hash algorithm. "
            'Even if ascii_runtime_measurements shows "%s" as the '
            "algorithm, it might be just padding zeros",
            (instance_tpm.defaults["hash"]),
        )

    # TPM emulators have no EK certificate; use a marker value instead.
    if ekcert is None and instance_tpm.is_emulator():
        ekcert = "emulator"

    # now we need the UUID: it may be derived from the EK public key, freshly
    # generated, read from dmidecode, the hostname, or the environment.
    try:
        agent_uuid = config.get("cloud_agent", "agent_uuid")
    except configparser.NoOptionError:
        agent_uuid = None
    if agent_uuid == "hash_ek":
        ek_pubkey = pubkey_from_tpm2b_public(base64.b64decode(ek_tpm))
        ek_pubkey_pem = ek_pubkey.public_bytes(
            encoding=serialization.Encoding.PEM,
            format=serialization.PublicFormat.SubjectPublicKeyInfo)
        agent_uuid = hashlib.sha256(ek_pubkey_pem).hexdigest()
    elif agent_uuid == "generate" or agent_uuid is None:
        agent_uuid = str(uuid.uuid4())
    elif agent_uuid == "dmidecode":
        cmd = ["dmidecode", "-s", "system-uuid"]
        ret = cmd_exec.run(cmd)
        sys_uuid = ret["retout"][0].decode("utf-8")
        agent_uuid = sys_uuid.strip()
        try:
            uuid.UUID(agent_uuid)
        except ValueError as e:
            raise RuntimeError(  # pylint: disable=raise-missing-from
                f"The UUID returned from dmidecode is invalid: {str(e)}")
    elif agent_uuid == "hostname":
        agent_uuid = socket.getfqdn()
    elif agent_uuid == "environment":
        agent_uuid = os.getenv("KEYLIME_AGENT_UUID", None)
        if agent_uuid is None:
            raise RuntimeError(
                "Env variable KEYLIME_AGENT_UUID is empty, but agent_uuid is set to 'environment'"
            )
    elif not validators.valid_uuid(agent_uuid):
        raise RuntimeError("The UUID is not valid")

    if not validators.valid_agent_id(agent_uuid):
        raise RuntimeError(
            "The agent ID set via agent uuid parameter use invalid characters")

    logger.info("Agent UUID: %s", agent_uuid)

    serveraddr = (config.get("cloud_agent", "cloudagent_ip"),
                  config.getint("cloud_agent", "cloudagent_port"))

    keylime_ca = config.get("cloud_agent", "keylime_ca")
    if keylime_ca == "default":
        keylime_ca = os.path.join(config.WORK_DIR, "cv_ca", "cacert.crt")

    server = CloudAgentHTTPServer(serveraddr, Handler, agent_uuid, contact_ip,
                                  ima_log_file, tpm_log_file_data)
    if server.mtls_cert_enabled:
        # Wrap the listening socket in an mTLS context.
        context = web_util.generate_mtls_context(server.mtls_cert_path,
                                                 server.rsakey_path,
                                                 keylime_ca,
                                                 logger=logger)
        server.socket = context.wrap_socket(server.socket, server_side=True)
    else:
        # Without mTLS, refuse to run if payloads could be pushed to the agent
        # unless the operator has explicitly opted in to insecure payloads.
        if (not config.getboolean(
                "cloud_agent", "enable_insecure_payload", fallback=False)
                and config.get("cloud_agent", "payload_script") != ""):
            raise RuntimeError(
                "agent mTLS is disabled, while a tenant can instruct the agent to execute code on the node. "
                'In order to allow the running of the agent, "enable_insecure_payload" has to be set to "True"'
            )

    serverthread = threading.Thread(target=server.serve_forever, daemon=True)

    # register it and get back a blob
    mtls_cert = "disabled"
    if server.mtls_cert:
        mtls_cert = server.mtls_cert.public_bytes(serialization.Encoding.PEM)
    keyblob = registrar_client.doRegisterAgent(registrar_ip, registrar_port,
                                               agent_uuid, ek_tpm, ekcert,
                                               aik_tpm, mtls_cert, contact_ip,
                                               contact_port)

    if keyblob is None:
        instance_tpm.flush_keys()
        raise Exception("Registration failed")

    # get the ephemeral registrar key
    key = instance_tpm.activate_identity(keyblob)

    if key is None:
        instance_tpm.flush_keys()
        raise Exception("Activation failed")

    # tell the registrar server we know the key
    retval = registrar_client.doActivateAgent(registrar_ip, registrar_port,
                                              agent_uuid, key)

    if not retval:
        instance_tpm.flush_keys()
        raise Exception("Registration failed on activate")

    # Start revocation listener in a new process to not interfere with tornado
    revocation_process = multiprocessing.Process(target=revocation_listener,
                                                 daemon=True)
    revocation_process.start()

    logger.info(
        "Starting Cloud Agent on %s:%s with API version %s. Use <Ctrl-C> to stop",
        serveraddr[0],
        serveraddr[1],
        keylime_api_version.current_version(),
    )
    serverthread.start()

    def shutdown_handler(*_):
        # Orderly teardown: revocation process, HTTP server, secure mount,
        # then the TPM keys; exits the process.
        logger.info("TERM Signal received, shutting down...")

        logger.debug("Stopping revocation notifier...")
        revocation_process.terminate()

        logger.debug("Shutting down HTTP server...")
        server.shutdown()
        server.server_close()
        serverthread.join()
        logger.debug("HTTP server stopped...")

        revocation_process.join()
        logger.debug("Revocation notifier stopped...")

        secure_mount.umount()
        logger.debug("Umounting directories...")

        instance_tpm.flush_keys()
        logger.debug("Flushed keys successfully")
        sys.exit(0)

    signal.signal(signal.SIGTERM, shutdown_handler)
    signal.signal(signal.SIGQUIT, shutdown_handler)
    signal.signal(signal.SIGINT, shutdown_handler)

    # Keep the main thread alive by waiting for the server thread
    serverthread.join()
def __init__(self, server_address, RequestHandlerClass, agent_uuid,
             contact_ip, ima_log_file, tpm_log_file_data):
    """Constructor overridden to provide ability to pass configuration arguments to the server.

    Loads or generates the RSA key used for U/V transport and mTLS, loads or
    generates the mTLS certificate when enabled, resolves the revocation
    certificate path, restores a previously stowed U value from TPM NVRAM,
    and finally initializes the underlying HTTPServer.
    """
    # Find the locations for the U/V transport and mTLS key and certificate.
    # They are either relative to secdir (/var/lib/keylime/secure) or absolute paths.
    secdir = secure_mount.mount()
    keyname = config.get("cloud_agent", "rsa_keyname")
    if not os.path.isabs(keyname):
        keyname = os.path.join(secdir, keyname)

    # read or generate the key depending on configuration
    if os.path.isfile(keyname):
        # read in private key
        logger.info("Using existing key in %s", keyname)
        with open(keyname, "rb") as f:
            rsa_key = crypto.rsa_import_privkey(f.read())
    else:
        logger.info(
            "Key for U/V transport and mTLS certificate not found, generating a new one"
        )
        rsa_key = crypto.rsa_generate(2048)
        # persist the new key so later runs reuse it
        with open(keyname, "wb") as f:
            f.write(crypto.rsa_export_privkey(rsa_key))

    self.rsakey_path = keyname
    self.rsaprivatekey = rsa_key
    # exportable public half, sent to peers during key exchange
    self.rsapublickey_exportable = crypto.rsa_export_pubkey(
        self.rsaprivatekey)

    self.mtls_cert_enabled = config.getboolean("cloud_agent",
                                               "mtls_cert_enabled",
                                               fallback=False)
    if self.mtls_cert_enabled:
        certname = config.get("cloud_agent", "mtls_cert")

        if not os.path.isabs(certname):
            certname = os.path.join(secdir, certname)

        if os.path.isfile(certname):
            logger.info("Using existing mTLS cert in %s", certname)
            with open(certname, "rb") as f:
                mtls_cert = x509.load_pem_x509_certificate(
                    f.read(), backend=default_backend())
        else:
            logger.info("No mTLS certificate found, generating a new one")
            # include the contact IP (if any) alongside the bind address as
            # subject alternative names for the self-signed cert
            agent_ips = [server_address[0]]
            if contact_ip is not None:
                agent_ips.append(contact_ip)
            with open(certname, "wb") as f:
                # By default generate a TLS certificate valid for 5 years
                valid_util = datetime.datetime.utcnow(
                ) + datetime.timedelta(days=(360 * 5))
                mtls_cert = crypto.generate_selfsigned_cert(
                    agent_uuid, rsa_key, valid_util, agent_ips)
                f.write(mtls_cert.public_bytes(serialization.Encoding.PEM))

        self.mtls_cert_path = certname
        self.mtls_cert = mtls_cert
    else:
        self.mtls_cert_path = None
        self.mtls_cert = None
        logger.info(
            "WARNING: mTLS disabled, Tenant and Verifier will reach out to agent via HTTP"
        )

    # Resolve the revocation certificate path the same way as elsewhere:
    # "default" lives under secdir, relative paths anchor at WORK_DIR.
    self.revocation_cert_path = config.get("cloud_agent", "revocation_cert")
    if self.revocation_cert_path == "default":
        self.revocation_cert_path = os.path.join(
            secdir, "unzipped/RevocationNotifier-cert.crt")
    elif self.revocation_cert_path[0] != "/":
        # if it is a relative, convert to absolute in work_dir
        self.revocation_cert_path = os.path.abspath(
            os.path.join(config.WORK_DIR, self.revocation_cert_path))

    # attempt to get a U value from the TPM NVRAM
    nvram_u = tpm_instance.read_key_nvram()
    if nvram_u is not None:
        logger.info("Existing U loaded from TPM NVRAM")
        self.add_U(nvram_u)
    http.server.HTTPServer.__init__(self, server_address,
                                    RequestHandlerClass)
    self.enc_keyname = config.get("cloud_agent", "enc_keyname")
    self.agent_uuid = agent_uuid
    self.ima_log_file = ima_log_file
    self.tpm_log_file_data = tpm_log_file_data
def do_POST(self):
    """This method services the POST request typically from either the Tenant or the Cloud Verifier.

    Only tenant and cloudverifier uri's are supported. Both requests require
    a nonce parameter. The Cloud verifier requires an additional mask
    parameter. If the uri or parameters are incorrect, a 400 response is
    returned.

    Two URI families are handled: /notifications/revocation (processes a
    revocation message) and /keys/ukey or /keys/vkey (accepts a key share;
    once both shares are present the derived key is written out and any
    payload is decrypted, optionally unzipped and executed, and measured
    into a PCR).
    """
    rest_params = web_util.get_restful_params(self.path)
    if rest_params is None:
        web_util.echo_json_response(
            self, 405,
            "Not Implemented: Use /keys/ or /notifications/ interface")
        return

    if not rest_params["api_version"]:
        web_util.echo_json_response(self, 400, "API Version not supported")
        return

    # A body is mandatory for every supported POST.
    content_length = int(self.headers.get("Content-Length", 0))
    if content_length <= 0:
        logger.warning(
            "POST returning 400 response, expected content in message. url: %s",
            self.path)
        web_util.echo_json_response(self, 400, "expected content in message")
        return

    post_body = self.rfile.read(content_length)
    try:
        json_body = json.loads(post_body)
    except Exception as e:
        logger.warning(
            "POST returning 400 response, could not parse body data: %s", e)
        web_util.echo_json_response(self, 400, "content is invalid")
        return

    # Revocation notifications are handed to the notifier and we are done.
    if "notifications" in rest_params:
        if rest_params["notifications"] == "revocation":
            revocation_notifier.process_revocation(
                json_body,
                perform_actions,
                cert_path=self.server.revocation_cert_path)
            web_util.echo_json_response(self, 200, "Success")
        else:
            web_util.echo_json_response(
                self, 400, "Only /notifications/revocation is supported")
        return

    if rest_params.get("keys", None) not in ["ukey", "vkey"]:
        web_util.echo_json_response(
            self, 400, "Only /keys/ukey or /keys/vkey are supported")
        return

    # The key share arrives RSA-encrypted to this server's private key.
    try:
        b64_encrypted_key = json_body["encrypted_key"]
        decrypted_key = crypto.rsa_decrypt(
            self.server.rsaprivatekey, base64.b64decode(b64_encrypted_key))
    except (ValueError, KeyError, TypeError) as e:
        logger.warning(
            "POST returning 400 response, could not parse body data: %s", e)
        web_util.echo_json_response(self, 400, "content is invalid")
        return

    have_derived_key = False

    if rest_params["keys"] == "ukey":
        # U key must come with an auth_tag; it may also carry a payload.
        if "auth_tag" not in json_body:
            logger.warning(
                "POST returning 400 response, U key provided without an auth_tag"
            )
            web_util.echo_json_response(self, 400, "auth_tag is missing")
            return
        self.server.add_U(decrypted_key)
        self.server.auth_tag = json_body["auth_tag"]
        self.server.payload = json_body.get("payload", None)
        have_derived_key = self.server.attempt_decryption()
    elif rest_params["keys"] == "vkey":
        self.server.add_V(decrypted_key)
        have_derived_key = self.server.attempt_decryption()
    else:
        logger.warning("POST returning response. uri not supported: %s",
                       self.path)
        web_util.echo_json_response(self, 400, "uri not supported")
        return
    logger.info("POST of %s key returning 200",
                ("V", "U")[rest_params["keys"] == "ukey"])
    web_util.echo_json_response(self, 200, "Success")

    # no key yet, then we're done
    if not have_derived_key:
        return

    # woo hoo we have a key
    # ok lets write out the key now
    secdir = secure_mount.mount(
    )  # confirm that storage is still securely mounted

    # clean out the secure dir of any previous info before we extract files
    if os.path.isdir(os.path.join(secdir, "unzipped")):
        shutil.rmtree(os.path.join(secdir, "unzipped"))

    # write out key file
    with open(os.path.join(secdir, self.server.enc_keyname),
              "w",
              encoding="utf-8") as f:
        f.write(base64.b64encode(self.server.K).decode())

    # stow the U value for later
    tpm_instance.write_key_nvram(self.server.final_U)

    # optionally extend a hash of they key and payload into specified PCR
    tomeasure = self.server.K

    # if we have a good key, now attempt to write out the encrypted payload
    dec_path = os.path.join(secdir,
                            config.get("cloud_agent", "dec_payload_file"))
    enc_path = os.path.join(config.WORK_DIR, "encrypted_payload")

    dec_payload = None
    enc_payload = None
    if self.server.payload is not None:
        # refuse payload deployment over plain HTTP unless explicitly allowed
        if not self.server.mtls_cert_enabled and not config.getboolean(
                "cloud_agent", "enable_insecure_payload", fallback=False):
            logger.warning(
                'agent mTLS is disabled, and unless "enable_insecure_payload" is set to "True", payloads cannot be deployed'
            )
            enc_payload = None
        else:
            dec_payload = crypto.decrypt(self.server.payload,
                                         bytes(self.server.K))
            enc_payload = self.server.payload
    elif os.path.exists(enc_path):
        # if no payload provided, try to decrypt one from a previous run stored in encrypted_payload
        with open(enc_path, "rb") as f:
            enc_payload = f.read()
        try:
            dec_payload = crypto.decrypt(enc_payload, self.server.K)
            logger.info("Decrypted previous payload in %s to %s", enc_path,
                        dec_path)
        except Exception as e:
            # stale payload that doesn't match the new key: discard it
            logger.warning(
                "Unable to decrypt previous payload %s with derived key: %s",
                enc_path, e)
            os.remove(enc_path)
            enc_payload = None

    # also write out encrypted payload to be decrytped next time
    if enc_payload is not None:
        with open(enc_path, "wb") as f:
            f.write(self.server.payload.encode("utf-8"))

    # deal with payload
    payload_thread = None
    if dec_payload is not None:
        # the payload (not just the key) is folded into the measurement
        tomeasure = tomeasure + dec_payload
        # see if payload is a zip
        zfio = io.BytesIO(dec_payload)
        if config.getboolean(
                "cloud_agent",
                "extract_payload_zip") and zipfile.is_zipfile(zfio):
            logger.info("Decrypting and unzipping payload to %s/unzipped",
                        secdir)
            with zipfile.ZipFile(zfio, "r") as f:
                f.extractall(os.path.join(secdir, "unzipped"))

            # run an included script if one has been provided
            initscript = config.get("cloud_agent", "payload_script")
            if initscript != "":

                def initthread():
                    # run the payload script via bash in the unzipped dir,
                    # streaming its output into the debug log
                    env = os.environ.copy()
                    env["AGENT_UUID"] = self.server.agent_uuid
                    with subprocess.Popen(
                        ["/bin/bash", initscript],
                            env=env,
                            shell=False,
                            cwd=os.path.join(secdir, "unzipped"),
                            stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT,
                    ) as proc:
                        for line in iter(proc.stdout.readline, b""):
                            logger.debug("init-output: %s", line.strip())
                        # should be a no-op as poll already told us it's done
                        proc.wait()

                if not os.path.exists(
                        os.path.join(secdir, "unzipped", initscript)):
                    logger.info("No payload script %s found in %s/unzipped",
                                initscript, secdir)
                else:
                    logger.info("Executing payload script: %s/unzipped/%s",
                                secdir, initscript)
                    payload_thread = threading.Thread(target=initthread,
                                                      daemon=True)
        else:
            # not a zip (or extraction disabled): write the plaintext payload
            logger.info("Decrypting payload to %s", dec_path)
            with open(dec_path, "wb") as f:
                f.write(dec_payload)
        zfio.close()

    # now extend a measurement of the payload and key if there was one
    pcr = config.getint("cloud_agent", "measure_payload_pcr")
    if 0 < pcr < 24:
        logger.info("extending measurement of payload into PCR %s", pcr)
        measured = tpm_instance.hashdigest(tomeasure)
        tpm_instance.extendPCR(pcr, measured)

    # start the payload script only after the measurement has been extended
    if payload_thread is not None:
        payload_thread.start()

    return
def revocation_listener():
    """
    This configures and starts the revocation listener. It is designed to be
    started in a separate process.

    Returns immediately when notifications are disabled (honoring both the
    current "listen_notifications" option and the deprecated misspelled
    "listen_notfications"); otherwise blocks forever, re-connecting to the
    revocation server every 10s on failure.
    """

    if config.has_option('cloud_agent', 'listen_notifications'):
        if not config.getboolean('cloud_agent', 'listen_notifications'):
            return

    # keep old typo "listen_notfications" around for a few versions
    if config.has_option('cloud_agent', 'listen_notfications'):
        logger.warning(
            'Option typo "listen_notfications" is deprecated. Please use "listen_notifications" instead.'
        )
        if not config.getboolean('cloud_agent', 'listen_notfications'):
            return

    secdir = secure_mount.mount()

    # Resolve the revocation certificate path: "default" maps into the secure
    # dir, relative paths are anchored at config.WORK_DIR.
    cert_path = config.get('cloud_agent', 'revocation_cert')
    if cert_path == "default":
        cert_path = os.path.join(secdir,
                                 "unzipped/RevocationNotifier-cert.crt")
    elif cert_path[0] != '/':
        # if it is a relative, convert to absolute in work_dir
        cert_path = os.path.abspath(os.path.join(config.WORK_DIR, cert_path))

    # Callback function handling the revocations
    def perform_actions(revocation):
        actionlist = []

        # load the actions from inside the keylime module
        actionlisttxt = config.get('cloud_agent', 'revocation_actions')
        if actionlisttxt.strip() != "":
            actionlist = actionlisttxt.split(',')
            actionlist = ["revocation_actions.%s" % i for i in actionlist]

        # load actions from unzipped (payload-provided local actions must be
        # prefixed "local_action_"; anything else is rejected with a warning)
        action_list_path = os.path.join(secdir, "unzipped/action_list")
        if os.path.exists(action_list_path):
            with open(action_list_path, encoding="utf-8") as f:
                actionlisttxt = f.read()
                if actionlisttxt.strip() != "":
                    localactions = actionlisttxt.strip().split(',')
                    for action in localactions:
                        if not action.startswith('local_action_'):
                            logger.warning(
                                "Invalid local action: %s. Must start with local_action_",
                                action)
                        else:
                            actionlist.append(action)

        # make the unzipped payload directory importable for local actions
        uzpath = "%s/unzipped" % secdir
        if uzpath not in sys.path:
            sys.path.append(uzpath)

        for action in actionlist:
            logger.info("Executing revocation action %s", action)
            try:
                module = importlib.import_module(action)
                execute = getattr(module, 'execute')
                # actions expose an async execute(); run it to completion
                asyncio.get_event_loop().run_until_complete(
                    execute(revocation))
            except Exception as e:
                # best-effort: one failing action must not stop the others
                logger.warning(
                    "Exception during execution of revocation action %s: %s",
                    action, e)

    try:
        while True:
            try:
                revocation_notifier.await_notifications(
                    perform_actions, revocation_cert_path=cert_path)
            except Exception as e:
                logger.exception(e)
                logger.warning(
                    "No connection to revocation server, retrying in 10s...")
                time.sleep(10)
    except (KeyboardInterrupt, SystemExit):
        logger.info("Stopping revocation listener...")
def validate_tpm_quote(self, public_key, quote, hash_alg):
    """Validate TPM Quote received from the Agent.

    Fetches the agent's registered keys from the registrar, checks the quote
    against the registered AIK, verifies the EK certificate(s), and finally
    runs the optional external ek_check_script.

    Arguments:
        public_key {[type]} -- NK public key included in the quote response.
        quote {[type]} -- the TPM quote blob to verify.
        hash_alg {bool} -- hash algorithm used for the quote.

    Raises:
        UserError: when the external EK check script exits non-zero.

    Returns:
        bool -- True when the quote and EK checks all pass, False otherwise.
    """
    registrar_client.init_client_tls('tenant')
    reg_keys = registrar_client.getKeys(
        self.registrar_ip, self.registrar_port, self.agent_uuid)
    if reg_keys is None:
        logger.warning("AIK not found in registrar, quote not validated")
        return False

    if not self.tpm_instance.check_quote(self.agent_uuid, self.nonce, public_key, quote, reg_keys['aik_tpm'], hash_alg=hash_alg):
        if reg_keys['regcount'] > 1:
            # multiple registrations for one UUID is a strong sign of
            # misconfiguration or an attack — abort outright
            logger.error("WARNING: This UUID had more than one ek-ekcert registered to it! This might indicate that your system is misconfigured or a malicious host is present. Run 'regdelete' for this agent and restart")
            sys.exit()
        return False

    if reg_keys['regcount'] > 1:
        logger.warning("WARNING: This UUID had more than one ek-ekcert registered to it! This might indicate that your system is misconfigured. Run 'regdelete' for this agent and restart")

    if not config.STUB_TPM and (not config.getboolean('tenant', 'require_ek_cert') and config.get('tenant', 'ek_check_script') == ""):
        logger.warning(
            "DANGER: EK cert checking is disabled and no additional checks on EKs have been specified with ek_check_script option. Keylime is not secure!!")

    # check EK cert and make sure it matches EK
    if not self.check_ek(reg_keys['ekcert']):
        return False
    # if agent is virtual, check phyisical EK cert and make sure it matches phyiscal EK
    if 'provider_keys' in reg_keys:
        if not self.check_ek(reg_keys['provider_keys']['ekcert']):
            return False

    # check all EKs with optional script:
    script = config.get('tenant', 'ek_check_script')
    if not script:
        return True

    if script[0] != '/':
        script = "%s/%s" % (config.WORK_DIR, script)

    logger.info("Checking EK with script %s", script)
    # now we need to exec the script with the ek and ek cert in vars
    env = os.environ.copy()
    env['AGENT_UUID'] = self.agent_uuid
    env['EK'] = tpm2_objects.pubkey_from_tpm2b_public(
        base64.b64decode(reg_keys['ek_tpm']),
    ).public_bytes(
        crypto_serialization.Encoding.PEM,
        crypto_serialization.PublicFormat.SubjectPublicKeyInfo,
    )
    env['EK_TPM'] = reg_keys['ek_tpm']
    if reg_keys['ekcert'] is not None:
        env['EK_CERT'] = reg_keys['ekcert']
    else:
        env['EK_CERT'] = ""

    env['PROVKEYS'] = json.dumps(reg_keys.get('provider_keys', {}))

    # NOTE: the script path comes from trusted local configuration, but it is
    # still executed through the shell (shell=True) — do not feed it
    # untrusted input.
    # Use a context manager so the stdout pipe is always closed, and drain
    # the output *before* waiting: reading only after wait() can deadlock if
    # the script fills the pipe buffer. Draining first also means the
    # script's output is logged even when it fails.
    with subprocess.Popen(script,
                          env=env,
                          shell=True,
                          cwd=config.WORK_DIR,
                          stdout=subprocess.PIPE,
                          stderr=subprocess.STDOUT) as proc:
        for rawline in proc.stdout:
            line = rawline.decode()
            logger.debug("ek_check output: %s", line.strip())
        retval = proc.wait()

    if retval != 0:
        raise UserError("External check script failed to validate EK")
    logger.debug("External check script successfully validated EK")

    return True