def add_vtpm(inputfile):
    """Provision and register a new vTPM with the provider registrar.

    Loads the vTPM group description (JSON) from *inputfile*, asks the
    vTPM manager for a fresh vTPM UUID inside that group, registers it
    with the provider registrar, and proves possession of the ephemeral
    registrar key by activating it in the hardware TPM.

    :param inputfile: path to a JSON group file (as written by group creation)
    :returns: the UUID string of the newly registered vTPM
    """
    # load the group description from disk
    with open(inputfile, 'r') as infile:
        group = json.load(infile)

    # where the provider registrar lives
    reg_ip = config.get('general', 'provider_registrar_ip')
    reg_port = config.get('general', 'provider_registrar_port')

    # ask the manager for a fresh vtpm uuid within this group
    vtpm_uuid = vtpm_manager.add_vtpm_to_group(group['uuid'])

    # register with the provider registrar; it answers with a key blob
    blob = registrar_client.doRegisterNode(
        reg_ip, reg_port, vtpm_uuid,
        group['pubekpem'], group['ekcert'], group['aikpem'])

    # recover the ephemeral registrar key by activating in the hardware tpm
    ephemeral_key = base64.b64encode(
        vtpm_manager.activate_group(group['uuid'], blob))

    # prove knowledge of the key back to the registrar
    registrar_client.doActivateNode(reg_ip, reg_port, vtpm_uuid, ephemeral_key)

    logger.info("Registered new vTPM with UUID: %s" % (vtpm_uuid))
    return vtpm_uuid
def add_vtpm(inputfile):
    """Provision and register a new vTPM with the provider registrar.

    In stub mode (common.STUB_TPM) a canned test group is used instead
    of reading *inputfile*. Otherwise the group description is loaded
    from the given JSON file. The vTPM manager issues a fresh UUID in
    the group, which is then registered and activated against the
    provider registrar over a no-auth TLS context.

    :param inputfile: path to a JSON group file (ignored when stubbing)
    :returns: the UUID string of the newly registered vTPM
    """
    if common.STUB_TPM:
        # testing mode: fabricate the group from canned test credentials
        group = {
            'uuid': common.TEST_GROUP_UUID,
            'aikpem': common.TEST_HAIK,
            'pubekpem': common.TEST_PUB_EK,
            'ekcert': common.TEST_EK_CERT,
        }
    else:
        # real mode: the group description is a JSON file on disk
        with open(inputfile, 'r') as infile:
            group = json.load(infile)

    # where the provider registrar lives
    reg_ip = config.get('general', 'provider_registrar_ip')
    reg_port = config.get('general', 'provider_registrar_port')

    # ask the manager for a fresh vtpm uuid within this group
    vtpm_uuid = vtpm_manager.add_vtpm_to_group(group['uuid'])

    # talk to the registrar over TLS without certificate checking
    registrar_client.noAuthTLSContext(config)

    # register with the provider registrar; it answers with a key blob
    blob = registrar_client.doRegisterNode(
        reg_ip, reg_port, vtpm_uuid,
        group['pubekpem'], group['ekcert'], group['aikpem'])

    # recover the ephemeral registrar key by activating in the hardware tpm
    ephemeral_key = base64.b64encode(
        vtpm_manager.activate_group(group['uuid'], blob))

    # prove knowledge of the key back to the registrar
    registrar_client.doActivateNode(reg_ip, reg_port, vtpm_uuid, ephemeral_key)

    logger.info("Registered new vTPM with UUID: %s" % (vtpm_uuid))
    return vtpm_uuid
def main(argv=sys.argv):
    """Start the cloud node: register with the registrar, serve requests,
    and optionally listen for revocation notifications.

    Flow: mount the secure tmpfs, initialize the TPM (EK/EKcert/AIK),
    derive the node UUID per configuration, register + activate with the
    registrar, then run the CloudNodeHTTPServer until Ctrl-C.

    Fixes vs. original: 'exeuction' typo in the revocation-action log
    message, and deprecated logger.warn() replaced by logger.warning().
    """
    if os.getuid() != 0 and common.REQUIRE_ROOT:
        logger.critical("This process must be run as root.")
        return

    # get params for initialization
    registrar_ip = config.get('general', 'registrar_ip')
    registrar_port = config.get('general', 'registrar_port')

    # initialize the tmpfs partition to store keys if it isn't already available
    secdir = secure_mount.mount()

    # change dir to working dir
    common.ch_dir(common.WORK_DIR, logger)

    # initialize tpm; self_activate=False tells initialize not to self activate the AIK
    (ek, ekcert, aik) = tpm_initialize.init(
        self_activate=False,
        config_pw=config.get('cloud_node', 'tpm_ownerpassword'))
    virtual_node = tpm_initialize.is_vtpm()

    # try to get some TPM randomness into the system entropy pool
    tpm_random.init_system_rand()

    # no EK certificate available: mark why (virtual TPM or emulator)
    if ekcert is None:
        if virtual_node:
            ekcert = 'virtual'
        elif tpm_initialize.is_emulator():
            ekcert = 'emulator'

    # now we need the UUID; it may come from config, openstack, the EK hash,
    # or be freshly generated
    try:
        node_uuid = config.get('cloud_node', 'node_uuid')
    except ConfigParser.NoOptionError:
        node_uuid = None
    if node_uuid == 'openstack':
        node_uuid = openstack.get_openstack_uuid()
    elif node_uuid == 'hash_ek':
        node_uuid = hashlib.sha256(ek).hexdigest()
    elif node_uuid == 'generate' or node_uuid is None:
        node_uuid = str(uuid.uuid4())
    if common.DEVELOP_IN_ECLIPSE:
        node_uuid = "C432FBB3-D2F1-4A97-9EF7-75BD81C866E9"
    if common.STUB_VTPM and common.TPM_CANNED_VALUES is not None:
        # Use canned values for stubbing
        jsonIn = common.TPM_CANNED_VALUES
        if "add_vtpm_to_group" in jsonIn:
            # The value we're looking for has been canned!
            node_uuid = jsonIn['add_vtpm_to_group']['retout']
        else:
            # Our command hasn't been canned!
            raise Exception("Command %s not found in canned JSON!" % ("add_vtpm_to_group"))
    logger.info("Node UUID: %s" % node_uuid)

    # register it and get back a blob
    keyblob = registrar_client.doRegisterNode(registrar_ip, registrar_port, node_uuid, ek, ekcert, aik)
    if keyblob is None:
        raise Exception("Registration failed")

    # get the ephemeral registrar key
    key = tpm_initialize.activate_identity(keyblob)

    # tell the registrar server we know the key
    retval = False
    if virtual_node:
        # virtual nodes prove possession via a deep quote through the physical TPM
        deepquote = tpm_quote.create_deep_quote(hashlib.sha1(key).hexdigest(), node_uuid + aik + ek)
        retval = registrar_client.doActivateVirtualNode(registrar_ip, registrar_port, node_uuid, deepquote)
    else:
        retval = registrar_client.doActivateNode(registrar_ip, registrar_port, node_uuid, key)

    if not retval:
        raise Exception("Registration failed on activate")

    serveraddr = ('', config.getint('general', 'cloudnode_port'))
    server = CloudNodeHTTPServer(serveraddr, Handler, node_uuid)
    serverthread = threading.Thread(target=server.serve_forever)

    logger.info('Starting Cloud Node on port %s use <Ctrl-C> to stop' % serveraddr[1])
    serverthread.start()

    # want to listen for revocations?
    # NOTE(review): 'listen_notfications' looks misspelled but must match the
    # actual option name in the deployed config file — do not "fix" one side only.
    if config.getboolean('cloud_node', 'listen_notfications'):
        cert_path = config.get('cloud_node', 'revocation_cert')
        if cert_path == "default":
            cert_path = '%s/unzipped/RevocationNotifier-cert.crt' % (secdir)
        elif cert_path[0] != '/':
            # if it is a relative, convert to absolute in work_dir
            cert_path = os.path.abspath('%s/%s' % (common.WORK_DIR, cert_path))

        def perform_actions(revocation):
            # run each configured revocation action module against the revocation
            actionlist = config.get('cloud_node', 'revocation_actions')
            if actionlist.strip() == "":
                logger.debug("No revocation actions specified")
                return
            if actionlist == 'default':
                # load actions from unzipped
                with open("%s/unzipped/action_list" % secdir, 'r') as f:
                    actionlist = f.read()
                actionlist = actionlist.strip().split(',')
                uzpath = "%s/unzipped" % secdir
                if uzpath not in sys.path:
                    sys.path.append(uzpath)
            else:
                # load the actions from inside the keylime module
                actionlist = actionlist.split(',')
                actionlist = ["revocation_actions.%s" % i for i in actionlist]

            for action in actionlist:
                module = importlib.import_module(action)
                execute = getattr(module, 'execute')
                try:
                    execute(revocation)
                except Exception as e:
                    # one failing action must not stop the others
                    logger.warning("Exception during execution of revocation action %s: %s" % (action, e))

        try:
            while True:
                try:
                    revocation_notifier.await_notifications(perform_actions, revocation_cert_path=cert_path)
                except Exception as e:
                    logger.exception(e)
                    logger.warning("No connection to revocation server, retrying in 10s...")
                    time.sleep(10)
        except KeyboardInterrupt:
            logger.info("TERM Signal received, shutting down...")
            tpm_initialize.flush_keys()
            server.shutdown()
    else:
        # no revocation listening; idle until interrupted
        try:
            while True:
                time.sleep(1)
        except KeyboardInterrupt:
            logger.info("TERM Signal received, shutting down...")
            tpm_initialize.flush_keys()
            server.shutdown()
def main(argv=sys.argv):
    # Associate a hypervisor host with its TPM: create a new vTPM group,
    # register it with the provider registrar, activate it in the hardware
    # TPM, and persist the group credentials to a group-<n>-<uuid>.tpm file.
    # NOTE: Python 2 source (print statements).
    # argv[1] = path to pubek.pem, argv[2] = path to tpm_ekcert.der
    if common.DEVELOP_IN_ECLIPSE:
        # canned CLI arguments for in-IDE development
        argv = ['provider_platform_init.py','1','2']
    if len(argv)<3:
        print "usage: provider_platform_init.py pubek.pem tpm_ekcert.der"
        print "\tassociates a hypervisor host to its TPM and registers it"
        print
        print "\tYou must obtain the public EK and the EK certificate from outside of Xen"
        print "\ttake ownership first, then obtain pubek, and ekcert as follows"
        print "\t takeown -pwdo <owner_password>"
        print "\t getpubek -pwdo <owner-password>"
        print "\t nv_readvalue -pwdo <owner-password> -in 1000f000 -cert -of tpm_ekcert.der"
        sys.exit(-1)

    if common.DEVELOP_IN_ECLIPSE and not common.STUB_TPM:
        raise Exception("Can't use Xen features in Eclipse without STUB_TPM")

    # read in the pubek (canned test values when developing in Eclipse)
    if common.DEVELOP_IN_ECLIPSE:
        ek = common.TEST_PUB_EK
        ekcert = common.TEST_EK_CERT
    else:
        f = open(argv[1],'r')
        ek = f.read()
        f.close()
        # the DER certificate is base64-encoded for transport/storage
        f = open(argv[2],'r')
        ekcert = base64.b64encode(f.read())
        f.close()

    # fetch configuration parameters
    provider_reg_port = config.get('general', 'provider_registrar_port')
    provider_reg_ip = config.get('general', 'provider_registrar_ip')

    # create a new group
    (group_uuid,group_aik,group_num,_) = vtpm_manager.add_vtpm_group()

    # registrar it and get back a blob
    keyblob = registrar_client.doRegisterNode(provider_reg_ip,provider_reg_port,group_uuid,ek,ekcert,group_aik)

    # get the ephemeral registrar key by activating in the hardware tpm
    key = base64.b64encode(vtpm_manager.activate_group(group_uuid, keyblob))

    # tell the registrar server we know the key
    registrar_client.doActivateNode(provider_reg_ip,provider_reg_port,group_uuid,key)

    output= {
        'uuid': group_uuid,
        'aikpem': group_aik,
        'pubekpem': ek,
        'ekcert': ekcert,
    }

    # store the key and the group UUID in a file to add to vtpms later
    with open("group-%d-%s.tpm"%(group_num,group_uuid),'w') as f:
        json.dump(output,f)

    logger.info("Activated VTPM group %d, UUID %s"%(group_num,group_uuid))
    if group_num==0:
        # group 0 appears to be reserved; recurse once so a usable group 1 exists
        logger.info("WARNING: Group 0 created, repeating activation again to create Group 1")
        main(argv)
    else:
        # create a symlink to the most recently create group
        symlink_force("group-%d-%s.tpm"%(group_num,group_uuid),"current_group.tpm")
def main(argv=sys.argv):
    """Start the cloud node: register with the registrar and serve requests
    until a termination signal arrives.

    Flow: mount the secure tmpfs, initialize the TPM (EK/EKcert/AIK),
    derive the node UUID per configuration, register + activate with the
    registrar, then run CloudNodeHTTPServer in a thread; SIGTERM/SIGQUIT/
    SIGINT trigger a clean shutdown via do_shutdown().

    Fixes vs. original: the signal handler's first parameter shadowed the
    `signal` module (renamed to `signum`); the thread-liveness poll no
    longer builds a throwaway list and uses a single `while` condition.
    """
    if os.getuid() != 0 and not common.DEVELOP_IN_ECLIPSE:
        logger.critical("This process must be run as root.")
        return

    # get params for initialization
    registrar_ip = config.get('general', 'registrar_ip')
    registrar_port = config.get('general', 'registrar_port')

    # initialize the tmpfs partition to store keys if it isn't already available
    secure_mount.mount()

    # change dir to working dir
    common.ch_dir()

    # initialize tpm; self_activate=False tells initialize not to self activate the AIK
    (ek, ekcert, aik) = tpm_initialize.init(
        self_activate=False,
        config_pw=config.get('cloud_node', 'tpm_ownerpassword')
    )
    virtual_node = tpm_initialize.is_vtpm()

    if common.STUB_TPM:
        ekcert = common.TEST_EK_CERT
    # virtual nodes (or stubbed TPMs) have no real EK certificate
    if virtual_node and (ekcert is None or common.STUB_TPM):
        ekcert = 'virtual'

    # now we need the UUID; it may come from config, openstack, the EK hash,
    # or be freshly generated
    try:
        node_uuid = config.get('cloud_node', 'node_uuid')
    except ConfigParser.NoOptionError:
        node_uuid = None
    if node_uuid == 'openstack':
        node_uuid = openstack.get_openstack_uuid()
    elif node_uuid == 'hash_ek':
        node_uuid = hashlib.sha256(ek).hexdigest()
    elif node_uuid == 'generate' or node_uuid is None:
        node_uuid = str(uuid.uuid4())
    if common.DEVELOP_IN_ECLIPSE:
        node_uuid = "C432FBB3-D2F1-4A97-9EF7-75BD81C866E9"

    # use a TLS context with no certificate checking
    registrar_client.noAuthTLSContext(config)

    # register it and get back a blob
    keyblob = registrar_client.doRegisterNode(registrar_ip, registrar_port, node_uuid, ek, ekcert, aik)
    if keyblob is None:
        raise Exception("Registration failed")

    # get the ephemeral registrar key
    key = tpm_initialize.activate_identity(keyblob)

    # tell the registrar server we know the key
    if virtual_node:
        # virtual nodes prove possession via a deep quote through the physical TPM
        deepquote = tpm_quote.create_deep_quote(
            hashlib.sha1(key).hexdigest(), node_uuid + aik + ek)
        registrar_client.doActivateVirtualNode(registrar_ip, registrar_port, node_uuid, deepquote)
    else:
        registrar_client.doActivateNode(registrar_ip, registrar_port, node_uuid, key)

    serveraddr = ('', config.getint('general', 'cloudnode_port'))
    server = CloudNodeHTTPServer(serveraddr, Handler, node_uuid)
    thread = threading.Thread(target=server.serve_forever)
    threads = [thread]
    servers = [server]

    # start the server
    logger.info('Starting Cloud Node on port %s use <Ctrl-C> to stop' % serveraddr[1])
    for thread in threads:
        thread.start()

    def signal_handler(signum, frame):
        # runs on SIGTERM/SIGQUIT/SIGINT; shut servers down and exit
        do_shutdown(servers)
        sys.exit(0)

    # Catch these signals. Note that a SIGKILL cannot be caught, so
    # killing this process with "kill -9" may result in improper shutdown
    signal.signal(signal.SIGTERM, signal_handler)
    signal.signal(signal.SIGQUIT, signal_handler)
    signal.signal(signal.SIGINT, signal_handler)

    # keep the main thread active, so it can process the signals and gracefully shutdown
    while any(thread.isAlive() for thread in threads):
        time.sleep(1)

    for thread in threads:
        thread.join()