def gateway_directives_from_volume_info( volume_info, local_hostname, slice_secret ):
    """
    Extract gateway directives from an observer's description of the volume for this host.
    """
    gateway_directives = {
        "UG": {},
        "RG": {},
        "AG": {}
    }

    volume_name = volume_info[ observer_cred.OPENCLOUD_VOLUME_NAME ]
    gateway_name_prefix = volume_info[ observer_cred.OPENCLOUD_SLICE_GATEWAY_NAME_PREFIX ]

    # get what we need...
    try:
        RG_hostname = local_hostname
        AG_hostname = local_hostname

        # global hostnames (i.e. multiple instantiations of the same gateway) override local hostnames.
        if volume_info[ observer_cred.OPENCLOUD_SLICE_AG_GLOBAL_HOSTNAME ] is not None:
            AG_hostname = volume_info[ observer_cred.OPENCLOUD_SLICE_AG_GLOBAL_HOSTNAME ]

        if volume_info[ observer_cred.OPENCLOUD_SLICE_RG_GLOBAL_HOSTNAME ] is not None:
            RG_hostname = volume_info[ observer_cred.OPENCLOUD_SLICE_RG_GLOBAL_HOSTNAME ]

        # UG directives: UGs are always bound to this host's local hostname
        gateway_directives["UG"]["instantiate"]  = volume_info[ observer_cred.OPENCLOUD_SLICE_INSTANTIATE_UG ]
        gateway_directives["UG"]["run"]          = volume_info[ observer_cred.OPENCLOUD_SLICE_RUN_UG ]
        gateway_directives["UG"]["port"]         = volume_info[ observer_cred.OPENCLOUD_SLICE_UG_PORT ]
        gateway_directives["UG"]["closure"]      = volume_info[ observer_cred.OPENCLOUD_SLICE_UG_CLOSURE ]
        gateway_directives["UG"]["name"]         = provisioning.make_gateway_name( gateway_name_prefix, "UG", volume_name, local_hostname )
        gateway_directives["UG"]["key_password"] = provisioning.make_gateway_private_key_password( gateway_directives["UG"]["name"], slice_secret )
        gateway_directives["UG"]["hostname"]     = local_hostname

        # RG directives: bound to the global RG hostname, if one was given
        gateway_directives["RG"]["instantiate"]  = volume_info[ observer_cred.OPENCLOUD_SLICE_INSTANTIATE_RG ]
        gateway_directives["RG"]["run"]          = volume_info[ observer_cred.OPENCLOUD_SLICE_RUN_RG ]
        gateway_directives["RG"]["port"]         = volume_info[ observer_cred.OPENCLOUD_SLICE_RG_PORT ]
        gateway_directives["RG"]["closure"]      = volume_info[ observer_cred.OPENCLOUD_SLICE_RG_CLOSURE ]
        gateway_directives["RG"]["name"]         = provisioning.make_gateway_name( gateway_name_prefix, "RG", volume_name, RG_hostname )
        gateway_directives["RG"]["key_password"] = provisioning.make_gateway_private_key_password( gateway_directives["RG"]["name"], slice_secret )
        gateway_directives["RG"]["hostname"]     = RG_hostname

        # AG directives: bound to the global AG hostname, if one was given
        gateway_directives["AG"]["instantiate"]  = volume_info[ observer_cred.OPENCLOUD_SLICE_INSTANTIATE_AG ]
        gateway_directives["AG"]["run"]          = volume_info[ observer_cred.OPENCLOUD_SLICE_RUN_AG ]
        gateway_directives["AG"]["port"]         = volume_info[ observer_cred.OPENCLOUD_SLICE_AG_PORT ]
        gateway_directives["AG"]["closure"]      = volume_info[ observer_cred.OPENCLOUD_SLICE_AG_CLOSURE ]
        gateway_directives["AG"]["name"]         = provisioning.make_gateway_name( gateway_name_prefix, "AG", volume_name, AG_hostname )
        gateway_directives["AG"]["key_password"] = provisioning.make_gateway_private_key_password( gateway_directives["AG"]["name"], slice_secret )
        gateway_directives["AG"]["hostname"]     = AG_hostname

    except Exception as e:
        log.exception(e)
        log.error("Invalid configuration for Volume %s" % volume_name)
        return None

    return gateway_directives
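
# Illustrative sketch (not part of the observer code): one way a caller might
# act on the directives built above.  The start_gateway callable is
# hypothetical and injected by the caller; the directive fields ("run", "name",
# "hostname", "port", "closure", "key_password") match the structure that
# gateway_directives_from_volume_info() returns.
def run_gateway_directives( gateway_directives, start_gateway ):
    """
    Start each gateway type whose directives say it should run on this host.
    Return the names of the gateways started.
    """
    started = []
    for gateway_type in ["UG", "RG", "AG"]:
        directives = gateway_directives.get( gateway_type, {} )
        if not directives.get("run"):
            # this volume does not want this gateway type running here
            continue

        start_gateway( gateway_type,
                       name=directives["name"],
                       hostname=directives["hostname"],
                       port=directives["port"],
                       closure=directives["closure"],
                       key_password=directives["key_password"] )

        started.append( directives["name"] )

    return started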
def setup_global_RG( principal_id, volume_name, gateway_name_prefix, slice_secret, RG_port, RG_closure, global_hostname="localhost" ):
    """
    Create/read an RG that will run on each host, on a particular global hostname.
    """
    client = connect_syndicate()

    RG_name = syndicate_provisioning.make_gateway_name( gateway_name_prefix, "RG", volume_name, global_hostname )
    RG_key_password = syndicate_provisioning.make_gateway_private_key_password( RG_name, slice_secret )

    try:
        rc = syndicate_provisioning.ensure_RG_exists( client, principal_id, volume_name, RG_name, global_hostname, RG_port, RG_key_password, closure=RG_closure )
    except Exception as e:
        logger.exception(e)
        return False

    return True
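
# Usage sketch with made-up values.  Because make_gateway_name() and
# make_gateway_private_key_password() derive the RG's name and key password
# deterministically from the prefix, volume, hostname, and slice secret, every
# host in the slice that makes this call converges on the same global RG
# record instead of creating duplicates.
rc = setup_global_RG( "principal@example.com",       # hypothetical principal ID
                      "demo-volume",                 # hypothetical volume name
                      "OpenCloud",                   # gateway name prefix
                      "0123456789abcdef",            # hypothetical slice secret
                      32780,                         # hypothetical RG port
                      None,                          # no replication closure
                      global_hostname="rg.example.com" )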
def upload_keys(new_emails, user_infos):
    """
    Save all private keys for the users we just provisioned (user and volume keys).
    Return True if they all succeed.
    Return False if at least one fails.
    """
    syndicate_config = conf.get_config_from_argv(sys.argv)
    user_bundles = {}

    # make initial user bundles
    for user_name in new_emails:
        user_pkey = storage.load_private_key(syndicate_config, "user", user_name)
        if user_pkey is None:
            log.error("Automount daemon failed to produce key for {}".format(user_name))
            return False

        user_cert = object_stub.load_user_cert(syndicate_config, user_name)
        if user_cert is None:
            log.error("Automount daemon failed to produce cert for {}".format(user_name))
            return False

        ug_name = provisioning.make_gateway_name('demo', 'UG', sanitize_name('volume-{}'.format(user_name)), 'localhost')
        rg_name = provisioning.make_gateway_name('demo', 'RG', sanitize_name('volume-{}'.format(user_name)), 'localhost')

        ug_pkey = storage.load_private_key(syndicate_config, "gateway", ug_name)
        if ug_pkey is None:
            log.error("Automount daemon failed to produce key for {}".format(ug_name))
            return False

        rg_pkey = storage.load_private_key(syndicate_config, "gateway", rg_name)
        if rg_pkey is None:
            log.error("Automount daemon failed to produce key for {}".format(rg_name))
            return False

        ug_cert = object_stub.load_gateway_cert(syndicate_config, ug_name)
        if ug_cert is None:
            log.error("Automount daemon failed to produce cert for {}".format(ug_name))
            return False

        rg_cert = object_stub.load_gateway_cert(syndicate_config, rg_name)
        if rg_cert is None:
            log.error("Automount daemon failed to produce cert for {}".format(rg_name))
            return False

        # gateway keys for the same volume must be the same, initially
        if ug_pkey.exportKey() != rg_pkey.exportKey():
            log.error("Automount daemon did not produce the same initial key for {} and {}".format(ug_name, rg_name))
            return False

        user_bundles[user_name] = {
            'user_pkey': user_pkey.exportKey(),
            'user_cert': base64.b64encode(user_cert.SerializeToString()),
            'ug_cert': base64.b64encode(ug_cert.SerializeToString()),
            'rg_cert': base64.b64encode(rg_cert.SerializeToString()),
            'gateway_pkey': ug_pkey.exportKey()
        }

    # encrypt private keys
    for user_info in user_infos:
        user_name = user_info['email']
        user_password = user_info['password']

        if user_name not in new_emails:
            continue

        if len(user_password) == 0:
            # skip this user
            log.debug("Skipping already-processed user {}".format(user_name))
            del user_bundles[user_name]
            continue

        # Fernet expects a urlsafe-base64-encoded key; the password arrives as standard base64
        user_password = base64.urlsafe_b64encode(base64.b64decode(user_password))

        for keyname in ['user_pkey', 'gateway_pkey']:
            f = Fernet(user_password)
            user_bundles[user_name][keyname] = f.encrypt(user_bundles[user_name][keyname])
            # store the Fernet token re-encoded as standard base64
            user_bundles[user_name][keyname] = base64.b64encode( base64.urlsafe_b64decode(user_bundles[user_name][keyname]) )

    log.debug("Upload key bundles for {} users".format(len(user_bundles.keys())))

    for user_name in user_bundles.keys():
        # send encrypted keys
        try:
            log.debug("Upload keys for {}".format(user_name))
            data = {'demo_payload': json.dumps(user_bundles[user_name])}
            req = requests.post(SIGNUP_URL + '/provision/{}'.format(urllib.quote(user_name)), headers=make_auth_headers(), data=data)
            if req.status_code != 200:
                if req.status_code != 202:
                    log.error("Failed to provision {}: HTTP {} ({})".format(user_name, req.status_code, req.text))
        except Exception as e:
            if DEBUG:
                log.exception(e)
            return False

    return True
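
# Self-contained round trip for the encoding used above (requires the
# `cryptography` package; the plaintext is a stand-in for a PEM-encoded key).
# upload_keys() stores each private key as the *standard*-base64 re-encoding of
# a Fernet token, and derives the Fernet key by re-encoding the user's
# standard-base64 password as urlsafe base64.  A client reverses both steps:
import base64
from cryptography.fernet import Fernet

def demo_key_bundle_round_trip():
    raw = Fernet.generate_key()                                   # 32 random bytes, urlsafe-base64-encoded
    password = base64.b64encode(base64.urlsafe_b64decode(raw))    # as delivered: standard base64

    # encrypt, as upload_keys() does
    f = Fernet(base64.urlsafe_b64encode(base64.b64decode(password)))
    stored = base64.b64encode(base64.urlsafe_b64decode(f.encrypt(b"fake PEM data")))

    # decrypt, as a client of the signup service would
    token = base64.urlsafe_b64encode(base64.b64decode(stored))
    assert f.decrypt(token) == b"fake PEM data"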
""" Set up the Volume to allow the slice to provision UGs in it, and to fire up RGs. * create the Volume Access Right for the user, so (s)he can create Gateways. * provision a single Replica Gateway, serving on localhost. """ client = connect_syndicate() try: rc = ensure_volume_access_right_exists( user_email, volume_name, caps ) assert rc is True, "Failed to create access right for %s in %s" % (user_email, volume_name) except Exception, e: logger.exception(e) return False RG_name = syndicate_provisioning.make_gateway_name( "OpenCloud", "RG", volume_name, "localhost" ) RG_key_password = syndicate_provisioning.make_gateway_private_key_password( RG_name, slice_secret ) try: rc = syndicate_provisioning.ensure_RG_exists( client, user_email, volume_name, RG_name, "localhost", RG_port, RG_key_password, closure=RG_closure ) except Exception, e: logger.exception(e) return False return True #------------------------------- def teardown_volume_access( user_email, volume_name ): """ Revoke access to a Volume for a User.