def verify_and_unseal_blob(public_key_pem, secret, blob_data):
    """
    verify and unseal a serialized string of JSON
    """
    global CRYPTO_INITED

    if not CRYPTO_INITED:
        c_syndicate.crypto_init()
        CRYPTO_INITED = True

    # verify it
    rc, sealed_data = syndicate_crypto.verify_and_parse_json(public_key_pem, blob_data)
    if rc != 0:
        logger.error("Failed to verify and parse blob, rc = %s" % rc)
        return None

    logger.info("Unsealing credential data")

    rc, data = c_syndicate.symmetric_unseal(sealed_data, secret)
    if rc != 0:
        logger.error("Failed to unseal blob, rc = %s" % rc)
        return None

    return data
def ft_syndicate_principal():
    """
    Functional tests for creating, reading, and deleting SyndicatePrincipals.
    """
    c_syndicate.crypto_init()

    print "generating key pair"
    pubkey_pem, privkey_pem = api.generate_key_pair(4096)

    user_email = "*****@*****.**"

    print "saving principal"
    key = generate_symmetric_secret()
    put_sealed_principal_data(user_email, key, pubkey_pem, privkey_pem)

    print "fetching principal private key"
    saved_privkey_pem = get_principal_pkey(user_email, key)

    assert saved_privkey_pem is not None, "Could not fetch saved private key"
    assert saved_privkey_pem == privkey_pem, "Saved private key does not match actual private key"

    print "delete principal"
    observer_storage.delete_principal_data(user_email)

    print "make sure it's deleted..."
    saved_privkey_pem = get_principal_pkey(user_email, key)
    assert saved_privkey_pem is None, "Principal key not deleted"

    c_syndicate.crypto_shutdown()
def ft_seal_and_unseal():
    """
    Functional test for sealing/unsealing data
    """
    import syndicate.observer.core as observer_core

    c_syndicate.crypto_init()

    print "generating key pair"
    pubkey_pem, privkey_pem = api.generate_key_pair(4096)

    key = observer_core.generate_symmetric_secret()

    sealed_buf = create_sealed_and_signed_blob(privkey_pem, key, "hello world")
    print "sealed data is:\n\n%s\n\n" % sealed_buf

    buf = verify_and_unseal_blob(pubkey_pem, key, sealed_buf)
    print "unsealed data is: \n\n%s\n\n" % buf

    c_syndicate.crypto_shutdown()
def create_sealed_and_signed_blob(private_key_pem, key, data):
    """
    Create a sealed and signed message.
    """
    global CRYPTO_INITED

    if not CRYPTO_INITED:
        c_syndicate.crypto_init()
        CRYPTO_INITED = True

    rc, sealed_data = c_syndicate.symmetric_seal(data, key)
    if rc != 0:
        logger.error("Failed to seal data with the key, rc = %s" % rc)
        return None

    msg = syndicate_crypto.sign_and_serialize_json(private_key_pem, sealed_data)
    if msg is None:
        logger.error("Failed to sign credential")
        return None

    return msg
def ft_do_push(syndicate_url, volume_name, volume_owner, slice_name, slice_secret, principal_pkey_path,
               hostname, automount_daemon_port,
               instantiate_UG=None, run_UG=None, UG_port=0, UG_closure=None,
               instantiate_RG=None, run_RG=None, RG_port=0, RG_closure=None, RG_global_hostname=None,
               instantiate_AG=None, run_AG=None, AG_port=0, AG_closure=None, AG_global_hostname=None,
               gateway_name_prefix=""):
    """
    Push credentials to a single host.
    """
    c_syndicate.crypto_init()

    observer_key = syndicate_storage_api.read_private_key(CONFIG.SYNDICATE_OBSERVER_PRIVATE_KEY)
    user_key = syndicate_storage_api.read_private_key(principal_pkey_path)

    observer_key_pem = observer_key.exportKey()
    user_pkey_pem = user_key.exportKey()

    if observer_key_pem is None:
        raise Exception("Failed to read observer private key from %s" % CONFIG.SYNDICATE_OBSERVER_PRIVATE_KEY)

    if user_pkey_pem is None:
        raise Exception("Failed to read user private key from %s" % principal_pkey_path)

    # convert to binary
    slice_secret = binascii.unhexlify(slice_secret)

    cred = observer_cred.create_slice_credential_blob(observer_key_pem, slice_name, slice_secret,
                                                      syndicate_url, volume_name, volume_owner, user_pkey_pem,
                                                      instantiate_UG=instantiate_UG, run_UG=run_UG, UG_port=UG_port, UG_closure=UG_closure,
                                                      instantiate_RG=instantiate_RG, run_RG=run_RG, RG_port=RG_port, RG_closure=RG_closure, RG_global_hostname=RG_global_hostname,
                                                      instantiate_AG=instantiate_AG, run_AG=run_AG, AG_port=AG_port, AG_closure=AG_closure, AG_global_hostname=AG_global_hostname,
                                                      gateway_name_prefix=gateway_name_prefix)

    if cred is None:
        raise Exception("Failed to generate slice credential")

    rc = do_push([hostname], automount_daemon_port, cred)

    c_syndicate.crypto_shutdown()
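# A hypothetical invocation sketch for ft_do_push, kept commented out so importing this
# module stays side-effect free.  Every value below (URL, volume, owner, slice, secret,
# key path, hostname, and ports) is a made-up placeholder for illustration only; the
# real deployment supplies its own.
#
# ft_do_push("http://localhost:8080", "ft_volume_1", "principal@example.com", "ft_slice_1",
#            "00" * 32, "/opt/syndicate/principal.pem", "localhost", 33331,
#            instantiate_UG=True, run_UG=True, UG_port=32780)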
def ft_credential_server_1(syndicate_url, principal_id, principal_pkey_path):
    """
    Functional test for the credential server.  Use a set of fake volume data.
    """
    import syndicate.observer.sync as observer_sync
    import syndicate.observer.core as observer_core
    import syndicate.syndicate as c_syndicate
    from collections import namedtuple

    c_syndicate.crypto_init()

    observer_pkey_pem = syndicate_storage_api.read_private_key(CONFIG.SYNDICATE_OBSERVER_PRIVATE_KEY).exportKey()
    user_pkey_pem = syndicate_storage_api.read_private_key(principal_pkey_path).exportKey()

    slice_secret = binascii.unhexlify("0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef")

    ft_volumes = ["ft_volume_1", "ft_volume_2", "ft_volume_3"]
    RG_ports = {"ft_volume_1": 32781, "ft_volume_2": 32783, "ft_volume_3": 32785}
    UG_ports = {"ft_volume_1": 32780, "ft_volume_2": 32782, "ft_volume_3": 32784}

    OpenCloudVolume = namedtuple("OpenCloudVolume",
                                 ["name", "blocksize", "description", "private", "archive",
                                  "cap_read_data", "cap_write_data", "cap_host_data"])

    # set up some volumes
    for vol_name in ft_volumes:
        opencloud_volume = OpenCloudVolume(name=vol_name,
                                           blocksize=1024,
                                           description="Functional test volume",
                                           private=True,
                                           archive=False,
                                           cap_read_data=True,
                                           cap_write_data=True,
                                           cap_host_data=True)

        observer_core.ensure_volume_exists(CONFIG.SYNDICATE_OPENCLOUD_USER, opencloud_volume)

    # set up some RGs for the volumes
    for vol_name in ft_volumes:
        g = observer_core.setup_global_RG(CONFIG.SYNDICATE_OPENCLOUD_USER, vol_name,
                                          CONFIG.SYNDICATE_GATEWAY_NAME_PREFIX, slice_secret,
                                          RG_ports[vol_name], CONFIG.SYNDICATE_RG_CLOSURE,
                                          global_hostname="localhost")

        assert g == True, "Failed to ensure global RG for volume %s exists" % vol_name

    ft_volumeslice = {
        "ft_volume_1": observer_sync.VolumeSlice(
            volume_id=1, slice_id=1,
            cap_read_data=True, cap_write_data=True, cap_host_data=True,
            UG_portnum=UG_ports["ft_volume_1"], RG_portnum=RG_ports["ft_volume_1"],
            credentials_blob=observer_core.generate_slice_credentials(
                observer_pkey_pem, syndicate_url, principal_id, "ft_volume_1", "ft_slice_1", None, slice_secret,
                user_pkey_pem=user_pkey_pem,
                instantiate_UG=True, run_UG=True, UG_port=UG_ports["ft_volume_1"], UG_closure=None,
                instantiate_RG=None, run_RG=True, RG_port=RG_ports["ft_volume_1"], RG_closure=None, RG_global_hostname="localhost",
                instantiate_AG=None, run_AG=None, AG_port=0, AG_closure=None,
                gateway_name_prefix="OpenCloud")),

        "ft_volume_2": observer_sync.VolumeSlice(
            volume_id=2, slice_id=1,
            cap_read_data=True, cap_write_data=True, cap_host_data=True,
            UG_portnum=UG_ports["ft_volume_2"], RG_portnum=RG_ports["ft_volume_2"],
            credentials_blob=observer_core.generate_slice_credentials(
                observer_pkey_pem, syndicate_url, principal_id, "ft_volume_2", "ft_slice_1", None, slice_secret,
                user_pkey_pem=user_pkey_pem,
                instantiate_UG=True, run_UG=True, UG_port=UG_ports["ft_volume_2"], UG_closure=None,
                instantiate_RG=None, run_RG=True, RG_port=RG_ports["ft_volume_2"], RG_closure=None, RG_global_hostname="localhost",
                instantiate_AG=None, run_AG=None, AG_port=0, AG_closure=None,
                gateway_name_prefix="OpenCloud")),

        "ft_volume_3": observer_sync.VolumeSlice(
            volume_id=3, slice_id=1,
            cap_read_data=True, cap_write_data=False, cap_host_data=False,
            UG_portnum=UG_ports["ft_volume_3"], RG_portnum=RG_ports["ft_volume_3"],
            credentials_blob=observer_core.generate_slice_credentials(
                observer_pkey_pem, syndicate_url, principal_id, "ft_volume_3", "ft_slice_1", None, slice_secret,
                user_pkey_pem=user_pkey_pem,
                instantiate_UG=True, run_UG=True, UG_port=UG_ports["ft_volume_3"], UG_closure=None,
                instantiate_RG=None, run_RG=True, RG_port=RG_ports["ft_volume_3"], RG_closure=None, RG_global_hostname="localhost",
                instantiate_AG=None, run_AG=None, AG_port=0, AG_closure=None,
                gateway_name_prefix="OpenCloud"))
    }

    # re-program observer_storage with test methods
    def ft_get_slice_secret(private_key_pem, slice_name):
        logger.info("get slice secret for %s" % slice_name)
        return slice_secret

    def ft_get_volumeslice_volume_names(slice_name):
        logger.info("get volume names for %s" % slice_name)
        return ["ft_volume_1", "ft_volume_2", "ft_volume_3"]

    def ft_get_volumeslice(volume_name, slice_name):
        logger.info("get volumeslice for (%s, %s)" % (volume_name, slice_name))
        return ft_volumeslice.get(volume_name, None)

    observer_storage.get_slice_secret = ft_get_slice_secret
    observer_storage.get_volumeslice_volume_names = ft_get_volumeslice_volume_names
    observer_storage.get_volumeslice = ft_get_volumeslice

    ensure_credential_server_running(run_once=True, foreground=True)

    c_syndicate.crypto_shutdown()