def remove_publisher_trusted_algorithm_publisher(
    asset_or_did: Union[str, V3Asset], publisher_address: str, metadata_cache_uri: str
) -> list:
    """
    :return: List of trusted algo publishers not containing `publisher_address`.
    """
    if isinstance(asset_or_did, V3Asset):
        asset = asset_or_did
    else:
        asset = resolve_asset(asset_or_did, metadata_cache_uri=metadata_cache_uri)

    trusted_algorithm_publishers = [
        tp.lower() for tp in asset.get_trusted_algorithm_publishers()
    ]
    publisher_address = publisher_address.lower()
    if not trusted_algorithm_publishers:
        raise ValueError(
            f"Publisher {publisher_address} is not in trusted algorithm publishers of this asset."
        )

    trusted_algorithm_publishers = [
        tp for tp in trusted_algorithm_publishers if tp != publisher_address
    ]
    trusted_algorithms = asset.get_trusted_algorithms()
    asset.update_compute_privacy(
        trusted_algorithms, trusted_algorithm_publishers, False, False
    )
    assert (
        asset.get_trusted_algorithm_publishers() == trusted_algorithm_publishers
    ), "New trusted algorithm publisher was not removed. Failed when updating the list of trusted algo publishers."
    return trusted_algorithm_publishers
def remove_publisher_trusted_algorithm(
    asset_or_did: Union[str, V3Asset], algo_did: str, metadata_cache_uri: str
) -> list:
    """
    :return: List of trusted algos not containing `algo_did`.
    """
    if isinstance(asset_or_did, V3Asset):
        asset = asset_or_did
    else:
        asset = resolve_asset(asset_or_did, metadata_cache_uri=metadata_cache_uri)

    trusted_algorithms = asset.get_trusted_algorithms()
    if not trusted_algorithms:
        raise ValueError(
            f"Algorithm {algo_did} is not in trusted algorithms of this asset."
        )

    trusted_algorithms = [ta for ta in trusted_algorithms if ta["did"] != algo_did]
    trusted_algo_publishers = asset.get_trusted_algorithm_publishers()
    asset.update_compute_privacy(
        trusted_algorithms, trusted_algo_publishers, False, False
    )
    assert (
        asset.get_trusted_algorithms() == trusted_algorithms
    ), "New trusted algorithm was not removed. Failed when updating the list of trusted algorithms."
    return trusted_algorithms
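# Hedged usage sketch for the two removal helpers above. The DIDs, publisher address,
# and metadata cache URL below are placeholder values chosen for illustration only;
# a reachable Aquarius instance would be needed for resolve_asset to succeed.
dataset_did = "did:op:" + "0" * 64             # placeholder dataset DID
algo_did = "did:op:" + "1" * 64                # placeholder algorithm DID
publisher = "0x" + "2" * 40                    # placeholder publisher address
aquarius_url = "https://aquarius.example.com"  # placeholder metadata cache URL

# Each helper resolves the dataset DDO, drops the given entry from the relevant list,
# updates the in-memory compute privacy values, and returns the updated list.
remaining_algos = remove_publisher_trusted_algorithm(dataset_did, algo_did, aquarius_url)
remaining_publishers = remove_publisher_trusted_algorithm_publisher(
    dataset_did, publisher, aquarius_url
)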
def generate_trusted_algo_dict(
    asset_or_did: Union[str, V3Asset] = None, metadata_cache_uri: Optional[str] = None
) -> dict:
    """
    :return: Object as follows:
    ```
    {
        "did": <did>,
        "filesChecksum": <str>,
        "containerSectionChecksum": <str>
    }
    ```
    """
    if isinstance(asset_or_did, V3Asset):
        ddo = asset_or_did
    else:
        ddo = resolve_asset(asset_or_did, metadata_cache_uri=metadata_cache_uri)

    algo_metadata = ddo.metadata
    return {
        "did": ddo.did,
        "filesChecksum": create_checksum(
            algo_metadata.get("encryptedFiles", "")
            + json.dumps(algo_metadata["main"]["files"], separators=(",", ":"))
        ),
        "containerSectionChecksum": create_checksum(
            json.dumps(
                algo_metadata["main"]["algorithm"]["container"], separators=(",", ":")
            )
        ),
    }
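# Brief sketch of calling the helper above on an algorithm asset; the DID and the
# metadata cache URL are hypothetical placeholders, not real endpoints.
algo_did = "did:op:" + "3" * 64                # placeholder algorithm DID
aquarius_url = "https://aquarius.example.com"  # placeholder metadata cache URL

algo_entry = generate_trusted_algo_dict(
    asset_or_did=algo_did, metadata_cache_uri=aquarius_url
)
# algo_entry carries the algorithm DID plus checksums of its files and container
# sections, ready to be appended to a "publisherTrustedAlgorithms" list.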
def _get_service_endpoint(self, did, asset=None):
    if not asset:
        asset = resolve_asset(did, self._config.aquarius_url)

    return self._data_provider.build_compute_endpoint(
        ServiceAgreement.from_ddo(ServiceTypes.CLOUD_COMPUTE, asset).service_endpoint
    )
def resolve(self, did: str) -> Asset:
    """
    Retrieve the DDO associated with the given DID.

    :param did: DID, str
    :return: Asset instance
    """
    return resolve_asset(did, metadata_store_url=self._config.aquarius_url)
def resolve(self, did: str) -> V3Asset:
    """
    Retrieve the DDO associated with the given DID.

    :param did: DID, str
    :return: Asset instance
    """
    return resolve_asset(did, metadata_cache_uri=self._config.metadata_cache_uri)
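# Hedged sketch of how the resolver above is typically used. It assumes an
# already-configured Ocean instance named `ocean` (an assumption, not shown in this
# section) and a placeholder DID.
did = "did:op:" + "4" * 64  # placeholder DID
asset = ocean.assets.resolve(did)
print(asset.did, asset.data_token_address)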
def _get_compute_result_file_endpoint(
    self, did: str, asset: Optional[V3Asset] = None
) -> Tuple[str, str]:
    if not asset:
        asset = resolve_asset(did, self._config.metadata_cache_uri)

    return self._data_provider.build_compute_result_file_endpoint(
        asset.get_service(ServiceTypes.CLOUD_COMPUTE).service_endpoint
    )
def start(
    self,
    did: str,
    consumer_wallet: Wallet,
    order_tx_id: str,
    nonce: Optional[int] = None,
    algorithm_did: Optional[str] = None,
    algorithm_meta: Optional[AlgorithmMetadata] = None,
    algorithm_tx_id: str = "",
    algorithm_data_token: str = "",
    output: dict = None,
    job_id: str = None,
):
    """
    Start a remote compute job on the asset files identified by `did` after verifying
    that the provider service is active and transferring the number of data tokens
    required for using this compute service.

    :param did: str -- id of asset that has the compute service
    :param consumer_wallet: Wallet instance of the consumer ordering the service
    :param order_tx_id: hex str -- id of the startOrder transaction (tx hash)
    :param nonce: int value to use in the signature
    :param algorithm_did: str -- the asset did (of `algorithm` type) which consists of
        `did:op:` and the assetId hex str (without `0x` prefix)
    :param algorithm_meta: `AlgorithmMetadata` instance -- metadata about the algorithm
        being run if `algorithm` is being used. This is ignored when `algorithm_did` is specified.
    :param algorithm_tx_id: transaction hash of algorithm StartOrder tx (Required when using `algorithm_did`)
    :param algorithm_data_token: datatoken address of this algorithm (Required when using `algorithm_did`)
    :param output: dict object to be used in publishing mechanism, must define
    :param job_id: str identifier of a compute job that was previously started and
        stopped (if supported by the provider's backend)
    :return: str -- id of compute job being executed
    """
    assert (
        algorithm_did or algorithm_meta
    ), "either an algorithm did or an algorithm meta must be provided."

    output = OceanCompute.check_output_dict(
        output, consumer_wallet.address, data_provider=self._data_provider
    )
    asset = resolve_asset(did, metadata_store_url=self._config.aquarius_url)
    service_endpoint = self._get_service_endpoint(did, asset)

    sa = ServiceAgreement.from_ddo(ServiceTypes.CLOUD_COMPUTE, asset)
    signature = self._sign_message(
        consumer_wallet, f"{consumer_wallet.address}{did}", nonce=nonce
    )

    job_info = self._data_provider.start_compute_job(
        did,
        service_endpoint,
        consumer_wallet.address,
        signature,
        sa.index,
        asset.data_token_address,
        order_tx_id,
        algorithm_did,
        algorithm_meta,
        algorithm_tx_id,
        algorithm_data_token,
        output,
        job_id,
    )
    return job_info["jobId"]
def test_resolve_asset(publisher_ocean_instance, metadata):
    publisher = get_publisher_wallet()
    metadata_copy = metadata.copy()
    blob = json.dumps(
        {"t": 1, "url": publisher_ocean_instance.config.metadata_cache_uri}
    )

    asset = publisher_ocean_instance.assets.create(
        metadata_copy, publisher, dt_blob=blob
    )
    wait_for_ddo(publisher_ocean_instance, asset.did)
    assert asset is not None, "The asset is not cached."
    assert isinstance(asset, V3Asset), "The asset is not an Asset instance."

    # resolve asset from metadata_cache_uri
    resolved_asset_from_metadata_cache_uri = resolve_asset(
        asset.did,
        metadata_cache_uri=publisher_ocean_instance.config.metadata_cache_uri,
    )
    assert isinstance(
        resolved_asset_from_metadata_cache_uri, V3Asset
    ), "The resolved asset is not an instance of Asset."
    assert (
        resolved_asset_from_metadata_cache_uri.did == asset.did
    ), "Resolve asset function call is unsuccessful."

    # resolve asset from web3 and token_address
    resolved_asset_from_web3_and_token_address = resolve_asset(
        asset.did,
        web3=publisher_ocean_instance.web3,
        token_address=asset.data_token_address,
    )
    assert isinstance(
        resolved_asset_from_web3_and_token_address, V3Asset
    ), "The resolved asset is not an instance of Asset."
    assert (
        resolved_asset_from_web3_and_token_address.did == asset.did
    ), "Resolve asset function call is unsuccessful."
def remove_publisher_trusted_algorithm(
    dataset_did: str, algo_did: str, metadata_store_url: str
) -> list:
    asset = resolve_asset(dataset_did, metadata_store_url=metadata_store_url)
    trusted_algorithms = asset.get_trusted_algorithms()
    if not trusted_algorithms:
        raise ValueError(
            f"Algorithm {algo_did} is not in trusted algorithms of this asset."
        )

    trusted_algorithms = [ta for ta in trusted_algorithms if ta["did"] != algo_did]
    asset.update_compute_privacy(trusted_algorithms, False, False)
    assert asset.get_trusted_algorithms() == trusted_algorithms
    return trusted_algorithms
def add_publisher_trusted_algorithm(
    dataset_did: str, algo_did: str, metadata_store_url: str
) -> list:
    asset = resolve_asset(dataset_did, metadata_store_url=metadata_store_url)
    compute_service = asset.get_service(ServiceTypes.CLOUD_COMPUTE)
    assert (
        compute_service
    ), "Cannot add trusted algorithm to this asset because it has no compute service."
    privacy_values = compute_service.attributes["main"].get("privacy")
    if not privacy_values:
        privacy_values = {}
        compute_service.attributes["main"]["privacy"] = privacy_values

    assert isinstance(privacy_values, dict)
    trusted_algos = privacy_values.get("publisherTrustedAlgorithms", [])
    # remove algo_did if already in the list
    trusted_algos = [ta for ta in trusted_algos if ta["did"] != algo_did]

    # now add this algo_did as trusted algo
    algo_ddo = resolve_asset(algo_did, metadata_store_url=metadata_store_url)
    trusted_algos.append(generate_trusted_algo_dict(ddo=algo_ddo))

    # update with the new list
    privacy_values["publisherTrustedAlgorithms"] = trusted_algos
    assert compute_service.attributes["main"]["privacy"] == privacy_values
    return trusted_algos
def add_publisher_trusted_algorithm(
    asset_or_did: Union[str, V3Asset], algo_did: str, metadata_cache_uri: str
) -> list:
    """
    :return: List of trusted algos
    """
    if isinstance(asset_or_did, V3Asset):
        asset = asset_or_did
    else:
        asset = resolve_asset(asset_or_did, metadata_cache_uri=metadata_cache_uri)

    compute_service = asset.get_service(ServiceTypes.CLOUD_COMPUTE)
    assert (
        compute_service
    ), "Cannot add trusted algorithm to this asset because it has no compute service."
    privacy_values = compute_service.attributes["main"].get("privacy")
    if not privacy_values:
        privacy_values = {}
        compute_service.attributes["main"]["privacy"] = privacy_values

    assert isinstance(privacy_values, dict), "Privacy key is not a dictionary."
    trusted_algos = privacy_values.get("publisherTrustedAlgorithms", [])
    # remove algo_did if already in the list
    trusted_algos = [ta for ta in trusted_algos if ta["did"] != algo_did]

    # now add this algo_did as trusted algo
    algo_ddo = resolve_asset(algo_did, metadata_cache_uri=metadata_cache_uri)
    trusted_algos.append(generate_trusted_algo_dict(asset_or_did=algo_ddo))

    # update with the new list
    privacy_values["publisherTrustedAlgorithms"] = trusted_algos
    assert (
        compute_service.attributes["main"]["privacy"] == privacy_values
    ), "New trusted algorithm was not added. Failed when updating the privacy key."
    return trusted_algos
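# Hedged usage sketch for the helper above. The DIDs and the metadata cache URL are
# placeholders, and a reachable Aquarius instance is assumed so that both resolve_asset
# calls can succeed.
dataset_did = "did:op:" + "5" * 64             # placeholder dataset DID
algo_did = "did:op:" + "6" * 64                # placeholder algorithm DID
aquarius_url = "https://aquarius.example.com"  # placeholder metadata cache URL

trusted = add_publisher_trusted_algorithm(dataset_did, algo_did, aquarius_url)
# The returned list contains one dict per trusted algorithm, including the entry just added.
assert any(entry["did"] == algo_did for entry in trusted)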
def generate_trusted_algo_dict(
    did: str = None, metadata_store_url: str = None, ddo: Asset = None
):
    assert ddo or (did and metadata_store_url)
    if not ddo:
        ddo = resolve_asset(did, metadata_store_url=metadata_store_url)

    algo_metadata = ddo.metadata
    return {
        "did": ddo.did,
        "filesChecksum": create_checksum(
            algo_metadata["encryptedFiles"]
            + json.dumps(algo_metadata["main"]["files"], separators=(",", ":"))
        ),
        "containerSectionChecksum": create_checksum(
            json.dumps(
                algo_metadata["main"]["algorithm"]["container"], separators=(",", ":")
            )
        ),
    }
def add_publisher_trusted_algorithm_publisher(
    asset_or_did: Union[str, V3Asset], publisher_address: str, metadata_cache_uri: str
) -> list:
    """
    :return: List of trusted algo publishers
    """
    if isinstance(asset_or_did, V3Asset):
        asset = asset_or_did
    else:
        asset = resolve_asset(asset_or_did, metadata_cache_uri=metadata_cache_uri)

    compute_service = asset.get_service(ServiceTypes.CLOUD_COMPUTE)
    assert (
        compute_service
    ), "Cannot add trusted algorithm publisher to this asset because it has no compute service."
    privacy_values = compute_service.attributes["main"].get("privacy")
    if not privacy_values:
        privacy_values = {}
        compute_service.attributes["main"]["privacy"] = privacy_values

    assert isinstance(privacy_values, dict), "Privacy key is not a dictionary."
    trusted_algo_publishers = [
        tp.lower()
        for tp in privacy_values.get("publisherTrustedAlgorithmPublishers", [])
    ]
    publisher_address = publisher_address.lower()
    if publisher_address in trusted_algo_publishers:
        return trusted_algo_publishers

    trusted_algo_publishers.append(publisher_address)
    # update with the new list
    privacy_values["publisherTrustedAlgorithmPublishers"] = trusted_algo_publishers
    assert (
        compute_service.attributes["main"]["privacy"] == privacy_values
    ), "New trusted algorithm publisher was not added. Failed when updating the privacy key."
    return trusted_algo_publishers
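# Short hedged sketch of the publisher variant above; the DID, address, and URL are
# placeholders. The helper lowercases addresses and returns the existing list unchanged
# if the publisher is already trusted.
dataset_did = "did:op:" + "7" * 64             # placeholder dataset DID
publisher_address = "0x" + "8" * 40            # placeholder publisher address
aquarius_url = "https://aquarius.example.com"  # placeholder metadata cache URL

publishers = add_publisher_trusted_algorithm_publisher(
    dataset_did, publisher_address, aquarius_url
)
assert publisher_address.lower() in publishers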
def start(
    self,
    input_datasets: list,
    consumer_wallet: Wallet,
    nonce: Optional[int] = None,
    algorithm_did: Optional[str] = None,
    algorithm_meta: Optional[AlgorithmMetadata] = None,
    algorithm_tx_id: str = None,
    algorithm_data_token: str = None,
    output: dict = None,
    job_id: str = None,
):
    """
    Start a remote compute job on the asset files.

    Files are identified by `did` after verifying that the provider service is active
    and transferring the number of data tokens required for using this compute service.

    :param input_datasets: list of ComputeInput -- list of input datasets to the
        compute job. A dataset is represented with ComputeInput struct
    :param consumer_wallet: Wallet instance of the consumer ordering the service
    :param nonce: int value to use in the signature
    :param algorithm_did: str -- the asset did (of `algorithm` type) which consists of
        `did:op:` and the assetId hex str (without `0x` prefix)
    :param algorithm_meta: `AlgorithmMetadata` instance -- metadata about the algorithm
        being run if `algorithm` is being used. This is ignored when `algorithm_did` is specified.
    :param algorithm_tx_id: transaction hash of algorithm StartOrder tx (Required when using `algorithm_did`)
    :param algorithm_data_token: datatoken address of this algorithm (Required when using `algorithm_did`)
    :param output: dict object to be used in publishing mechanism, must define
    :param job_id: str identifier of a compute job that was previously started and
        stopped (if supported by the provider's backend)
    :return: str -- id of compute job being executed
    """
    assert (
        algorithm_did or algorithm_meta
    ), "either an algorithm did or an algorithm meta must be provided."

    for i in input_datasets:
        assert isinstance(i, ComputeInput)

    first_input = input_datasets[0]
    did = first_input.did
    order_tx_id = first_input.transfer_tx_id
    service_id = first_input.service_id

    output = OceanCompute.check_output_dict(
        output, consumer_wallet.address, data_provider=self._data_provider
    )
    asset = resolve_asset(did, metadata_cache_uri=self._config.metadata_cache_uri)
    _, service_endpoint = self._get_service_endpoint(did, asset)

    service = asset.get_service_by_index(service_id)
    sa = ServiceAgreement.from_json(service.as_dictionary())
    assert (
        ServiceTypes.CLOUD_COMPUTE == sa.type
    ), "service at serviceId is not of type compute service."

    signature = self._sign_message(
        consumer_wallet,
        f"{consumer_wallet.address}{did}",
        nonce=nonce,
        service_endpoint=sa.service_endpoint,
    )

    try:
        job_info = self._data_provider.start_compute_job(
            did,
            service_endpoint,
            consumer_wallet.address,
            signature,
            sa.index,
            order_tx_id,
            algorithm_did,
            algorithm_meta,
            algorithm_tx_id,
            algorithm_data_token,
            output,
            input_datasets,
            job_id,
        )
        return job_info["jobId"]
    except ValueError:
        raise
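# Hedged sketch of invoking the compute start method above. It assumes an
# already-configured Ocean instance named `ocean` and an existing `consumer_wallet`
# Wallet (assumptions, not shown in this section), plus prior startOrder transactions
# for both dataset and algorithm. The DIDs, tx hashes, token address, and service index
# are placeholders, and the ComputeInput constructor argument order is assumed from the
# attribute access (did, transfer_tx_id, service_id) in the method above.
dataset_input = ComputeInput(
    "did:op:" + "9" * 64,  # dataset DID (placeholder)
    "0x" + "a" * 64,       # transfer_tx_id of the dataset startOrder (placeholder)
    3,                     # index of the dataset's compute service (placeholder)
)

job_id = ocean.compute.start(
    [dataset_input],
    consumer_wallet,
    algorithm_did="did:op:" + "b" * 64,    # algorithm DID (placeholder)
    algorithm_tx_id="0x" + "c" * 64,       # algorithm startOrder tx hash (placeholder)
    algorithm_data_token="0x" + "d" * 40,  # algorithm datatoken address (placeholder)
)
print(job_id)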
def test_bad_resolved_asset():
    with pytest.raises(AssertionError) as err:
        resolve_asset("0x1")

    assert (
        err.value.args[0]
        == "Either metadata_cache_uri or (web3 and token_address) is required."
    )