def __init__(self, web3, config_file, metadata_contract=None):
        self._oceandb = OceanDb(config_file).plugin

        self._other_db_index = f'{self._oceandb.driver.db_index}_plus'
        self._oceandb.driver.es.indices.create(index=self._other_db_index, ignore=400)

        self._web3 = web3
        self._pool_monitor = None
        if int(os.getenv('PROCESS_POOL_EVENTS', 1)) == 1:
            self._pool_monitor = MetadataUpdater(
                self._oceandb,
                self._other_db_index,
                self._web3,
                ConfigProvider.get_config()
            )

        if not metadata_contract:
            metadata_contract = get_metadata_contract(self._web3)

        self._contract = metadata_contract
        self._contract_address = self._contract.address

        self._ecies_private_key = os.getenv('EVENTS_ECIES_PRIVATE_KEY', '')
        self._ecies_account = None
        if self._ecies_private_key:
            self._ecies_account = Account.privateKeyToAccount(self._ecies_private_key)

        metadata_block = int(os.getenv('METADATA_CONTRACT_BLOCK', 0))
        try:
            self.get_last_processed_block()
        except Exception:
            self.store_last_processed_block(metadata_block)

        allowed_publishers = set()
        try:
            publishers_str = os.getenv('ALLOWED_PUBLISHERS', '')
            allowed_publishers = set(json.loads(publishers_str)) if publishers_str else set()
        except Exception as e:  # includes JSONDecodeError and TypeError
            logger.error(f'Reading list of allowed publishers failed: {e}\n'
                         f'ALLOWED_PUBLISHERS is set to "{os.getenv("ALLOWED_PUBLISHERS")}"')

        self._allowed_publishers = set(sanitize_addresses(allowed_publishers))
        logger.debug(f'allowed publishers: {self._allowed_publishers}')

        logger.debug(f'EventsMonitor: using Metadata contract address {self._contract_address}.')
        self._monitor_is_on = False
        default_sleep_time = 10
        try:
            self._monitor_sleep_time = int(os.getenv('OCN_EVENTS_MONITOR_QUITE_TIME', default_sleep_time))
        except ValueError:
            self._monitor_sleep_time = default_sleep_time

        self._monitor_sleep_time = max(self._monitor_sleep_time, default_sleep_time)
        if not self._contract or not self._web3.isAddress(self._contract_address):
            logger.error(
                f"Contract address {self._contract_address} is not a valid address. Events thread not starting")
            self._contract = None
        self._purgatory_enabled = int(os.getenv('PROCESS_PURGATORY', 1)) == 1
        self._purgatory_list = set()
        self._purgatory_update_time = None
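The constructor above is configured entirely through environment variables; a minimal setup sketch (values are illustrative only, not from the source):

import os

os.environ['PROCESS_POOL_EVENTS'] = '1'             # start the MetadataUpdater pool monitor
os.environ['EVENTS_ECIES_PRIVATE_KEY'] = ''         # empty string leaves the ECIES account unset
os.environ['METADATA_CONTRACT_BLOCK'] = '0'         # fallback starting block
os.environ['ALLOWED_PUBLISHERS'] = '["0x0000000000000000000000000000000000000001"]'
os.environ['OCN_EVENTS_MONITOR_QUITE_TIME'] = '10'  # polling sleep time in seconds (minimum 10)
os.environ['PROCESS_PURGATORY'] = '1'               # enable purgatory processing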
Example 2
    def create_compute_service(attributes, provider_uri=None):
        service_endpoint = provider_uri or DataServiceProvider.get_url(
            ConfigProvider.get_config()
        )
        return ServiceDescriptor.compute_service_descriptor(
            attributes, service_endpoint
        )
Example 3
def test_build_specific_endpoints():
    """Tests that a specific list of agreed endpoints is supported on the DataServiceProvider."""
    config = ConfigProvider.get_config()
    endpoints = TEST_SERVICE_ENDPOINTS

    def get_service_endpoints(_provider_uri=None):
        return TEST_SERVICE_ENDPOINTS.copy()

    original_func = DataSP.get_service_endpoints
    DataSP.get_service_endpoints = get_service_endpoints

    base_uri = DataSP.get_root_uri(config.provider_url)
    assert DataSP.build_download_endpoint()[1] == urljoin(
        base_uri, endpoints["download"][1])
    assert DataSP.build_initialize_endpoint()[1] == urljoin(
        base_uri, endpoints["initialize"][1])
    assert DataSP.build_encrypt_endpoint()[1] == urljoin(
        base_uri, endpoints["encrypt"][1])
    assert DataSP.build_fileinfo()[1] == urljoin(base_uri,
                                                 endpoints["fileinfo"][1])
    assert DataSP.build_compute_endpoint()[1] == urljoin(
        base_uri, endpoints["computeStatus"][1])
    assert DataSP.build_compute_endpoint()[1] == urljoin(
        base_uri, endpoints["computeStart"][1])
    assert DataSP.build_compute_endpoint()[1] == urljoin(
        base_uri, endpoints["computeStop"][1])
    assert DataSP.build_compute_endpoint()[1] == urljoin(
        base_uri, endpoints["computeDelete"][1])

    DataSP.get_service_endpoints = original_func
Example 4
def test_build_endpoint():
    """Tests that service endpoints are correctly built from URL and service name."""
    def get_service_endpoints(_provider_uri=None):
        _endpoints = TEST_SERVICE_ENDPOINTS.copy()
        _endpoints.update(
            {"newEndpoint": ["GET", "/api/v1/services/newthing"]})
        return _endpoints

    original_func = DataSP.get_service_endpoints
    DataSP.get_service_endpoints = get_service_endpoints
    config = ConfigProvider.get_config()

    endpoints = get_service_endpoints()
    uri = "http://ppp.com"
    method, endpnt = DataSP.build_endpoint("newEndpoint", provider_uri=uri)
    assert endpnt == urljoin(uri, endpoints["newEndpoint"][1])
    # config has no effect when provider_uri is set
    assert (endpnt == DataSP.build_endpoint("newEndpoint",
                                            provider_uri=uri,
                                            config=config)[1])

    method, endpnt = DataSP.build_endpoint("newEndpoint", config=config)
    assert endpnt == urljoin(DataSP.get_root_uri(config.provider_url),
                             endpoints["newEndpoint"][1])
    assert (endpnt == DataSP.build_endpoint(
        "newEndpoint", provider_uri=config.provider_url)[1])

    uri = "http://ppp.com:8030/api/v1/services/newthing"
    method, endpnt = DataSP.build_endpoint("download", provider_uri=uri)
    assert method == endpoints["download"][0]
    assert endpnt == urljoin(DataSP.get_root_uri(uri),
                             endpoints["download"][1])

    DataSP.get_service_endpoints = original_func
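Example 5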
def delist_ddo(did):
    assert request.json and isinstance(request.json,
                                       dict), 'invalid payload format.'
    data = request.json
    address = data.get('adminAddress', None)
    if not address or not has_update_request_permission(address):
        return jsonify(error='Unauthorized.'), 401

    _address = None
    signature = data.get('signature', None)
    if signature:
        _address = get_signer_address(address, signature, logger)

    if not _address or _address.lower() != address.lower():
        return jsonify(error='Unauthorized.'), 401

    try:
        asset_record = dao.get(did)
        if not asset_record:
            return jsonify(error=f'Asset {did} not found.'), 404

        updater = MetadataUpdater(oceandb=dao.oceandb,
                                  web3=Web3Provider.get_web3(),
                                  config=ConfigProvider.get_config())
        updater.do_single_update(asset_record)

        return jsonify('acknowledged.'), 200
    except Exception as e:
        logger.error(f'delist_ddo: {str(e)}')
        return f'{did} asset DID is not in OceanDB', 404
Example 6
    def build_service_privacy_attributes(
        trusted_algorithms: list = None,
        allow_raw_algorithm: bool = False,
        allow_all_published_algorithms: bool = False,
        allow_network_access: bool = False,
    ):
        """
        :param trusted_algorithms: list of algorithm DIDs to be trusted by the compute service provider
        :param allow_raw_algorithm: bool -- when True, unpublished raw algorithm code can be run on this dataset
        :param allow_all_published_algorithms: bool -- when True, any published algorithm can be run on this dataset
            The list of `trusted_algorithms` will be ignored in this case.
        :param allow_network_access: bool -- allow/disallow the algorithm network access during execution
        :return: dict
        """
        privacy = {
            "allowRawAlgorithm": allow_raw_algorithm,
            "allowAllPublishedAlgorithms": allow_all_published_algorithms,
            "publisherTrustedAlgorithms": [],
            "allowNetworkAccess": allow_network_access,
        }
        if trusted_algorithms:
            privacy["publisherTrustedAlgorithms"] = create_publisher_trusted_algorithms(
                trusted_algorithms, ConfigProvider.get_config().metadata_cache_uri
            )

        return privacy
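A brief usage sketch for the method above (not from the source; it assumes the staticmethod is in scope and omits `trusted_algorithms`, so no ConfigProvider lookup is triggered):

privacy = build_service_privacy_attributes(
    allow_raw_algorithm=False,
    allow_all_published_algorithms=True,  # any published algorithm may run; the trusted list is ignored
    allow_network_access=True,
)
assert privacy["publisherTrustedAlgorithms"] == []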
Example 7
    def check_output_dict(output_def, consumer_address, data_provider, config=None):
        """
        Validate the `output_def` dict and fill in defaults for missing values.

        :param output_def: dict
        :param consumer_address: hex str the consumer ethereum address
        :param data_provider:  DataServiceProvider class or similar interface
        :param config: Config instance
        :return: dict a valid `output_def` object
        """
        if not config:
            config = ConfigProvider.get_config()

        default_output_def = {
            "nodeUri": config.network_url,
            "brizoUri": data_provider.get_url(config),
            "brizoAddress": config.provider_address,
            "metadata": dict(),
            "metadataUri": config.metadata_cache_uri,
            "owner": consumer_address,
            "publishOutput": 0,
            "publishAlgorithmLog": 0,
            "whitelist": [],
        }

        output_def = output_def if isinstance(output_def, dict) else dict()
        default_output_def.update(output_def)
        return default_output_def
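The core of the helper above is a defaults-then-override merge; a self-contained sketch of that idiom (illustrative values, not from the source):

defaults = {"publishOutput": 0, "publishAlgorithmLog": 0, "whitelist": []}
supplied = {"publishOutput": 1}  # the caller only sets one field
defaults.update(supplied)        # caller values win, missing keys keep their defaults
assert defaults == {"publishOutput": 1, "publishAlgorithmLog": 0, "whitelist": []}

Example 8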
    def get_provider_address(provider_uri=None):
        """
        Return the provider address
        """
        if not provider_uri:
            provider_uri = ConfigProvider.get_config().provider_url
        provider_info = DataServiceProvider._http_method("get", provider_uri).json()
        return provider_info["providerAddress"]
Example 9
    def get_service_endpoints():
        """
        Return the service endpoints from the provider URL.
        """
        if DataServiceProvider.provider_info is None:
            config = ConfigProvider.get_config()
            DataServiceProvider.provider_info = requests.get(config.provider_url).json()

        return DataServiceProvider.provider_info['serviceEndpoints']
Example 10
    def build_endpoint(service_name, provider_uri=None, config=None):
        if not provider_uri:
            config = config or ConfigProvider.get_config()
            provider_uri = DataServiceProvider.get_url(config)

        provider_uri = DataServiceProvider.get_root_uri(provider_uri)
        service_endpoints = DataServiceProvider.get_service_endpoints(provider_uri)

        method, url = service_endpoints[service_name]
        return method, urljoin(provider_uri, url)
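A hypothetical call of the method above (the provider URI is an assumption, and the provider must be reachable because the service endpoints are fetched over HTTP):

method, url = DataServiceProvider.build_endpoint("download", provider_uri="http://localhost:8030")
# e.g. ("GET", "http://localhost:8030/api/v1/services/download"), depending on the
# serviceEndpoints the provider advertises.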
Example 11
    def get_service_endpoints(provider_uri=None):
        """
        Return the service endpoints from the provider URL.
        """
        if not provider_uri:
            provider_uri = DataServiceProvider.get_url(ConfigProvider.get_config())

        provider_info = DataServiceProvider._http_method("get", provider_uri).json()

        return provider_info["serviceEndpoints"]
    def download_service(
        did,
        service_endpoint,
        wallet,
        files,
        destination_folder,
        service_id,
        token_address,
        order_tx_id,
        index=None,
    ):
        """
        Call the provider endpoint to get access to the different files that form the asset.

        :param did: str id of the asset
        :param service_endpoint: Url to consume, str
        :param wallet: hex str Wallet instance of the consumer signing this request
        :param files: List containing the files to be consumed, list
        :param destination_folder: Path, str
        :param service_id: integer the id of the service inside the DDO's service dict
        :param token_address: hex str the data token address associated with this asset/service
        :param order_tx_id: hex str the transaction hash for the required data token
            transfer (tokens of the same token address above)
        :param index: Index of the document that is going to be downloaded, int
        :return: True if the download succeeded, bool
        """
        indexes = range(len(files))
        if index is not None:
            assert isinstance(index,
                              int), logger.error("index has to be an integer.")
            assert index >= 0, logger.error(
                "index has to be 0 or a positive integer.")
            assert index < len(files), logger.error(
                "index can not be bigger than the number of files")
            indexes = [index]

        base_url = (f"{service_endpoint}"
                    f"?documentId={did}"
                    f"&serviceId={service_id}"
                    f"&serviceType={ServiceTypes.ASSET_ACCESS}"
                    f"&dataToken={token_address}"
                    f"&transferTxId={order_tx_id}"
                    f"&consumerAddress={wallet.address}")
        config = ConfigProvider.get_config()
        for i in indexes:
            signature = DataServiceProvider.sign_message(wallet, did, config)
            download_url = base_url + f"&signature={signature}&fileIndex={i}"
            logger.info(
                f"invoke consume endpoint with this url: {download_url}")
            response = DataServiceProvider._http_client.get(download_url,
                                                            stream=True)
            file_name = DataServiceProvider._get_file_name(response)
            DataServiceProvider.write_file(response, destination_folder,
                                           file_name or f"file-{i}")

        return True
Example 13
def enforce_types_shim(func):
    try:
        c = ConfigProvider.get_config()
    except AssertionError:  # handle if ConfigProvider.set_config() not done yet
        c = config_defaults

    val = c["util"][NAME_TYPECHECK]
    typecheck = distutils.util.strtobool(val)

    if typecheck:
        return enforce_types(func)
    return func
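A usage sketch (not from the source), assuming `enforce_types_shim` and its config machinery (`config_defaults`, `NAME_TYPECHECK`) are importable; the shim is applied like any other decorator and only wraps the function with `enforce_types` when the typecheck flag is enabled:

@enforce_types_shim
def add(a: int, b: int) -> int:
    return a + b

Example 14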
    def get_provider_address(provider_uri=None):
        """
        Return the provider address
        """
        if not provider_uri:
            if DataServiceProvider.provider_info is None:
                config = ConfigProvider.get_config()
                DataServiceProvider.provider_info = requests.get(
                    config.provider_url).json()
            return DataServiceProvider.provider_info["provider-address"]
        provider_info = requests.get(provider_uri).json()
        return provider_info["provider-address"]
Example 15
def test_update_trusted_algorithms():
    setup = Setup()

    config = ConfigProvider.get_config()
    ddo_address = get_contracts_addresses(
        "ganache", config)[MetadataContract.CONTRACT_NAME]
    ddo_registry = MetadataContract(ddo_address)

    # Setup algorithm meta to run raw algorithm
    algorithm_ddo = get_registered_algorithm_ddo(
        setup.publisher_ocean_instance, setup.publisher_wallet)
    # verify the ddo is available in Aquarius
    _ = setup.publisher_ocean_instance.assets.resolve(algorithm_ddo.did)

    # Dataset with compute service
    compute_ddo = get_registered_ddo_with_compute_service(
        setup.publisher_ocean_instance,
        setup.publisher_wallet,
        trusted_algorithms=[algorithm_ddo.did],
    )
    # verify the ddo is available in Aquarius
    _ = setup.publisher_ocean_instance.assets.resolve(compute_ddo.did)
    trusted_algo_list = create_publisher_trusted_algorithms(
        [algorithm_ddo.did],
        setup.publisher_ocean_instance.config.aquarius_url)
    compute_ddo.update_compute_privacy(trusted_algorithms=trusted_algo_list,
                                       allow_all=False,
                                       allow_raw_algorithm=False)

    tx_id = setup.publisher_ocean_instance.assets.update(
        compute_ddo, setup.publisher_wallet)

    tx_receipt = ddo_registry.get_tx_receipt(tx_id)
    logs = ddo_registry.event_MetadataUpdated.processReceipt(tx_receipt)
    assert logs[0].args.dataToken == compute_ddo.data_token_address

    wait_for_update(
        setup.publisher_ocean_instance,
        compute_ddo.did,
        "privacy",
        {"publisherTrustedAlgorithms": [algorithm_ddo.did]},
    )

    compute_ddo_updated = setup.publisher_ocean_instance.assets.resolve(
        compute_ddo.did)

    run_compute_test(
        setup.consumer_ocean_instance,
        setup.publisher_wallet,
        setup.consumer_wallet,
        [compute_ddo_updated],
        algo_ddo=algorithm_ddo,
    )
Example 16
    def create_access_service(attributes, provider_uri=None):
        """Publish an asset with an `Access` service according to the supplied attributes.

        :param attributes: attributes of the access service, dict
        :param provider_uri: str URL of service provider. This will be used as base to
            construct the serviceEndpoint for the `access` (download) service
        :return: Service instance or None
        """
        service_endpoint = provider_uri or DataServiceProvider.get_url(
            ConfigProvider.get_config())
        service = ServiceDescriptor.access_service_descriptor(
            attributes, service_endpoint)
        return service
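A usage sketch for the method above; the attribute values and the provider URI are hypothetical, not from the source:

attributes = {
    "main": {
        "name": "dataAssetAccessServiceAgreement",
        "creator": "0x0000000000000000000000000000000000000001",
        "timeout": 3600,
        "datePublished": "2021-01-01T00:00:00Z",
        "cost": 1.0,
    }
}
access_descriptor = create_access_service(attributes, provider_uri="http://localhost:8030")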
Example 17
    def get_provider_address(provider_uri=None):
        """
        Return the provider address
        """
        try:
            if not provider_uri:
                provider_uri = ConfigProvider.get_config().provider_url
            provider_info = DataServiceProvider._http_method(
                "get", provider_uri).json()

            return provider_info["providerAddress"]
        except requests.exceptions.RequestException:
            pass

        return None
Example 18
    def get_service_endpoints(provider_uri=None):
        """
        Return the service endpoints from the provider URL.
        """
        if not provider_uri:
            provider_uri = DataServiceProvider.get_url(
                ConfigProvider.get_config())

        api_version = DataServiceProvider.get_api_version()
        if api_version in provider_uri:
            i = provider_uri.find(api_version)
            provider_uri = provider_uri[:i]
        provider_info = DataServiceProvider._http_method("get",
                                                         provider_uri).json()

        return provider_info["serviceEndpoints"]
Example 19
    def build_endpoint(service_name, provider_uri=None, config=None):
        if not provider_uri:
            config = config or ConfigProvider.get_config()
            provider_uri = DataServiceProvider.get_url(config)

        provider_uri = DataServiceProvider._remove_slash(provider_uri)
        parts = provider_uri.split('/')
        if parts[-2] == 'services':
            base_url = '/'.join(parts[:-2])
            return f'{base_url}/services/initialize'

        api_version = DataServiceProvider.get_api_version()
        if api_version not in provider_uri:
            provider_uri = f'{provider_uri}/{api_version}'

        return f'{provider_uri}/services/{service_name}'
Example 20
def get_user_balances(userAddress):
    """

    :param userAddress:
    :return:
    """
    try:
        data = get_request_data(request) or {}
        from_block = data.get('fromBlock', int(os.getenv('BFACTORY_BLOCK', 0)))
        ocean = Ocean(ConfigProvider.get_config())
        result = ocean.pool.get_user_balances(userAddress, from_block)
        return Response(json.dumps(result),
                        200,
                        content_type='application/json')
    except Exception as e:
        logger.error(f'pools/user/{userAddress}: {str(e)}')
        return f'Get pool user balances failed: {str(e)}', 500
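Example 21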
    def download_service(
        did,
        service_endpoint,
        wallet,
        files,
        destination_folder,
        service_id,
        token_address,
        order_tx_id,
        index=None,
    ):

        indexes = range(len(files))
        if index is not None:
            assert isinstance(index,
                              int), logger.error("index has to be an integer.")
            assert index >= 0, logger.error(
                "index has to be 0 or a positive integer.")
            assert index < len(files), logger.error(
                "index can not be bigger than the number of files")
            indexes = [index]

        base_url = (f"{service_endpoint}"
                    f"?documentId={did}"
                    f"&serviceId={service_id}"
                    f"&serviceType={ServiceTypes.ASSET_ACCESS}"
                    f"&dataToken={token_address}"
                    f"&transferTxId={order_tx_id}"
                    f"&consumerAddress={wallet.address}")
        config = ConfigProvider.get_config()

        for i in indexes:
            signature = DataServiceProvider.sign_message(wallet, did, config)
            download_url = base_url + f"&signature={signature}&fileIndex={i}"
            logger.info(
                f"invoke consume endpoint with this url: {download_url}")
            response = DataServiceProvider.get_http_client().get(download_url,
                                                                 stream=True)
            file_name = DataProviderMock._get_file_name(response)
            DataServiceProvider.write_file(response, destination_folder,
                                           file_name or f"file-{i}")

        return True
Example 22
    def build_endpoint(service_name, provider_uri=None, config=None):
        if not provider_uri:
            config = config or ConfigProvider.get_config()
            provider_uri = DataServiceProvider.get_url(config)

        provider_uri = DataServiceProvider._remove_slash(provider_uri)
        parts = provider_uri.split("/")
        if parts[-2] == "services":
            base_url = "/".join(parts[:-2])
            return "GET", urljoin(base_url, "services/initialize")

        api_version = DataServiceProvider.get_api_version()
        if api_version not in provider_uri:
            provider_uri = urljoin(provider_uri, api_version)

        service_endpoints = DataServiceProvider.get_service_endpoints()
        method, url = service_endpoints[service_name]
        url = url.replace(api_version, "")

        return method, urljoin(provider_uri, url)
Example 23
def get_current_liquidity_stats(poolAddress):
    """

    :param poolAddress:
    :return:
    """
    try:
        data = get_request_data(request) or {}
        dt_address = data.get('datatokenAddress', None)
        from_block = data.get('fromBlock', None)
        to_block = data.get('toBlock', None)
        ocean = Ocean(ConfigProvider.get_config())
        pool_info = ocean.pool.get_short_pool_info(poolAddress, dt_address,
                                                   from_block, to_block)
        return Response(json.dumps(pool_info),
                        200,
                        content_type='application/json')
    except Exception as e:
        logger.error(f'pools/liquidity/{poolAddress}: {str(e)}')
        return f'Get pool current liquidity stats failed: {str(e)}', 500
Example 24
    def build_endpoint(service_name, provider_uri=None, config=None):
        if not provider_uri:
            config = config or ConfigProvider.get_config()
            provider_uri = DataServiceProvider.get_url(config)

        provider_uri = DataServiceProvider._remove_slash(provider_uri)
        parts = provider_uri.split('/')
        if parts[-2] == 'services':
            base_url = '/'.join(parts[:-2])
            return "GET", f'{base_url}/services/initialize'

        api_version = DataServiceProvider.get_api_version()
        if api_version not in provider_uri:
            provider_uri = f'{provider_uri}/{api_version}'

        service_endpoints = DataServiceProvider.get_service_endpoints()
        method, url = service_endpoints[service_name]
        url = url.replace(api_version, '')

        return method, f'{provider_uri}{url}'
Example 25
def get_liquidity_history(poolAddress):
    """

    :param poolAddress:
    :return: json object with two keys: `ocean` and `datatoken`
      each has a list of datapoints sampled at specific time intervals from the pools liquidity history.
    """
    try:
        result = dict()
        ocean = Ocean(ConfigProvider.get_config())
        pool = BPool(poolAddress)
        dt_address = ocean.pool.get_token_address(poolAddress,
                                                  pool,
                                                  validate=False)
        swap_fee = from_base_18(pool.getSwapFee())
        ocn_weight = from_base_18(
            pool.getDenormalizedWeight(ocean.OCEAN_address))
        dt_weight = from_base_18(pool.getDenormalizedWeight(dt_address))

        ocn_add_remove_list, dt_add_remove_list = ocean.pool.get_liquidity_history(
            poolAddress)
        ocn_add_remove_list = [(v, int(t)) for v, t in ocn_add_remove_list]
        dt_add_remove_list = [(v, int(t)) for v, t in dt_add_remove_list]

        ocn_reserve_history, dt_reserve_history, price_history = build_liquidity_and_price_history(
            ocn_add_remove_list, dt_add_remove_list, ocn_weight, dt_weight,
            swap_fee)

        result['oceanAddRemove'] = ocn_add_remove_list
        result['datatokenAddRemove'] = dt_add_remove_list
        result['oceanReserveHistory'] = ocn_reserve_history
        result['datatokenReserveHistory'] = dt_reserve_history
        result['datatokenPriceHistory'] = price_history
        return Response(json.dumps(result),
                        200,
                        content_type='application/json')
    except Exception as e:
        logger.error(f'pools/history/{poolAddress}: {str(e)}', exc_info=1)
        return f'Get pool liquidity/price history failed: {str(e)}', 500
Example 26
def test_trusted_algorithms(publisher_ocean_instance):
    """Tests if the trusted algorithms list is returned correctly."""
    publisher = get_publisher_wallet()
    provider_uri = ConfigProvider.get_config().provider_url

    algorithm_ddo = get_registered_algorithm_ddo(publisher_ocean_instance,
                                                 publisher)
    wait_for_ddo(publisher_ocean_instance, algorithm_ddo.did)
    assert algorithm_ddo is not None

    ddo = get_registered_ddo_with_compute_service(
        publisher_ocean_instance,
        publisher,
        provider_uri=provider_uri,
        trusted_algorithms=[algorithm_ddo.did],
    )
    wait_for_ddo(publisher_ocean_instance, ddo.did)
    assert ddo is not None

    trusted_algorithms = ddo.get_trusted_algorithms()
    service = ddo.get_service(ServiceTypes.CLOUD_COMPUTE)
    privacy_dict = service.attributes["main"].get("privacy")
    assert privacy_dict

    assert trusted_algorithms is not None
    assert len(trusted_algorithms) >= 1
    for index, trusted_algorithm in enumerate(trusted_algorithms):
        assert trusted_algorithm["did"] == algorithm_ddo.did
        assert "filesChecksum" and "containerSectionChecksum" in trusted_algorithm
        assert (
            trusted_algorithm["filesChecksum"] ==
            privacy_dict["publisherTrustedAlgorithms"][index]["filesChecksum"])
        assert (trusted_algorithm["containerSectionChecksum"] == privacy_dict[
            "publisherTrustedAlgorithms"][index]["containerSectionChecksum"])
        assert (trusted_algorithm["did"] ==
                privacy_dict["publisherTrustedAlgorithms"][index]["did"])
Example 27
def contracts_addresses():
    return ContractHandler.get_contracts_addresses(
        _NETWORK,
        ConfigProvider.get_config().address_file)
Example 28
def bfactory_address():
    return BFactory.configured_address(
        _NETWORK,
        ConfigProvider.get_config().address_file)
Example 29
    def __init__(self, config=None, data_provider=None):
        """Initialize Ocean class.

           >> # Make a new Ocean instance
           >> ocean = Ocean({...})

        This class provides the main top-level functions in ocean protocol:
         * Publish assets metadata and associated services
            * Each asset is assigned a unique DID and a DID Document (DDO)
            * The DDO contains the asset's services including the metadata
            * The DID is registered on-chain with a URL of the metadata store
              to retrieve the DDO from

            >> asset = ocean.assets.create(metadata, publisher_wallet)

         * Discover/Search assets via the current configured metadata store (Aquarius)
            >> assets_list = ocean.assets.search('search text')

        An instance of Ocean is parameterized by a `Config` instance.

        :param config: Config instance
        :param data_provider: DataServiceProvider instance
        """
        # Configuration information for the market is stored in the Config class
        # config = Config(filename=config_file, options_dict=config_dict)
        if not config:
            try:
                config = ConfigProvider.get_config()
            except AssertionError:
                config = Config(os.getenv(ENV_CONFIG_FILE))
                ConfigProvider.set_config(config)
        if isinstance(config, dict):
            # fallback to metadataStoreUri
            cache_key = ("metadataCacheUri" if ("metadataCacheUri" in config)
                         else "metadataStoreUri")
            aqua_url = config.get(
                cache_key, config.get("aquarius.url", "http://localhost:5000"))
            config_dict = {
                "eth-network": {
                    "network": config.get("network", "")
                },
                "resources": {
                    "aquarius.url":
                    aqua_url,
                    "provider.url":
                    config.get("providerUri", "http://localhost:8030"),
                },
            }
            config = Config(options_dict=config_dict)
        ConfigProvider.set_config(config)
        self._config = config
        ContractHandler.set_artifacts_path(self._config.artifacts_path)
        Web3Provider.init_web3(
            provider=get_web3_connection_provider(self._config.network_url))

        self._web3 = Web3Provider.get_web3()

        if not data_provider:
            data_provider = DataServiceProvider

        network = Web3Helper.get_network_name()
        addresses = get_contracts_addresses(network, self._config)
        self.assets = OceanAssets(
            self._config, data_provider,
            addresses.get(MetadataContract.CONTRACT_NAME))
        self.services = OceanServices()
        self.auth = OceanAuth(self._config.storage_path)
        self.compute = OceanCompute(self.auth, self._config, data_provider)

        ocean_address = get_ocean_token_address(network)
        self.pool = OceanPool(ocean_address, get_bfactory_address(network))
        self.exchange = OceanExchange(
            ocean_address,
            FixedRateExchange.configured_address(
                network or Web3Helper.get_network_name(),
                ConfigProvider.get_config().address_file,
            ),
            self.config,
        )

        logger.debug("Ocean instance initialized: ")
Example 30
def get_ocean_token_address(network=None):
    addresses = get_contracts_addresses(
        network or Web3Helper.get_network_name(), ConfigProvider.get_config())
    return addresses.get("Ocean") if addresses else None