def test_client_auto_key(self):
    """Verify API-key pickup from the environment: blank keys raise, a set key succeeds."""
    saved_environ = os.environ.copy()
    try:
        # With both candidate key variables blank, client construction must fail.
        os.environ["TEST_API_KEY"] = ""
        os.environ["COGNITE_API_KEY"] = ""
        with pytest.raises(ValueError):
            CogniteClient(project="test")
        # Once a key is present, construction succeeds.
        os.environ["TEST_API_KEY"] = "foo"
        CogniteClient(project="test")
    finally:
        # Restore the process environment exactly as it was before the test.
        os.environ.clear()
        os.environ.update(saved_environ)
def cognite_client_with_token():
    """Return a CogniteClient authenticated with a static bearer token."""
    return CogniteClient(
        token="aabbccddeeffgg",
        disable_pypi_version_check=True,
    )
def cognite_client_with_client_credentials():
    """Return a CogniteClient configured for the OAuth client-credentials flow."""
    return CogniteClient(
        token_client_id="test-client-id",
        token_client_secret="test-client-secret",
        token_url="https://param-test.com/token",
        token_scopes=["test-scope", "second-test-scope"],
        disable_pypi_version_check=True,
    )
import pytest from cognite.experimental import CogniteClient from cognite.experimental.data_classes import ContextualizationJob COGNITE_CLIENT = CogniteClient() PNIDAPI = COGNITE_CLIENT.pnid_parsing PNID_FILE_ID = 3261066797848581 class TestPNIDParsingIntegration: def test_run_detect_str(self): entities = ["YT-96122", "XE-96125"] file_id = PNID_FILE_ID job = PNIDAPI.detect(file_id=file_id, entities=entities) assert isinstance(job, ContextualizationJob) assert "Completed" == job.status # the job is completed in the PNIDParsingAPI assert {"items", "fileId", "fileExternalId"} == set(job.result.keys()) def test_run_detect_entities_dict(self): entities = [{"name": "YT-96122"}, {"name": "XE-96125", "ee": 123}, {"name": "XWDW-9615"}] file_id = PNID_FILE_ID job = PNIDAPI.detect(file_id=file_id, entities=entities) assert isinstance(job, ContextualizationJob) assert "Completed" == job.status # the job is completed in the PNIDParsingAPI assert {"items", "fileId", "fileExternalId"} == set(job.result.keys()) def test_run_convert(self): items = [ { "text": "21-PT-1019",
def client():
    """Return a default-configured CogniteClient."""
    return CogniteClient()
import re import pytest from cognite.experimental import CogniteClient from cognite.experimental.data_classes import Type, TypeFilter, TypeList from tests.utils import jsgz_load TYPES_API = CogniteClient().types @pytest.fixture def mock_types_response(rsps): response_body = { "items": [{ "externalId": "456", "properties": [{ "propertyId": "abc", "name": "123", "type": "string" }], "id": 1, "version": 4, "createdTime": 1575892259245, "lastUpdatedTime": 1575892259245, }]
import pytest

from cognite.client.exceptions import CogniteAPIError
from cognite.experimental import CogniteClient

c = CogniteClient()


class TestCogniteClient:
    """Smoke-tests for the raw HTTP verb helpers against the /login endpoints."""

    def test_get(self):
        # /login/status is a valid GET endpoint and should respond 200.
        response = c.get("/login/status")
        assert response.status_code == 200

    def test_post(self):
        # POST is not supported on /login; the API answers 404.
        with pytest.raises(CogniteAPIError) as exc_info:
            c.post("/login", json={})
        assert exc_info.value.code == 404

    def test_put(self):
        # PUT is not supported on /login; the API answers 404.
        with pytest.raises(CogniteAPIError) as exc_info:
            c.put("/login")
        assert exc_info.value.code == 404

    def test_delete(self):
        # DELETE is not supported on /login; the API answers 404.
        with pytest.raises(CogniteAPIError) as exc_info:
            c.delete("/login")
        assert exc_info.value.code == 404
def test_client(self):
    """A client can be constructed from an explicit project and api_key."""
    CogniteClient(project="test", api_key="test")
def test_client_token(self):
    """A client can be constructed from an explicit project and bearer token."""
    CogniteClient(project="test", token="test")
# Read the GitHub-Action inputs and event context from the environment.
CDF_PROJECT = os.getenv("INPUT_CDF_PROJECT")
CDF_DEPLOYMENT_CREDENTIALS = os.getenv("INPUT_CDF_DEPLOYMENT_CREDENTIALS")
CDF_BASE_URL = os.getenv("INPUT_CDF_BASE_URL", "https://api.cognitedata.com")
FUNCTION_PATH = os.getenv("INPUT_FUNCTION_PATH")
GITHUB_EVENT_NAME = os.environ["GITHUB_EVENT_NAME"]
GITHUB_REF = os.environ["GITHUB_REF"]

# The three action inputs are mandatory; fail loudly if any is absent.
if not (CDF_PROJECT and CDF_DEPLOYMENT_CREDENTIALS and FUNCTION_PATH):
    missing_inputs_message = "Missing one of inputs cdf_project, cdf_deployment_credentials, function_path"
    print(missing_inputs_message, flush=True)
    raise MissingInput(missing_inputs_message)

print(f"Handling event {GITHUB_EVENT_NAME} on {GITHUB_REF}", flush=True)

client = CogniteClient(
    api_key=CDF_DEPLOYMENT_CREDENTIALS,
    project=CDF_PROJECT,
    base_url=CDF_BASE_URL,
    client_name="deploy-function-action",
)
user = client.login.status()
print(f"Logged in as user {user}", flush=True)

# Dispatch on the triggering GitHub event; other event types are ignored.
if GITHUB_EVENT_NAME == "push":
    handle_push(client.functions)
elif GITHUB_EVENT_NAME == "pull_request":
    handle_pull_request(client.functions)
def handle(client, data):
    """Match all time series to assets with the entity matcher and link the best matches.

    Returns a dict with the count of time series that received an asset link.
    """
    # When deploying a function from a notebook like this, all imports must be
    # performed inside the `handle` function.
    import time

    from cognite.client.data_classes import TimeSeriesUpdate
    from cognite.experimental import CogniteClient

    # The matcher scores each suggestion; 0.75 was chosen by inspecting this
    # function's output and may differ on other customers' data.
    good_match_threshold = data.get("good_match_threshold", 0.75)

    # The contextualization APIs live in playground, so rebuild the client with
    # the experimental SDK from the incoming client's configuration.
    client = CogniteClient(
        api_key=client.config.api_key,
        base_url=client.config.base_url,
        project=client.config.project,
    )

    # Download all assets and time series, 5 parallel requests each.
    assets = client.assets.list(limit=-1, partitions=5)
    time_series = client.time_series.list(limit=-1, partitions=5)

    # Reduce both collections to the minimal {id, name} shape the matcher needs.
    assets_simplified = [{"id": asset.id, "name": asset.name} for asset in assets]
    time_series_simplified = [{"id": ts.id, "name": ts.name} for ts in time_series]

    # Train the entity matcher: sources are what we match FROM (time series),
    # targets are what we match TO (assets).
    t0 = time.time()
    model = client.entity_matching.fit(sources=time_series_simplified, targets=assets_simplified)
    print(f"Training entity matcher model with id {model} ...")
    model.wait_for_completion()
    t1 = time.time()
    print(
        f"Model {model} trained on {len(assets_simplified)} assets and {len(time_series_simplified)} time series using {t1-t0} seconds"
    )

    # Run prediction with the freshly trained model. A model can be reused, so
    # retraining every run is only done here for simplicity.
    t0 = time.time()
    job = model.predict(time_series_simplified)
    result = job.result  # blocks until the prediction job completes
    t1 = time.time()
    print(
        f"Predict finished after {t1-t0} seconds on {len(time_series_simplified)} time series."
    )

    # Keep only suggestions at or above the configured score threshold.
    good_match_count = 0
    time_series_updates = []
    for item in result["items"]:
        match_from = item["source"]  # the time series being matched
        matches = item["matches"]  # suggested asset matches for that time series
        good_matches = [match for match in matches if match["score"] >= good_match_threshold]
        if good_matches:
            good_match_count += 1
            # NOTE(review): the first entry is taken as the best match — presumably
            # the API returns matches sorted by score; confirm before relying on it.
            best_match = good_matches[0]
            time_series_updates.append(
                TimeSeriesUpdate(id=match_from["id"]).asset_id.set(best_match["target"]["id"])
            )

    # Write the asset links back to CDF.
    client.time_series.update(time_series_updates)
    print(f"Matched {good_match_count} time series to assets")
    return {"matches": good_match_count}
import pytest from cognite.client.exceptions import CogniteAPIError from cognite.experimental import CogniteClient from cognite.experimental.data_classes import Annotation, AnnotationFilter, AnnotationList, ContextualizationJob COGNITE_CLIENT = CogniteClient(debug=True) ANNOTATIONSAPI = COGNITE_CLIENT.annotations @pytest.fixture def new_annotation(): annot = Annotation( annotation_type="abc", annotated_resource_external_id="foo", annotated_resource_type="bar", source="sdk-integration-tests", ) c_annot = ANNOTATIONSAPI.create(annot) yield c_annot ANNOTATIONSAPI.delete(id=c_annot.id) try: ANNOTATIONSAPI.retrieve(c_annot.id) except CogniteAPIError as e: assert "Could not find" in str(e) @pytest.fixture def new_annotations(): annot = Annotation(