def test_cannot_add_model_with_auto_generated_metadata(self, context, core_org):
        """
        <b>Description:</b>
        Try to add a model with auto-generated metadata.

        <b>Input data:</b>
        1. Organization
        2. Model with auto-generated metadata

        <b>Expected results:</b>
        Test passes when the added model's auto-generated metadata differs from the values supplied during creation.

        <b>Steps:</b>
        1. Prepare metadata with auto-generated values.
        2. Try to add a new model with the prepared metadata.
        3. Verify that the added model contains the expected metadata.
        """
        step("Try to create model with auto-generated metadata")
        metadata_auto_generated = {
            "added_by": "test-user",
            "added_on": "2000-01-01 00:00 GMT",
            "modified_by": "test-user",
            "modified_on": "2000-01-01 00:00 GMT"
        }
        metadata = MODEL_METADATA.copy()
        metadata.update(metadata_auto_generated)
        test_model = ScoringEngineModel.create(context, org_guid=core_org, **metadata)
        step("Check that params for auto-generated metadata do not affect created model")
        incorrect_metadata = []
        for k, v in metadata_auto_generated.items():
            if getattr(test_model, k) == v:
                incorrect_metadata.append("{}={}".format(k, v))
        assert len(incorrect_metadata) == 0, "Incorrect metadata: {}".format(", ".join(incorrect_metadata))
        models = ScoringEngineModel.get_list(org_guid=core_org)
        assert test_model in models

    def test_add_model_with_minimum_required_params(self, context, core_org):
        """
        <b>Description:</b>
        Add a new model to the model catalog using only the required params.

        <b>Input data:</b>
        1. Organization
        2. Model with minimum required parameters: creation tool and name

        <b>Expected results:</b>
        Test passes when model is added to model catalog successfully.

        <b>Steps:</b>
        1. Prepare metadata containing only the required name and creation tool values.
        2. Add model with prepared metadata to model catalog.
        3. Verify that created model is shown on models list.
        """
        step("Add model to organization")
        metadata = {
            "name": MODEL_METADATA["name"],
            "creation_tool": MODEL_METADATA["creation_tool"]
        }
        new_model = ScoringEngineModel.create(context, org_guid=core_org, **metadata)
        models = ScoringEngineModel.get_list(org_guid=core_org)
        assert new_model in models
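
# The tests above rely on a step() logging helper and a MODEL_METADATA constant
# defined outside this excerpt. A minimal sketch of what they might look like;
# the helper's behavior and every MODEL_METADATA key beyond the required "name"
# and "creation_tool" are assumptions inferred from how the tests use them.
import logging

logger = logging.getLogger(__name__)


def step(message):
    """Log a human-readable test step, as called throughout these tests."""
    logger.info("STEP: %s", message)


MODEL_METADATA = {
    "name": "test-model",           # required
    "creation_tool": "test-tool",   # required
    "revision": "1.0",              # assumed optional field (see update tests)
    "algorithm": "test-algorithm",  # assumed optional field
    "description": "test model",    # assumed optional field
}
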
    def test_get_org_models(self, test_user_clients, role, core_org):
        """
        <b>Description:</b>
        Get list of models in org from model catalog using user client with user/admin role.

        <b>Input data:</b>
        1. User
        2. User role (user or admin)
        3. Organization

        <b>Expected results:</b>
        Test passes when the user client gets the list of models successfully.

        <b>Steps:</b>
        1. Get list of models using tested user client.
        2. Get list of models using default client.
        3. Compare responses from previous steps.
        """
        step("List models in an organization using {}".format(role))
        client = test_user_clients[role]
        models = ScoringEngineModel.get_list(org_guid=core_org, client=client)
        step("List or models in an organization using admin")
        expected_models = ScoringEngineModel.get_list(org_guid=core_org)
        step("Check that the two lists are the same")
        assert sorted(models) == sorted(expected_models)

    def test_add_new_artifact_to_model(self, sample_model, test_user_clients, role):
        """
        <b>Description:</b>
        Add artifact file to the model.

        <b>Input data:</b>
        1. Example model existing on models list in model catalog.
        2. User
        3. User role (user or admin)

        <b>Expected results:</b>
        Test passes when artifact is added to the model successfully.

        <b>Steps:</b>
        1. Add artifact to the model.
        2. Verify that added artifact is shown on model's artifacts list.
        """
        client = test_user_clients[role]
        step("Add new artifact to model using {}".format(role))
        new_artifact = ModelArtifact.upload_artifact(model_id=sample_model.id, client=client,
                                                     **self.ARTIFACT_METADATA)
        step("Get artifact {} metadata of model {}".format(new_artifact.id, sample_model.id))
        artifact = ModelArtifact.get_artifact(model_id=sample_model.id, artifact_id=new_artifact.id)
        model_artifacts = ScoringEngineModel.get(model_id=sample_model.id).artifacts
        assert artifact in model_artifacts

    def test_delete_model(self, sample_model, test_user_clients, role, core_org):
        """
        <b>Description:</b>
        Delete model from model catalog.

        <b>Input data:</b>
        1. Example model existing on models list in model catalog.
        2. User
        3. User role (user or admin)
        4. Organization

        <b>Expected results:</b>
        Test passes when model is deleted from model catalog successfully.

        <b>Steps:</b>
        1. Delete model from model catalog.
        2. Get list of models from model catalog.
        3. Verify that model does not exist on models list.
        """
        step("Delete model organization using {}".format(role))
        client = test_user_clients[role]
        sample_model.delete(client=client)
        step("Check that the deleted model is not on the list of models")
        models = ScoringEngineModel.get_list(org_guid=core_org)
        assert sample_model not in models
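
# Several tests above accept `test_user_clients` and `role`. A plausible
# (assumed) setup: the tests are parametrized over the two roles, and
# `test_user_clients` maps each role name to an HTTP client authenticated as a
# user with that role. A minimal sketch; get_client_for_role() is a
# hypothetical stand-in for whatever actually builds the clients.
import pytest


def get_client_for_role(role_name):
    """Hypothetical helper; the real client construction is not shown here."""
    raise NotImplementedError("depends on the platform's auth setup")


@pytest.fixture(params=["user", "admin"])
def role(request):
    return request.param


@pytest.fixture
def test_user_clients():
    return {"user": get_client_for_role("user"),
            "admin": get_client_for_role("admin")}
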
    def test_can_patch_metadata_without_all_fields(self, sample_model):
        """
        <b>Description:</b>
        Use PATCH request to update part of model metadata.

        <b>Input data:</b>
        1. Example model existing on models list in model catalog.

        <b>Expected results:</b>
        Test passes when model metadata has been updated successfully.

        <b>Steps:</b>
        1. Prepare model metadata to update.
        2. Update model with prepared metadata.
        3. Get model metadata.
        4. Verify that model has expected metadata values.
        """
        metadata = self.UPDATED_METADATA.copy()
        fields_to_not_update = {"creation_tool", "name"}
        for key in fields_to_not_update:
            if key in metadata:
                del metadata[key]
        sample_model.patch(**metadata)
        updated_model = ScoringEngineModel.get(model_id=sample_model.id)
        assert updated_model.name == sample_model.name
        assert updated_model.creation_tool == sample_model.creation_tool
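
# self.UPDATED_METADATA is not shown in this excerpt. From the keys the tests
# remove from it ("name" and "creation_tool" above; "revision", "algorithm"
# and "description" in test_update_only_part_of_metadata_fields below), it
# plausibly looks something like this; all values are illustrative assumptions.
UPDATED_METADATA = {
    "name": "updated-model",
    "creation_tool": "updated-tool",
    "revision": "2.0",
    "algorithm": "updated-algorithm",
    "description": "updated test model",
}
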
    def test_updating_auto_generated_metadata_has_no_effect(self, sample_model):
        """
        <b>Description:</b>
        Try to update the model's auto-generated metadata.

        <b>Input data:</b>
        1. Example model existing on models list in model catalog.

        <b>Expected results:</b>
        Test passes when updating has no effect on the auto-generated metadata fields.

        <b>Steps:</b>
        1. Prepare auto-generated metadata.
        2. Update model with prepared metadata.
        3. Get model metadata.
        4. Verify that model has expected metadata values.
        """
        auto_generated_metadata = {
            "added_by": "test-user",
            "added_on": "2000-01-01 00:00 GMT",
            "modified_by": "test-user",
            "modified_on": "2000-01-01 00:00 GMT"
        }
        step("Try to update model with auto-generated metadata")
        sample_model.patch(**auto_generated_metadata)
        step("Check that params for auto-generated metadata do not affect created model")
        model = ScoringEngineModel.get(model_id=sample_model.id)
        incorrect_metadata = []
        for k, v in auto_generated_metadata.items():
            if getattr(model, k) == v:
                incorrect_metadata.append("{}={}".format(k, v))
        assert len(incorrect_metadata) == 0, "Incorrect metadata: {}".format(", ".join(incorrect_metadata))
        assert model == sample_model
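
# `sample_model` is a fixture defined outside this excerpt. Judging from how
# it is used, it creates a fresh model before each test. A minimal sketch
# under that assumption, reusing the context and core_org fixtures and the
# MODEL_METADATA constant sketched elsewhere in this listing.
import pytest


@pytest.fixture
def sample_model(context, core_org):
    # Assumed: one new model per test, registered for cleanup via `context`.
    return ScoringEngineModel.create(context, org_guid=core_org,
                                     **MODEL_METADATA)
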
def test_add_new_model_to_organization(context, core_org):
    """
    <b>Description:</b>
    Checks if new model can be added to the platform.

    <b>Input data:</b>
    1. Scoring Engine Model.

    <b>Expected results:</b>
    Test passes when the model is successfully added to the platform.

    <b>Steps:</b>
    1. Create a model in the organization.
    2. Get list with models on the platform.
    3. Verify the model is on the list.
    """
    step("Add model to organization as admin")
    new_model = ScoringEngineModel.create(context,
                                          org_guid=core_org,
                                          **MODEL_METADATA)
    step("Check that the model is on model list")
    models = ScoringEngineModel.get_list(org_guid=core_org)
    assert new_model in models
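
# ScoringEngineModel itself is defined elsewhere. The calls made throughout
# these tests imply at least the interface below; the bodies are placeholders,
# not the real implementation, and the HTTP layer is omitted entirely.
class ScoringEngineModel:

    @classmethod
    def create(cls, context, org_guid, client=None, **metadata):
        """POST a new model to the model catalog and return it."""
        raise NotImplementedError

    @classmethod
    def get(cls, model_id, client=None):
        """GET a single model by id."""
        raise NotImplementedError

    @classmethod
    def get_list(cls, org_guid, client=None):
        """GET all models in an organization."""
        raise NotImplementedError

    def update(self, client=None, **metadata):
        """Replace the model's metadata (full, PUT-style update)."""
        raise NotImplementedError

    def patch(self, client=None, **metadata):
        """Update only the given metadata fields (PATCH)."""
        raise NotImplementedError

    def delete(self, client=None):
        """DELETE the model from the model catalog."""
        raise NotImplementedError
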
    def test_add_new_model_to_organization(self, context, test_user_clients, role, core_org):
        """
        <b>Description:</b>
        Add new model to model catalog.

        <b>Input data:</b>
        1. User
        2. User role (user or admin)
        3. Organization
        4. Model

        <b>Expected results:</b>
        Test passes when created model exists on models list.

        <b>Steps:</b>
        1. Add model to organization.
        2. Verify that created model is shown on models list.
        """
        client = test_user_clients[role]
        step("Add model to organization using {}".format(role))
        new_model = ScoringEngineModel.create(context, org_guid=core_org, client=client, **MODEL_METADATA)
        step("Check that the model is on model list")
        models = ScoringEngineModel.get_list(org_guid=core_org)
        assert new_model in models

    def test_can_add_more_than_one_artifacts_to_model(self, sample_model):
        """
        <b>Description:</b>
        Add two artifacts to one model.

        <b>Input data:</b>
        1. Example model existing on models list in model catalog.

        <b>Expected results:</b>
        Test passes when first and second artifacts are added to the model successfully.

        <b>Steps:</b>
        1. Add first artifact to the model.
        2. Add second artifact to the model.
        3. Verify that both artifacts are shown on model's artifacts list.
        """
        step("Add new artifact to model")
        uploaded_first_artifact = ModelArtifact.upload_artifact(model_id=sample_model.id, **self.ARTIFACT_METADATA)
        first_artifact = ModelArtifact.get_artifact(model_id=sample_model.id, artifact_id=uploaded_first_artifact.id)
        step("Try to add second artifact to model")
        uploaded_second_artifact = ModelArtifact.upload_artifact(model_id=sample_model.id, **self.ARTIFACT_METADATA)
        second_artifact = ModelArtifact.get_artifact(model_id=sample_model.id, artifact_id=uploaded_second_artifact.id)
        assert first_artifact in ScoringEngineModel.get(model_id=sample_model.id).artifacts
        assert second_artifact in ScoringEngineModel.get(model_id=sample_model.id).artifacts
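
# ModelArtifact and self.ARTIFACT_METADATA are likewise defined outside this
# excerpt. The artifact tests imply roughly the following shape; the metadata
# key shown is an illustrative guess, not the real one.
ARTIFACT_METADATA = {
    "filename": "example_artifact.txt",  # assumed key and value
}


class ModelArtifact:

    @classmethod
    def upload_artifact(cls, model_id, client=None, **artifact_metadata):
        """POST an artifact file to the given model and return its metadata."""
        raise NotImplementedError

    @classmethod
    def get_artifact(cls, model_id, artifact_id, client=None):
        """GET a single artifact's metadata from the given model."""
        raise NotImplementedError
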
    def test_get_metadata_of_model(self, sample_model, test_user_clients,
                                   role):
        """
        <b>Description:</b>
        Get metadata of model from model catalog using user client with user/admin role.

        <b>Input data:</b>
        1. Example model existing on models list in model catalog.
        2. User
        3. User role (user or admin)

        <b>Expected results:</b>
        Test passes when the user client gets the model metadata successfully.

        <b>Steps:</b>
        1. Get metadata of model using tested user client.
        2. Verify that metadata has expected values.
        """
        step("Get model using {}".format(role))
        client = test_user_clients[role]
        model = ScoringEngineModel.get(model_id=sample_model.id, client=client)
        assert model == sample_model
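
# Assertions such as `model == sample_model`, `new_model in models` and
# `sorted(models)` only work if models compare by value and are orderable. A
# sketch of the comparison methods that behavior implies; which attributes the
# real class actually compares is an assumption.
import functools


@functools.total_ordering
class ComparableModelMixin:
    """Value-based comparison, e.g. as mixed into ScoringEngineModel."""

    def __eq__(self, other):
        # Assumed: equality over identity and metadata, not object identity.
        return (self.id, self.name) == (other.id, other.name)

    def __lt__(self, other):
        return self.id < other.id
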
    def test_update_only_part_of_metadata_fields(self, sample_model):
        """
        <b>Description:</b>
        Update part of model metadata.

        <b>Input data:</b>
        1. Example model existing on models list in model catalog.

        <b>Expected results:</b>
        Test passes when model metadata has been updated successfully.

        <b>Steps:</b>
        1. Prepare model metadata to update.
        2. Update model with prepared metadata.
        3. Get model metadata.
        4. Verify that model has expected metadata values.
        """
        metadata = self.UPDATED_METADATA.copy()
        for m in ("revision", "algorithm", "description"):
            del metadata[m]
        sample_model.update(**metadata)
        model = ScoringEngineModel.get(model_id=sample_model.id)
        assert sample_model == model
    def test_update_model(self, sample_model, test_user_clients, role):
        """
        <b>Description:</b>
        Update model metadata using user client with user/admin role.

        <b>Input data:</b>
        1. Example model existing on models list in model catalog.
        2. User
        3. User role

        <b>Expected results:</b>
        Test passes when both user clients can update model metadata successfully.

        <b>Steps:</b>
        1. Update model metadata using tested user client.
        2. Get model metadata.
        3. Verify that model has expected metadata values.
        """
        step("Update model using {}".format(role))
        client = test_user_clients[role]
        sample_model.update(client=client, **self.UPDATED_METADATA)
        step("Check that the model is updated")
        model = ScoringEngineModel.get(model_id=sample_model.id)
        assert model == sample_model
    def test_patch_model_metadata(self, sample_model, test_user_clients, role):
        """
        <b>Description:</b>
        Use PATCH request to update model metadata using user client with user/admin role.

        <b>Input data:</b>
        1. Example model existing on models list in model catalog.
        2. User
        3. User role

        <b>Expected results:</b>
        Test passes when both user clients can update model metadata successfully.

        <b>Steps:</b>
        1. Update model metadata using tested user client.
        2. Get model metadata.
        3. Verify that model has expected metadata values.
        """
        step("Update model name and creation_tool")
        client = test_user_clients[role]
        sample_model.patch(client=client, **self.UPDATED_METADATA)
        step("Get model and check that metadata are updated")
        model = ScoringEngineModel.get(model_id=sample_model.id)
        assert model == sample_model
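
# Finally, the `context` argument threaded through the create() calls comes
# from a fixture defined outside this excerpt; most likely it records created
# objects so they can be cleaned up after each test. A minimal sketch under
# that assumption.
import pytest


class CleanupContext:
    """Hypothetical container tracking objects for post-test cleanup."""

    def __init__(self):
        self.test_objects = []


@pytest.fixture
def context():
    ctx = CleanupContext()
    yield ctx
    # Assumed teardown: delete whatever the test created, newest first.
    for obj in reversed(ctx.test_objects):
        obj.delete()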