Code example #1
    def check_for_update(self, predictor_id: UUID) -> Optional[Predictor]:
        """
        Check if there are updates available for a predictor.

        Typically these are updates to the training data. For example, a GEM Table may have
        been re-built to include additional rows.

        This check does not update the predictor; it just returns the update that is available.
        To perform the update, the response should then be used to call PredictorCollection.update.

        Parameters
        ----------
        predictor_id: UUID
            Unique identifier of the predictor to check

        Returns
        -------
        Optional[Predictor]
            The update, if an update is available; None otherwise.

        """
        path = "/projects/{}/predictors/{}/check-for-update".format(
            self.project_id, predictor_id)
        data = self.session.get_resource(path)
        if data["updatable"]:
            enveloped = GraphPredictor.stuff_predictor_into_envelope(
                data["update"])
            built: Predictor = Predictor.build(enveloped)
            built.uid = predictor_id
            return built
        else:
            return None
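The docstring spells out the intended two-step flow: check for an available update, then apply it with PredictorCollection.update. A minimal usage sketch, assuming a `project` handle whose `predictors` attribute is this collection and an existing `predictor_id` (both assumptions, not shown in the source):

# Hedged usage sketch: `project` and `predictor_id` are assumed to exist already.
update = project.predictors.check_for_update(predictor_id)
if update is not None:
    # The returned Predictor carries the refreshed configuration; persisting it
    # goes through the collection's update call, as the docstring describes.
    project.predictors.update(update)
else:
    print("No update available; predictor is current.")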
Code example #2
    def from_predictor_responses(self, predictor: Predictor,
                                 inputs: List[Descriptor]) -> List[Descriptor]:
        """
        [ALPHA] Get responses for a predictor, given an input space.

        Parameters
        ----------
        predictor : Predictor
            The predictor whose available responses are to be computed.
        inputs : List[Descriptor]
            The input space to the predictor.

        Returns
        -------
        List[Descriptor]
            The computable responses of the predictor given the provided input space (as
            descriptors).

        """
        response = self.session.post_resource(
            path='/projects/{}/material-descriptors/predictor-responses'.format(self.project_id),
            json={
                'predictor': predictor.dump()['config'],
                'inputs': [i.dump() for i in inputs]
            }
        )
        return [Descriptor.build(r) for r in response['responses']]
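A hedged usage sketch for the call above, assuming the method is reached through a project's descriptors helper and that `predictor` already exists (neither name is confirmed by the source); the input descriptors mirror those in code example #5:

# Hedged sketch: `project` and `predictor` are assumptions.
inputs = [RealDescriptor("x", 0, 100, ""), RealDescriptor("y", 0, 100, "")]
responses = project.descriptors.from_predictor_responses(predictor, inputs)
for descriptor in responses:
    print(descriptor)  # each entry is a Descriptor the predictor can compute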
Code example #3
def test_polymorphic_legacy_deserialization(valid_simple_ml_data):
    """Ensure that a polymorphically deserialized SimplePredictor looks sane."""
    predictor: SimpleMLPredictor = Predictor.build(valid_simple_ml_data)
    assert predictor.name == 'ML predictor'
    assert predictor.description == 'Predicts z from input x and latent variable y'
    assert len(predictor.inputs) == 1
    assert predictor.inputs[0] == x
    assert len(predictor.outputs) == 1
    assert predictor.outputs[0] == z
    assert len(predictor.latent_variables) == 1
    assert predictor.latent_variables[0] == y
    assert predictor.training_data == 'training_data_key'
Code example #4
def test_graph_default_training_data():
    """Test that default training data list isn't shared."""
    # create two serialized graph predictors with no defined training data
    gp1raw = {'config': {'name': 'one', 'description': '', 'predictors': [], 'type': 'Graph'},
              'archived': False, 'module_type': 'PREDICTOR', 'display_name': 'one'}
    gp2raw = {'config': {'name': 'two', 'description': '', 'predictors': [], 'type': 'Graph'},
              'archived': False, 'module_type': 'PREDICTOR', 'display_name': 'two'}

    # build them, populating the default empty list of training data
    gp1: GraphPredictor = Predictor.build(gp1raw)
    gp2: GraphPredictor = Predictor.build(gp2raw)

    # check it is empty
    assert len(gp1.training_data) == 0
    assert len(gp2.training_data) == 0

    # add training data to one of them
    gp1.training_data.append(GemTableDataSource(uuid.uuid4(), 1))

    # check that the training data doesn't bleed into both
    assert len(gp1.training_data) == 1
    assert len(gp2.training_data) == 0
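The pitfall this test guards against is Python's shared-mutable-default behavior: if every deserialized predictor were handed the same default list, appending training data to one would silently mutate the others. A generic illustration of that failure mode and its usual fix, not citrine's actual implementation:

# Generic, hypothetical classes illustrating the shared-default bug and the fix.
class Sharing:
    def __init__(self, training_data=[]):   # one list object reused across all calls
        self.training_data = training_data


class NotSharing:
    def __init__(self, training_data=None):
        self.training_data = training_data if training_data is not None else []


a, b = Sharing(), Sharing()
a.training_data.append("row")
assert len(b.training_data) == 1   # leaked: b sees the row appended to a

c, d = NotSharing(), NotSharing()
c.training_data.append("row")
assert len(d.training_data) == 0   # isolated: each instance gets a fresh list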
Code example #5
def test_polymorphic_legacy_deserialization(valid_simple_ml_predictor_data):
    """Ensure that a polymorphically deserialized SimplePredictor looks sane."""
    predictor: SimpleMLPredictor = Predictor.build(
        valid_simple_ml_predictor_data)
    assert predictor.name == 'ML predictor'
    assert predictor.description == 'Predicts z from input x and latent variable y'
    assert len(predictor.inputs) == 1
    assert predictor.inputs[0] == RealDescriptor("x", 0, 100, "")
    assert len(predictor.outputs) == 1
    assert predictor.outputs[0] == RealDescriptor("z", 0, 100, "")
    assert len(predictor.latent_variables) == 1
    assert predictor.latent_variables[0] == RealDescriptor("y", 0, 100, "")
    assert len(predictor.training_data) == 1
    assert predictor.training_data[0].table_id == UUID(
        'e5c51369-8e71-4ec6-b027-1f92bdc14762')
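For contrast with the deserialization path above, a hedged construction sketch: assuming SimpleMLPredictor accepts name, description, inputs, outputs, latent_variables and training_data keyword arguments (the keyword names and the table version are assumptions, not taken from the source), an equivalent predictor could be built directly:

# Hedged sketch; keyword arguments and the table version are assumptions.
predictor = SimpleMLPredictor(
    name='ML predictor',
    description='Predicts z from input x and latent variable y',
    inputs=[RealDescriptor("x", 0, 100, "")],
    outputs=[RealDescriptor("z", 0, 100, "")],
    latent_variables=[RealDescriptor("y", 0, 100, "")],
    training_data=[GemTableDataSource(UUID('e5c51369-8e71-4ec6-b027-1f92bdc14762'), 1)])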
Code example #6
def test_invalid_predictor_type(invalid_predictor_data):
    """Ensures we raise proper exception when an invalid type is used."""
    with pytest.raises(ValueError):
        Predictor.build(invalid_predictor_data)
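The fixture's contents are not shown; a hedged sketch of what an invalid payload might look like, modeled on the Graph payload in code example #4 but with an unrecognized type string:

# Hypothetical fixture contents; only the unrecognized 'type' value matters here.
invalid_predictor_data = {
    'config': {'name': 'bad', 'description': '', 'type': 'NotARealPredictorType'},
    'archived': False, 'module_type': 'PREDICTOR', 'display_name': 'bad'}
with pytest.raises(ValueError):
    Predictor.build(invalid_predictor_data)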
Code example #7
    def build(self, data: dict) -> Predictor:
        """Build an individual Predictor."""
        predictor: Predictor = Predictor.build(data)
        predictor._session = self.session
        predictor._project_id = self.project_id
        return predictor
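Because build attaches the collection's session and project id, the returned object is bound to the project and can be passed straight back into the collection's other calls. A hedged usage sketch; `project` and `raw_predictor_data` are assumptions:

# Hedged sketch: `project` and `raw_predictor_data` are assumed to exist already.
predictor = project.predictors.build(raw_predictor_data)
# The attached _session and _project_id let the built object participate in
# subsequent collection calls, e.g. persisting a modified copy.
project.predictors.update(predictor)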