from citrination_client.base.errors import CitrinationClientError
from citrination_client.models.design.target import Target


def test_target_initialization():
    """
    Tests that the design target must be initialized
    with an objective that is either Min or Max
    """

    try:
        Target(name="Band gap", objective="asdf")
        assert False, "Target class should require that objective be one of Min or Max"
    except CitrinationClientError:
        pass

    # These initializations should not throw an error
    Target(name="Band gap", objective="Min")
    Target(name="Band gap", objective="Max")
Example #2
import time

import citrination_client.models.design.constraints as cc_constraints
from citrination_client.models.design.target import Target

def get_candidate_recipes(self):
    # Build the list of design constraints from the stored
    # constraint dictionaries
    constraints = []
    for prop_name, val in self.constraints.items():
        constraints.append(
            cc_constraints.RealValueConstraint('Property ' + prop_name,
                                               val))
    for prop_name, lmts in self.range_constraints.items():
        constraints.append(
            cc_constraints.RealRangeConstraint('Property ' + prop_name,
                                               lmts[0], lmts[1]))
    for prop_name, cats in self.categorical_constraints.items():
        constraints.append(
            cc_constraints.CategoricalConstraint('Property ' + prop_name,
                                                 cats))
    tgt_name = 'Property ' + list(self.target.keys())[0]
    tgt_val = list(self.target.values())[0]
    tgt = Target(tgt_name, tgt_val)
    # DOC: cc.submit_design_run(
    #       data_view_id,
    #       num_candidates (int in [1,20]),
    #       effort (int in [1,30]),
    #       target=None, constraints=[],
    #       sampler='Default')
    msg = 'Designing for: \nTarget: {} \nConstraints: {} \nRange constraints: {} \nCategorical constraints: {}'.format(
        self.target, self.constraints, self.range_constraints,
        self.categorical_constraints)
    if self.verbose:
        self.message_callback(msg)
    self.add_to_history(msg)
    des = self.citrination_client.client.submit_design_run(
        self.dataview_id, self.n_candidates, self.design_effort, tgt,
        constraints)
    # Poll the design run until it reports 100% progress
    fin = False
    while not fin:
        time.sleep(10)
        stat = self.citrination_client.client.get_design_run_status(
            self.dataview_id, des.uuid)
        if self.verbose:
            self.message_callback('design finished: {} ({}/100)'.format(
                stat.finished(), stat.progress))
        if int(stat.progress) == 100:
            fin = True
    desres = self.citrination_client.client.get_design_run_results(
        self.dataview_id, des.uuid)
    for result in desres.best_materials:
        self.best_materials.append(result)
    for result in desres.next_experiments:
        self.next_experiments.append(result)
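The constraint dictionaries consumed above map property names to a fixed value, a (lower, upper) pair, and a list of allowed categories, respectively. A minimal sketch of how a caller might populate them before requesting recipes; the designer object and all property names here are hypothetical:

designer.constraints = {'ligand_fraction': 0.5}              # hypothetical: pin a value
designer.range_constraints = {'temperature': (300., 500.)}   # hypothetical: (lower, upper)
designer.categorical_constraints = {'solvent': ['water', 'ethanol']}  # hypothetical categories
designer.target = {'r0_sphere': 'Max'}                       # property name -> objective

designer.get_candidate_recipes()
print(designer.best_materials)
print(designer.next_experiments)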
Example #3
def run_sequential_learning(client: CitrinationClient, view_id: int,
                            dataset_id: int,
                            num_candidates_per_iter: int,
                            design_effort: int, wait_time: int,
                            num_sl_iterations: int,
                            input_properties: List[str],
                            target: List[str], print_output: bool,
                            true_function: Callable[[np.ndarray], float],
                            score_type: str,
                            ) -> Tuple[List[Tuple[float, float]], List[float]]:
    '''Runs SL design

    :param client: Client object
    :type client: CitrinationClient
    :param view_id: View ID
    :type view_id: int
    :param dataset_id: Dataset ID
    :type dataset_id: int
    :param num_candidates_per_iter: Candidates in a batch
    :type num_candidates_per_iter: int
    :param design_effort: Effort from 1-30
    :type design_effort: int
    :param wait_time: Wait time in seconds before polling API
    :type wait_time: int
    :param num_sl_iterations: SL iterations to run
    :type num_sl_iterations: int
    :param input_properties: Inputs
    :type input_properties: List[str]
    :param target: two-element list: the output property name and the
        objective, one of "Min" or "Max", e.g. ["Band gap", "Max"]
    :type target: List[str]
    :param print_output: Whether or not to print outputs
    :type print_output: bool
    :param true_function: Actual function for evaluating measured/true values
    :type true_function: Callable[[np.ndarray], float]
    :param score_type: MLI or MEI
    :type score_type: str
    :return: 2-tuple: list of the best predicted (value, uncertainty) pair
        from each iteration; list of the best measured value from each iteration
    :rtype: Tuple[List[Tuple[float, float]], List[float]]
    '''

    best_sl_pred_vals = []
    best_sl_measured_vals = []

    _wait_on_ingest(client, dataset_id, wait_time, print_output)

    for i in range(num_sl_iterations):
        if print_output:
            print(f"\n---STARTING SL ITERATION #{i+1}---")

        _wait_on_ingest(client, dataset_id, wait_time, print_output)
        _wait_on_data_view(client, dataset_id, view_id, wait_time, print_output)

        # Submit a design run through the models sub-client
        design_id = client.models.submit_design_run(
            data_view_id=view_id,
            num_candidates=num_candidates_per_iter,
            effort=design_effort,
            target=Target(*target),
            constraints=[],
            sampler="Default"
        ).uuid

        if print_output:
            print(f"Created design run with ID {design_id}")

        _wait_on_design_run(client, design_id, view_id, wait_time, print_output)

        # Collect candidates as (value, uncertainty) pairs:
        # best_materials holds the MEI (maximum expected improvement)
        # ranking, next_experiments the MLI (maximum likelihood of
        # improvement) ranking
        if score_type == "MEI":
            candidates = client.models.get_design_run_results(view_id, design_id).best_materials
        else:
            candidates = client.models.get_design_run_results(view_id, design_id).next_experiments
        values_w_uncertainties = [
            (
                m["descriptor_values"][target[0]],
                m["descriptor_values"][f"Uncertainty in {target[0]}"]
            ) for m in candidates
        ]

        # Find and save the best predicted value
        if target[1] == "Min":
            best_value_w_uncertainty = min(values_w_uncertainties, key=lambda x: x[0])
        else:
            best_value_w_uncertainty = max(values_w_uncertainties, key=lambda x: x[0])

        best_sl_pred_vals.append(best_value_w_uncertainty)
        if print_output:
            print(f"SL iter #{i+1}, best predicted (value, uncertainty) = {best_value_w_uncertainty}")

        # Update dataset w/ new candidates
        new_x_vals = []
        for material in candidates:
            new_x_vals.append(np.array(
                [float(material["descriptor_values"][x]) for x in input_properties]
            ))

        temp_dataset_fpath = f"design-{design_id}.json"
        write_dataset_from_func(true_function, temp_dataset_fpath, new_x_vals)
        upload_data_and_get_id(
            client,
            "", # No name needed for updating a dataset
            temp_dataset_fpath,
            given_dataset_id=dataset_id
        )

        _wait_on_ingest(client, dataset_id, wait_time, print_output)

        if print_output:
            print(f"Dataset updated: {len(new_x_vals)} candidates added")

        query_dataset = PifSystemReturningQuery(
            size=9999,
            query=DataQuery(
                dataset=DatasetQuery(
                    id=Filter(equal=str(dataset_id)))))
        query_result = client.search.pif_search(query_dataset)

        if print_output:
            print(f"New dataset contains {query_result.total_num_hits} PIFs")

        # Update measured values in new dataset
        dataset_y_values = []
        for hit in query_result.hits:
            # Assume last prop is output if following this script
            dataset_y_values.append(
                float(hit.system.properties[-1].scalars[0].value)
            )

        if target[1] == "Min":
            best_sl_measured_vals.append(min(dataset_y_values))
        else:
            best_sl_measured_vals.append(max(dataset_y_values))

        # Retrain model w/ wait times
        client.data_views.retrain(view_id)
        _wait_on_data_view(client, dataset_id, view_id, wait_time, print_output)

    if print_output:
        print("SL finished!\n")

    return (best_sl_pred_vals, best_sl_measured_vals)
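A hypothetical invocation of run_sequential_learning; the IDs, property names, and the ground-truth function below are placeholders standing in for your own setup:

from os import environ

import numpy as np
from citrination_client import CitrinationClient

def noisy_parabola(x: np.ndarray) -> float:
    # Hypothetical ground-truth function used to "measure" new candidates
    return float(-np.sum((x - 0.5) ** 2))

client = CitrinationClient(environ.get("CITRINATION_API_KEY"),
                           "https://citrination.com")

pred_vals, measured_vals = run_sequential_learning(
    client=client,
    view_id=1234,                 # hypothetical data view ID
    dataset_id=5678,              # hypothetical dataset ID
    num_candidates_per_iter=10,
    design_effort=20,
    wait_time=30,
    num_sl_iterations=3,
    input_properties=["Property x0", "Property x1"],
    target=["Property y", "Max"],
    print_output=True,
    true_function=noisy_parabola,
    score_type="MLI",
)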
Example #4
from os import environ
from time import sleep

from citrination_client import CitrinationClient
from citrination_client.models.design.constraints import *
from citrination_client.models.design.target import Target

#### Set up a citrination client
client = CitrinationClient(environ.get("CITRINATION_API_KEY"),
                           "https://citrination.com")

#### Set the dataset and data view IDs, and grab the models client
dataset_id = 111
data_view_id = 97
model_client = client.models

#### Submit a design request; the objective may be "Min", "Max",
#### or a scalar value to target (here, 10 for 'Property r0_sphere')
design_uuid = model_client.submit_design_run(data_view_id,
                                             num_candidates=20,
                                             effort=10,
                                             target=Target(
                                                 'Property r0_sphere', 10),
                                             constraints=[],
                                             sampler="Default").uuid

#### Wait for design to finish
fin = False

while not fin:

    sleep(10)
    stat = model_client.get_design_run_status(data_view_id, design_uuid)
    print('design finished: {} ({}/100)'.format(stat.finished(),
                                                stat.progress))

    if int(stat.progress) == 100:
        fin = True
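Once the loop exits, the results can be pulled from the same models client; best_materials and next_experiments hold the best-material and next-experiment candidates seen in the earlier examples (the property name below is carried over from the design request above):

#### Retrieve the finished design results
results = model_client.get_design_run_results(data_view_id, design_uuid)

for material in results.best_materials:
    print(material["descriptor_values"]["Property r0_sphere"])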