Example #1
    def process_outputs(self, result: Result):
        # Get basic task information
        smiles, = result.args

        # Release nodes for use by other processes
        self.rec.release("simulation", 1)

        # If successful, add to the database
        if result.success:
            # Mark that we've had another complete result
            self.n_evaluated += 1
            self.logger.info(f'Success! Finished screening {self.n_evaluated}/{self.n_to_evaluate} molecules')

            # Determine whether to start re-training
            if self.n_evaluated % self.n_complete_before_retrain == 0:
                if self.update_in_progress.is_set():
                    self.logger.info('Waiting until previous training run completes.')
                else:
                    self.logger.info('Starting retraining.')
                    self.start_training.set()
            self.logger.info(f'{self.n_complete_before_retrain - self.n_evaluated % self.n_complete_before_retrain} '
                             'results needed until we re-train again')

            # Store the data in a molecule data object
            data = MoleculeData.from_identifier(smiles=smiles)
            opt_records, hess_records = result.value
            for r in opt_records:
                data.add_geometry(r)
            for r in hess_records:
                data.add_single_point(r)
            data.update_thermochem()
            apply_recipes(data)

            # Attach the data source for the molecule
            data.subsets.append(self.search_space_name)
            
            # Add the IPs to the result object
            result.task_info["ip"] = data.oxidation_potential.copy()

            # Add to database
            with open(self.output_dir.joinpath('moldata-records.json'), 'a') as fp:
                print(json.dumps([datetime.now().timestamp(), data.json()]), file=fp)
            self.database.update_molecule(data)

            # Write to disk
            with open(self.output_dir.joinpath('qcfractal-records.json'), 'a') as fp:
                for r in opt_records + hess_records:
                    print(r.json(), file=fp)
            self.logger.info(f'Added complete calculation for {smiles} to database.')
            
            # Check whether all requested molecules have been screened
            if self.n_evaluated >= self.n_to_evaluate:
                self.logger.info('No more molecules left to screen')
                self.done.set()
        else:
            self.logger.info(f'Computations failed for {smiles}. Check JSON file for stacktrace')

        # Write out the result to disk
        with open(self.output_dir.joinpath('simulation-results.json'), 'a') as fp:
            print(result.json(exclude={'value'}), file=fp)
        self.logger.info('Processed simulation task.')
Example #2
    def send_inputs(self,
                    *input_args: Any,
                    method: Optional[str] = None,
                    input_kwargs: Optional[Dict[str, Any]] = None,
                    topic: str = 'default'):
        """Send inputs to be computed

        Args:
            *input_args (Any): Positional arguments to a function
            method (str): Name of the method to run. Optional
            input_kwargs (dict): Any keyword arguments for the function being run
            topic (str): Topic for the queue, which sets the topic for the result.
        """

        # Make fake kwargs, if needed
        if input_kwargs is None:
            input_kwargs = dict()

        # Create a new Result object
        result = Result((input_args, input_kwargs), method=method)

        # Push the serialized value to the method server
        if self.use_pickle:
            result.pickle_data()
        self.outbound.put(result.json(exclude_unset=True), topic=topic)
        logger.info(f'Client sent a {method} task with topic {topic}')
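A plausible client-side round trip, combining send_inputs above with get_result from Example #8. This is a sketch under assumptions: the `queues` object, the method name, and the SMILES argument are illustrative, not taken from the excerpts.

# Hypothetical usage (assumes `queues` exposes the send_inputs/get_result
# methods shown in Examples #2 and #8)
queues.send_inputs('C1=CC=CC=C1',              # positional argument for the task
                   method='compute_vertical',  # method name is an assumption
                   topic='simulate')

# Block for up to 60 seconds; get_result returns None if the timeout is reached
result = queues.get_result(timeout=60, topic='simulate')
if result is not None and result.success:
    print(result.value)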
Example #3
    def send_result(self, result: Result, topic: str = 'default'):
        """Send a value to a client

        Args:
            result (Result): Result object to communicate back
            topic (str): Topic of the calculation
        """
        if self.use_pickle:
            result.pickle_data()
        self.outbound.put(result.json(), topic=topic)
Example #4
def output_result(queues: MethodServerQueues, topic: str, result_obj: Result,
                  wrapped_output: Tuple[Any, float, float]):
    """Submit the function result to the Redis queue

    Args:
        queues: Queues used to communicate with Redis
        topic: Topic to assign in output queue
        result_obj: Result object containing the inputs, to be sent back with outputs
        wrapped_output: Result from invoking the function and the inputs
    """
    value, start_time, runtime = wrapped_output
    result_obj.time_compute_started = start_time
    result_obj.set_result(value, runtime)
    return queues.send_result(result_obj, topic=topic)
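output_result appears designed to run when the wrapped function finishes. A minimal sketch of that wiring, assuming a standard future with add_done_callback and that the wrapped call yields the (value, start_time, runtime) tuple; `future`, `queues`, `topic`, and `result_obj` are all assumed names:

# Hypothetical wiring (not from the source): forward a completed future's
# (value, start_time, runtime) tuple back through the queues
future.add_done_callback(
    lambda fut: output_result(queues, topic, result_obj, fut.result()))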
Example #5
    def process_outputs(self, result: Result):
        # Get basic task information
        smiles, n_nodes, _ = result.args

        # Release nodes for use by other processes
        self.rec.release("simulation", n_nodes)

        # If successful, add to the database
        if result.success:
            # Unpack the QCFractal record from the result
            record = result.value

            # Write to disk
            with open(self.output_dir.joinpath('qcfractal-records.json'),
                      'a') as fp:
                print(record.json(), file=fp)
            self.logger.info(
                f'Added complete calculation for {smiles} to database.')
        else:
            self.logger.info(
                f'Computations failed for {smiles}. Check JSON file for stacktrace'
            )

        # Write out the result to disk
        with open(self.output_dir.joinpath('simulation-results.json'),
                  'a') as fp:
            print(result.json(exclude={'value'}), file=fp)
Example #6
    def update_weights(self, result: Result):
        """Process the results of the saved model"""

        # Save results to disk
        with open(self.output_dir.joinpath('training-results.json'),
                  'a') as fp:
            print(result.json(exclude={'inputs', 'value'}), file=fp)

        # Make sure the run completed
        model_id = result.task_info['model_id']
        if not result.success:
            self.logger.warning(f'Training failed for {model_id}')
        else:
            # Update weights
            weights, history = result.value
            self.mpnns[model_id].set_weights(weights)

            # Print out some status info
            self.logger.info(f'Model {model_id} finished training.')
            with open(self.output_dir.joinpath('training-history.json'),
                      'a') as fp:
                print(repr(history), file=fp)

        # Send the model to inference
        self.start_inference.set()
        self.ready_models.put(self.mpnns[model_id])

        # Mark that a model has finished training and trigger inference if all done
        self.num_training_complete += 1
        self.logger.info(
            f'Processed training task. {len(self.mpnns) - self.num_training_complete} models left to go'
        )
Example #7
    def get_task(self, timeout: Optional[int] = None) -> Tuple[str, Result]:
        """Get a task object

        Args:
            timeout (int): Timeout for waiting for a task
        Returns:
            - (str) Topic of the calculation. Used in defining which queue to use to send the results
            - (Result) Task description
        Raises:
            TimeoutException: If the timeout on the queue is reached
            KillSignalException: If the queue receives a kill signal
        """

        # Pull a record off of the queue
        output = self.inbound.get(timeout)

        # Raise an exception on a timeout or a kill signal
        if output is None:
            raise TimeoutException('Listening on task queue timed out')
        elif output[1] == "null":
            raise KillSignalException('Kill signal received on task queue')
        topic, message = output

        # Get the message
        task = Result.parse_raw(message)
        if self.use_pickle:
            task.unpickle_data()
        task.mark_input_received()
        return topic, task
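Combining get_task above with send_result (Example #3) and the timing fields from Example #4 suggests a worker loop like the following. This is a sketch under assumptions: the `server` object and the run_method dispatcher are hypothetical; the exception classes come from the docstring above.

# Hypothetical worker loop built from Examples #3, #4, and #7
while True:
    try:
        topic, task = server.get_task(timeout=30)
    except TimeoutException:
        continue   # No work yet; keep polling
    except KillSignalException:
        break      # Shut down cleanly on the kill signal

    # run_method is a hypothetical dispatcher returning (value, start, runtime)
    value, start_time, runtime = run_method(task)
    task.time_compute_started = start_time
    task.set_result(value, runtime)
    server.send_result(task, topic=topic)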
Example #8
    def get_result(self,
                   timeout: Optional[int] = None,
                   topic: Optional[str] = None) -> Optional[Result]:
        """Get a value from the MethodServer

        Args:
            timeout (int): Timeout for waiting for a value
            topic (str): What topic of task to wait for. Set to ``None`` to pull all topics
        Returns:
            (Result) Result from a computation, or ``None`` if timeout is met
        """

        # Get a value
        output = self.inbound.get(timeout=timeout, topic=topic)
        logger.debug(f'Received value: {str(output)[:50]}')

        # If None, return because this is a timeout
        if output is None:
            return output
        topic, message = output

        # Parse the value and mark it as complete
        result_obj = Result.parse_raw(message)
        if self.use_pickle:
            result_obj.unpickle_data()
        result_obj.mark_result_received()

        # Some logging
        logger.info(
            f'Client received a {result_obj.method} result with topic {topic}')

        return result_obj
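A hedged sketch of a client collection loop around get_result, handling the None return on timeout; `queues` and `n_submitted` are assumed names:

# Hypothetical collection loop for n_submitted in-flight tasks
n_received = 0
while n_received < n_submitted:
    result = queues.get_result(timeout=120)
    if result is None:
        continue   # Timed out; poll again
    n_received += 1
    if not result.success:
        print(f'{result.method} task failed; see the result logs for details')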
Example #9
    def update_weights(self, result: Result):
        """Process the results of the saved model"""
        self.rec.release('training', 1)

        # Save results to disk
        with open(self.output_dir.joinpath('training-results.json'),
                  'a') as fp:
            print(result.json(exclude={'inputs', 'value'}), file=fp)

        # Make sure the run completed
        model_id = result.task_info['model_id']
        if not result.success:
            self.logger.warning(f'Training failed for {model_id}')
            return

        # Update weights
        weights, history = result.value
        self.mpnns[model_id].set_weights(weights)

        # Saving the model to disk is disabled due to performance problems
        # model_folder = self.output_dir.joinpath('models')
        # model_folder.mkdir(exist_ok=True)
        # self.mpnns[model_id].save(model_folder.joinpath(f'model-{model_id}_t{self.inference_batch}.h5'))

        # Print out some status info
        self.logger.info(f'Model {model_id} finished training.')
        with open(self.output_dir.joinpath('training-history.json'),
                  'a') as fp:
            print(repr(history), file=fp)
Example #10
    def update_weights(self, result: Result):
        """Process the results of the saved model"""

        # Save results to disk
        with open(self.output_dir.joinpath('training-results.json'),
                  'a') as fp:
            print(result.json(exclude={'inputs', 'value'}), file=fp)

        # Make sure the run completed
        model_id = result.task_info['model_id']
        level = result.task_info['level']
        if not result.success:
            self.logger.warning(
                f'Training failed for level {level} model {model_id}')
        else:
            self.logger.info(
                f'Training succeeded for level {level} model {model_id}')

            # Get the update message
            output = result.value
            assert str(output) != ""  # TODO (wardlt): Figure out if `iter` doesn't work with lazy_object_proxy
            message, history = output

            # Update the model
            if level == 'base':
                model_collection = self.search_spec.base_model
            else:
                level_id = self.search_spec.levels.index(level)
                model_collection = self.search_spec.model_levels[level_id]
            model_collection.update_model(model_id, message)
            self.logger.info(
                f'Saved updated model to {model_collection.model_paths[model_id]}'
            )

            # Print out some status info
            with open(self.output_dir.joinpath('training-history.json'),
                      'a') as fp:
                print(repr(history), file=fp)

        # Send the model to inference
        self.ready_models.put((level, model_id))

        # Mark that a model has finished training and trigger inference if all done
        self.num_training_complete += 1
Example #11
    def process_outputs(self, result: Result):
        # Get basic task information
        smiles, n_nodes = result.args

        # Release nodes for use by other processes
        self.rec.release("simulation", n_nodes)

        # If successful, add to the database
        if result.success:
            # Store the data in a molecule data object
            data = MoleculeData.from_identifier(smiles=smiles)
            opt_records, hess_records = result.value
            for r in opt_records:
                data.add_geometry(r)
            for r in hess_records:
                data.add_single_point(r)

            # Add to database
            self.mongo.update_molecule(data)
            with open(self.output_dir.joinpath('moldata-records.json'),
                      'a') as fp:
                print(json.dumps([datetime.now().timestamp(),
                                  data.json()]),
                      file=fp)
            self.database.append(data)

            # Write to disk
            with open(self.output_dir.joinpath('qcfractal-records.json'),
                      'a') as fp:
                for r in opt_records + hess_records:
                    print(r.json(), file=fp)
            self.logger.info(
                f'Added complete calculation for {smiles} to database.')
        else:
            self.logger.info(
                f'Computations failed for {smiles}. Check JSON file for stacktrace'
            )

        # Write out the result to disk
        with open(self.output_dir.joinpath('simulation-results.json'),
                  'a') as fp:
            print(result.json(exclude={'value'}), file=fp)
Example #12
    def update_weights(self, result: Result):
        """Process the results of the saved model"""
        self.rec.release('training', 1)

        # Save results to disk
        with open(self.output_dir.joinpath('training-results.json'), 'a') as fp:
            print(result.json(exclude={'inputs', 'value'}), file=fp)

        # Make sure the run completed
        model_id = result.task_info['model_id']
        if not result.success:
            self.logger.warning(f'Training failed for {model_id}')
            return

        # Update weights
        weights, history = result.value
        self.mpnns[model_id].set_weights(weights)
        
        # Print out some status info
        self.logger.info(f'Model {model_id} finished training.')
        with open(self.output_dir.joinpath('training-history.json'), 'a') as fp:
            print(repr(history), file=fp)
Example #13
    def record_qc(self, result: Result):
        # Get basic task information
        smiles, = result.args
        
        # Release nodes for use by other processes
        self.rec.release("simulation", self.nodes_per_qc)

        # If successful, add to the database
        if result.success:
            # Store the data in a molecule data object
            data = MoleculeData.from_identifier(smiles=smiles)
            opt_records, hess_records = result.value
            for r in opt_records:
                data.add_geometry(r)
            for r in hess_records:
                data.add_single_point(r)
            apply_recipes(data)  # Compute the IP

            # Add to database
            with open(self.output_dir.joinpath('moldata-records.json'), 'a') as fp:
                print(json.dumps([datetime.now().timestamp(), data.json()]), file=fp)
            self.database.append(data)
            
            # If the database is complete, set "done"
            if len(self.database) >= self.target_size:
                self.logger.info(f'Database has reached target size of {len(self.database)}. Exiting')
                self.done.set()

            # Write to disk
            with open(self.output_dir.joinpath('qcfractal-records.json'), 'a') as fp:
                for r in opt_records + hess_records:
                    print(r.json(), file=fp)
            self.logger.info(f'Added complete calculation for {smiles} to database.')
        else:
            self.logger.info(f'Computations failed for {smiles}. Check JSON file for stacktrace')

        # Write out the result to disk
        with open(self.output_dir.joinpath('simulation-results.json'), 'a') as fp:
            print(result.json(exclude={'value'}), file=fp)
Example #14
    def record_qc(self, result: Result):
        # Get basic task information
        inchi = result.task_info['inchi']
        self.logger.info(f'{result.method} computation for {inchi} finished')

        # Release nodes for use by other processes
        self.rec.release("simulation", self.nodes_per_qc)

        # If successful, add to the database
        if result.success:
            self.n_evaluated += 1

            # Check if we are done
            if self.n_evaluated >= self.n_to_evaluate:
                self.logger.info('We have evaluated as many molecules as requested. Exiting')
                self.done.set()

            # Store the data in a molecule data object
            data = self.database.get_molecule_record(inchi=inchi)  # Get existing information
            opt_records, spe_records = result.value
            for r in opt_records:
                data.add_geometry(r, overwrite=True)
            for r in spe_records:
                data.add_single_point(r)
            apply_recipes(data)  # Compute the IP

            # Add ionization potentials to the task_info
            result.task_info['ips'] = data.oxidation_potential

            # Add to database
            with open(self.output_dir.joinpath('moldata-records.json'), 'a') as fp:
                print(json.dumps([datetime.now().timestamp(), data.json()]), file=fp)
            self.database.update_molecule(data)

            # Check whether the high-fidelity property is now complete
            if self.output_property.split(".")[-1] in data.oxidation_potential:
                self.until_retrain -= 1
                self.logger.info(f'High fidelity complete. {self.until_retrain} before retraining')
            else:
                self.to_reevaluate.append(data)
                self.until_reevaluate -= 1
                self.logger.info(f'Low fidelity complete. {self.until_reevaluate} before re-ordering')

            # Check if we should re-do training
            if self.until_retrain <= 0 and not self.done.is_set():
                # If we have enough new
                self.logger.info('Triggering training to start')
                self.start_training.set()
            elif self.until_reevaluate <= 0 and not (self.start_training.is_set() or self.done.is_set()):
                # Restart inference if we have had enough complete computations
                self.logger.info('Triggering inference to begin again')
                self.start_inference.set()

            # Write to disk
            with open(self.output_dir.joinpath('qcfractal-records.json'), 'a') as fp:
                for r in opt_records + spe_records:
                    print(r.json(), file=fp)
            self.logger.info(f'Added complete calculation for {inchi} to database.')
        else:
            self.logger.info(f'Computations failed for {inchi}. Check JSON file for stacktrace')

        # Write out the result to disk
        with open(self.output_dir.joinpath('simulation-results.json'), 'a') as fp:
            print(result.json(exclude={'value'}), file=fp)
Example #15
    def process_outputs(self, result: Result):
        # Release nodes for use by other processes
        self.rec.release("simulation", 1)

        # Unpack the task information
        inchi = result.task_info['inchi']
        method = result.method
        level = result.task_info['level']

        # If successful, add to the database
        self.logger.info(f'Completed {method} at {level} for {inchi}')
        if result.success:
            # Store the data in a molecule data object
            data = self.database.get_molecule_record(inchi=inchi)
            if method == 'relax_structure':
                data.add_geometry(result.value)
            else:
                data.add_single_point(result.value)
            data.update_thermochem()
            apply_recipes(data)

            # If there are still more computations left to complete a level, re-add it to the priority queue
            # This happens only if a new geometry was created
            cur_recipe = get_recipe_by_name(level)
            try:
                to_run = cur_recipe.get_required_calculations(
                    data, self.search_spec.oxidation_state)
            except KeyError:
                to_run = []
            if len(to_run) > 0 and result.method == 'relax_structure':
                self.logger.info(
                    'Not yet done with the recipe. Re-adding to task queue')
                self.task_queue.put(
                    _PriorityEntry(
                        inchi=inchi,
                        item=result.task_info,
                        score=-np.inf  # Put it at the front of the queue
                    ))
            elif len(to_run) == 0:
                # Mark that we've had another complete result
                self.n_evaluated += 1
                self.logger.info(
                    f'Success! Finished screening {self.n_evaluated}/{self.n_to_evaluate} molecules'
                )

                # Determine whether to start re-training
                if self.n_evaluated % self.n_complete_before_retrain == 0:
                    if self.update_in_progress.is_set():
                        self.logger.info(
                            'Waiting until previous training run completes.')
                    else:
                        self.logger.info('Starting retraining.')
                        self.start_training.set()
                self.logger.info(
                    f'{self.n_complete_before_retrain - self.n_evaluated % self.n_complete_before_retrain}'
                    ' results needed until we re-train again')

            # Attach the data source for the molecule
            data.subsets.append(self.search_space_name)

            # Add the IPs to the result object
            result.task_info["ip"] = data.oxidation_potential.copy()
            result.task_info["ea"] = data.reduction_potential.copy()

            # Add to database
            with open(self.output_dir.joinpath('moldata-records.json'),
                      'a') as fp:
                print(json.dumps([datetime.now().timestamp(),
                                  data.json()]),
                      file=fp)
            self.database.update_molecule(data)

            # Write to disk
            with gzip.open(self.output_dir.joinpath('qcfractal-records.json.gz'), 'at') as fp:
                print(result.value.json(), file=fp)
            self.logger.info(
                f'Added complete calculation for {inchi} to database.')
        else:
            self.failed_molecules.add(inchi)
            self.logger.info(
                f'Computations failed for {inchi}. Check JSON file for stacktrace'
            )

        # Write out the result to disk
        result.task_info['inputs'] = str(result.inputs)
        with open(self.output_dir.joinpath('simulation-results.json'),
                  'a') as fp:
            print(result.json(exclude={'inputs', 'value'}), file=fp)
        self.logger.info('Processed simulation task.')
Example #16
    def record_qc(self, result: Result):
        # Get basic task information
        inchi = result.task_info['inchi']
        self.logger.info(f'{result.method} computation for {inchi} finished')
        
        # Check if it failed due to a ManagerLost exception
        if (result.failure_info is not None
                and 'Task failure due to loss of manager' in result.failure_info.exception):
            # If so, resubmit it
            self.logger.info('Task failed due to manager loss. Resubmitting, as this task could still succeed')
            self.queues.send_inputs(*result.args, input_kwargs=result.kwargs, task_info=result.task_info,
                                    method=result.method, keep_inputs=True, topic='simulate')
            return

        # Release nodes for use by other processes
        self.rec.release("simulation", self.nodes_per_qc)

        # If successful, add to the database
        if result.success:
            self.n_evaluated += 1

            # Check if we are done
            if self.n_evaluated >= self.n_to_evaluate:
                self.logger.info('We have evaluated as many molecules as requested. Exiting')
                self.done.set()

            # Write outputs to disk
            opt_records, spe_records = result.value
            with open(self.output_dir.joinpath('..', '..', 'qcfractal-records.json'), 'a') as fp:
                for r in opt_records + spe_records:
                    r.extras['inchi'] = inchi
                    print(r.json(), file=fp)

            # Store the data in a molecule data object
            data = self.database.get_molecule_record(inchi=inchi)  # Get existing information
            store_success = False
            try:
                for r in opt_records:
                    data.add_geometry(r, overwrite=True)
                for r in spe_records:
                    data.add_single_point(r)
                store_success = True
            except UnmatchedGeometry:
                self.logger.warning(f'Failed to match {inchi} geometry to an existing record.'
                                    ' Tell Logan his hashes are broken again!')
            apply_recipes(data)  # Compute the IP

            # Add ionization potentials to the task_info
            result.task_info['ips'] = data.oxidation_potential
            result.task_info['eas'] = data.reduction_potential

            # Add to database
            with open(self.output_dir.joinpath('moldata-records.json'), 'a') as fp:
                print(json.dumps([datetime.now().timestamp(), data.json()]), file=fp)
            self.database.update_molecule(data)

            # Mark if we have completed a new record of the output property
            outputs = data.oxidation_potential if self.oxidize else data.reduction_potential
            if self.target_recipe.name in outputs:  # All SPE are complete
                self.until_retrain -= 1
                self.logger.info(f'High fidelity complete. {self.until_retrain} before retraining')
            elif result.task_info['method'] != "compute_single_point" and store_success:
                self.until_reevaluate -= 1
                self.logger.info(f'Low fidelity complete. {self.until_reevaluate} before re-ordering')
                if result.method == 'compute_vertical':
                    self.to_reevaluate['adiabatic'].append(data)
                else:
                    self.to_reevaluate['normal'].append(data)

            # Check if we should re-do training or re-run inference
            if self.until_retrain <= 0 and not self.done.is_set():
                # If we have enough new
                self.logger.info('Triggering training to start')
                self.start_training.set()
            elif self.until_reevaluate <= 0 and not (self.start_training.is_set() or self.done.is_set()):
                # Restart inference if we have had enough complete computations
                self.logger.info('Triggering inference to begin again')
                self.start_inference.set()

            self.logger.info(f'Added complete calculation for {inchi} to database.')
        else:
            self.logger.info(f'Computations failed for {inchi}. Check JSON file for stacktrace')

        # Write out the result to disk
        with open(self.output_dir.joinpath('simulation-results.json'), 'a') as fp:
            print(result.json(exclude={'value'}), file=fp)