Code example #1
    def executeInstanceStochasticAdaptiveRefinement(self, random_variable):
        """
        Method executing one instance of the UQ algorithm, i.e. a single Monte Carlo realization, possibly preceded by adaptive refinement (which occurs before the simulation run). To be called if the selected refinement strategy is stochastic_adaptive_refinement.

        Inputs:
        - self: an instance of the class.
        - random_variable: the realization of the random variable(s) for the current sample.

        Outputs:
        - qoi: list. It contains the quantities of interest.
        - time_for_qoi: float. Time needed to generate the sample.
        """

        current_index = self.solverWrapperIndex[0]
        # local variables
        pickled_coarse_model = self.pickled_model[0]
        pickled_reference_model_mapping = pickled_coarse_model
        pickled_coarse_project_parameters = self.pickled_project_parameters[0]
        pickled_custom_metric_refinement_parameters = self.pickled_custom_metric_refinement_parameters
        pickled_custom_remesh_refinement_parameters = self.pickled_custom_remesh_refinement_parameters
        current_analysis = self.analysis
        different_tasks = self.different_tasks
        mapping_flag = self.mapping_output_quantities
        adaptive_refinement_jump_to_finest_level = self.adaptive_refinement_jump_to_finest_level
        print_to_file = self.print_to_file
        current_local_contribution = self.current_local_contribution
        time_for_qoi = 0.0
        if (different_tasks is False):  # tasks all at once
            qoi,time_for_qoi = \
                mds.executeInstanceStochasticAdaptiveRefinementAllAtOnce_Wrapper(current_index,pickled_coarse_model,pickled_coarse_project_parameters,pickled_custom_metric_refinement_parameters,pickled_custom_remesh_refinement_parameters,random_variable,current_analysis,time_for_qoi,mapping_flag,adaptive_refinement_jump_to_finest_level,print_to_file,current_local_contribution)
        elif (different_tasks is True):  # multiple tasks
            if (current_index == 0):  # index = 0
                current_local_index = 0
                qoi,pickled_current_model,time_for_qoi = \
                    mds.executeInstanceStochasticAdaptiveRefinementMultipleTasks_Wrapper(current_index,pickled_coarse_model,pickled_coarse_project_parameters,pickled_custom_metric_refinement_parameters,pickled_custom_remesh_refinement_parameters,random_variable,current_local_index,current_analysis,time_for_qoi,mapping_flag,print_to_file,current_local_contribution)
                delete_object(pickled_current_model)
            else:  # index > 0
                for current_local_index in range(current_index + 1):
                    if ((adaptive_refinement_jump_to_finest_level is False) or
                        (adaptive_refinement_jump_to_finest_level is True and
                         (current_local_index == 0
                          or current_local_index == current_index))):
                        if (mapping_flag is False):
                            qoi,pickled_current_model,time_for_qoi = \
                                mds.executeInstanceStochasticAdaptiveRefinementMultipleTasks_Wrapper(current_index,pickled_coarse_model,pickled_coarse_project_parameters,pickled_custom_metric_refinement_parameters,pickled_custom_remesh_refinement_parameters,random_variable,current_local_index,current_analysis,time_for_qoi,mapping_flag,print_to_file,current_local_contribution)
                        elif (mapping_flag is True):
                            qoi,pickled_current_model,time_for_qoi = \
                                mds.executeInstanceStochasticAdaptiveRefinementMultipleTasks_Wrapper(current_index,pickled_coarse_model,pickled_coarse_project_parameters,pickled_custom_metric_refinement_parameters,pickled_custom_remesh_refinement_parameters,random_variable,current_local_index,current_analysis,time_for_qoi,mapping_flag,print_to_file,current_local_contribution,pickled_mapping_reference_model=pickled_reference_model_mapping)
                            delete_object(pickled_coarse_model)
                            del (pickled_coarse_model)
                        pickled_coarse_model = pickled_current_model
                        del (pickled_current_model)
                    else:  # not running since we jump from coarsest to finest level
                        pass
                delete_object(pickled_coarse_model)
        else:
            raise Exception(
                "Boolean variable different_tasks is not a boolean; instead it is equal to",
                different_tasks)
        return qoi, time_for_qoi
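The branch on adaptive_refinement_jump_to_finest_level above decides which local refinement steps actually run when current_index > 0: either every intermediate level, or only the coarsest and the finest one. Below is a minimal standalone sketch of that selection logic; the helper name local_indices_to_run and the example calls are illustrative assumptions, not part of the original class.

def local_indices_to_run(current_index, jump_to_finest_level):
    # Mirror of the loop condition above: which current_local_index values
    # execute for a solver wrapper working at the given current_index.
    if current_index == 0:
        return [0]
    if jump_to_finest_level:
        # jump directly from the coarsest to the finest level
        return [0, current_index]
    # otherwise every refinement step from 0 up to current_index runs
    return list(range(current_index + 1))

print(local_indices_to_run(3, False))  # [0, 1, 2, 3]
print(local_indices_to_run(3, True))   # [0, 3]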
Code example #2
    def SerializeModelParametersDeterministicAdaptiveRefinement(self):
        """
        Method serializing and pickling the Kratos Model and the Kratos Parameters of the problem. It builds self.pickled_model and self.pickled_project_parameters. To be called if the selected refinement strategy is deterministic_adaptive_refinement.

        Inputs:
        - self: an instance of the class.
        """

        self.SerializeModelParametersStochasticAdaptiveRefinement()  # prepare parameters and the model part of the coarsest level
        number_levels_to_serialize = self.solverWrapperIndex[0]
        # same routine as ExecuteInstanceConcurrentAdaptiveRefinement() to build models and parameters, but here we save the models and parameters
        pickled_coarse_model = self.pickled_model[0]
        pickled_coarse_project_parameters = self.pickled_project_parameters[0]
        pickled_custom_metric_refinement_parameters = self.pickled_custom_metric_refinement_parameters
        pickled_custom_remesh_refinement_parameters = self.pickled_custom_remesh_refinement_parameters
        current_analysis = self.analysis
        # generate the sample and prepare auxiliary variables we need
        fake_sample = self.fake_sample_to_serialize
        fake_computational_time = 0.0
        if (number_levels_to_serialize > 0):
            for current_level in range(number_levels_to_serialize + 1):
                fake_qoi,pickled_current_model,fake_computational_time = \
                    mds.executeInstanceStochasticAdaptiveRefinementMultipleTasks_Wrapper(number_levels_to_serialize,pickled_coarse_model,pickled_coarse_project_parameters,pickled_custom_metric_refinement_parameters,pickled_custom_remesh_refinement_parameters,fake_sample,current_level,current_analysis,fake_computational_time,mapping_flag=False,print_to_file=False,current_contribution=0)
                del (pickled_coarse_model)
                pickled_coarse_model = pickled_current_model
                # save if current level > 0 (level = 0 has already been saved)
                if (current_level > 0):
                    # save pickled and serialized model and parameters
                    self.pickled_model.append(pickled_current_model)
                    self.serialized_model.append(
                        pickle.loads(pickled_current_model))
                del (pickled_current_model)
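In the loop above only the levels greater than 0 are appended, because the coarsest model was already pickled by SerializeModelParametersStochasticAdaptiveRefinement(). The following self-contained sketch reproduces that bookkeeping with a toy placeholder instead of a Kratos model; the _ToyModel class and the serialize_levels helper are assumptions introduced purely for illustration.

import pickle

class _ToyModel:
    # placeholder standing in for a Kratos model refined up to a given level
    def __init__(self, level):
        self.level = level

def serialize_levels(number_levels_to_serialize):
    # level 0 is assumed to be pickled already, as in the method above
    pickled_model = [pickle.dumps(_ToyModel(0))]
    serialized_model = [_ToyModel(0)]
    if number_levels_to_serialize > 0:
        for current_level in range(number_levels_to_serialize + 1):
            pickled_current_model = pickle.dumps(_ToyModel(current_level))
            if current_level > 0:  # level = 0 has already been saved
                pickled_model.append(pickled_current_model)
                serialized_model.append(pickle.loads(pickled_current_model))
    return pickled_model, serialized_model

pickled, serialized = serialize_levels(2)
print([model.level for model in serialized])  # [0, 1, 2]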
Code example #3
    def SerializeModelParametersDeterministicAdaptiveRefinement(self):
        """
        Method serializing and pickling the Kratos Model and the Kratos Parameters of the problem. It builds self.pickled_model and self.pickled_project_parameters. To be called if the selected refinement strategy is deterministic_adaptive_refinement.

        Inputs:
        - self: an instance of the class.
        """

        # Serialize model and parameters of the coarsest level (level = 0).
        # If we are running with the MPI parallel type,
        # the model is serialized in an MPI task
        # with the same number of processes required by level = self.solverWrapperIndex[0].
        # This strategy works whether the solverWrapper instance is solving level 0
        # or levels > 0.
        self.SerializeModelParametersStochasticAdaptiveRefinement()
        # now serialize levels > 0
        number_levels_to_serialize = self.solverWrapperIndex[0]
        # same routine as executeInstanceStochasticAdaptiveRefinement() to build models and parameters, but here we save the models and parameters
        pickled_coarse_model = self.pickled_model[0]
        pickled_coarse_project_parameters = self.pickled_project_parameters[0]
        pickled_custom_metric_refinement_parameters = self.pickled_custom_metric_refinement_parameters
        pickled_custom_remesh_refinement_parameters = self.pickled_custom_remesh_refinement_parameters
        current_analysis = self.analysis
        # generate the sample and prepare auxiliary variables we need
        fake_sample = self.fake_sample_to_serialize
        fake_computational_time = 0.0
        if (number_levels_to_serialize > 0):
            for current_level in range(number_levels_to_serialize + 1):
                if not self.is_mpi:  # serial
                    fake_qoi,pickled_current_model,fake_computational_time = \
                        mds.executeInstanceStochasticAdaptiveRefinementMultipleTasks_Wrapper(number_levels_to_serialize,pickled_coarse_model,pickled_coarse_project_parameters,pickled_custom_metric_refinement_parameters,pickled_custom_remesh_refinement_parameters,fake_sample,current_level,current_analysis,fake_computational_time,mapping_flag=False,print_to_file=False,current_contribution=0)
                elif self.is_mpi and current_level == number_levels_to_serialize:  # MPI and we serialize level of interest
                    adaptive_refinement_jump_to_finest_level = self.adaptive_refinement_jump_to_finest_level
                    pickled_current_model = mpi_mds.SerializeDeterministicAdaptiveRefinementMPIModel_Wrapper(
                        current_level, pickled_coarse_model,
                        pickled_coarse_project_parameters,
                        pickled_custom_metric_refinement_parameters,
                        pickled_custom_remesh_refinement_parameters,
                        fake_sample, current_analysis, fake_computational_time,
                        adaptive_refinement_jump_to_finest_level)
                else:  # MPI parallel type and we do not serialize since it is not the level of interest
                    # we set pickled model equal to coarsest model as workaround
                    pickled_current_model = pickled_coarse_model
                del (pickled_coarse_model)
                pickled_coarse_model = pickled_current_model
                # save if current level > 0 (level = 0 has already been saved)
                if (current_level > 0):
                    # save pickled and serialized model and parameters
                    self.pickled_model.append(pickled_current_model)
                    # self.serialized_model.append(pickle.loads(get_value_from_remote(pickled_current_model))) # commented out since it causes problems when solving with PyCOMPSs
                    self.pickled_project_parameters.append(
                        pickled_coarse_project_parameters)
                    # self.serialized_project_parameters.append(pickle.loads(get_value_from_remote(pickled_coarse_project_parameters))) # commented out since it causes problems when solving with PyCOMPSs
                del (pickled_current_model)
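The branching above can be summarized as follows: in the serial case every level is rebuilt with the multiple-tasks wrapper, with MPI only the level of interest is serialized in its own MPI task, and the remaining levels simply reuse the coarsest pickled model as a workaround. Below is a small sketch of that decision; the function name and the returned strings are illustrative assumptions.

def serialization_branch(current_level, number_levels_to_serialize, is_mpi):
    # Which of the three branches above handles the given refinement level.
    if not is_mpi:
        # serial: every level is refined through the multiple-tasks wrapper
        return "serial multiple-tasks wrapper"
    if current_level == number_levels_to_serialize:
        # MPI: only the level of interest is serialized in an MPI task
        return "MPI serialization wrapper"
    # MPI but not the level of interest: reuse the coarsest pickled model
    return "coarsest pickled model reused (workaround)"

for level in range(3):
    print(level, serialization_branch(level, 2, is_mpi=True))
# 0 coarsest pickled model reused (workaround)
# 1 coarsest pickled model reused (workaround)
# 2 MPI serialization wrapper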
Code example #4
    def SerializeModelParametersDeterministicAdaptiveRefinement(self):
        self.SerializeModelParametersStochasticAdaptiveRefinement()  # prepare parameters and the model part of the coarsest level
        number_levels_to_serialize = self.solverWrapperIndex[0]
        # same routine as ExecuteInstanceConcurrentAdaptiveRefinement() to build models and parameters, but here we save the models and parameters
        pickled_coarse_model = self.pickled_model[0]
        pickled_coarse_project_parameters = self.pickled_project_parameters[0]
        pickled_custom_metric_refinement_parameters = self.pickled_custom_metric_refinement_parameters
        pickled_custom_remesh_refinement_parameters = self.pickled_custom_remesh_refinement_parameters
        current_analysis = self.analysis
        # generate the sample and prepare auxiliary variables we need
        fake_sample = self.fake_sample_to_serialize
        fake_computational_time = 0.0
        if (number_levels_to_serialize > 0):
            for current_level in range(number_levels_to_serialize + 1):
                if (current_level < number_levels_to_serialize):
                    fake_qoi,pickled_current_model,fake_computational_time = \
                        mds.executeInstanceStochasticAdaptiveRefinementMultipleTasks_Wrapper(number_levels_to_serialize,pickled_coarse_model,pickled_coarse_project_parameters,pickled_custom_metric_refinement_parameters,pickled_custom_remesh_refinement_parameters,fake_sample,current_level,current_analysis,fake_computational_time)
                    # mds.executeInstanceStochasticAdaptiveRefinementAux_Task(number_levels_to_serialize,pickled_coarse_model,pickled_coarse_project_parameters,pickled_custom_metric_refinement_parameters,pickled_custom_remesh_refinement_parameters,fake_sample,current_level,current_analysis,fake_computational_time)
                    del (pickled_coarse_model)
                    pickled_coarse_model = pickled_current_model
                elif (current_level == number_levels_to_serialize):
                    pickled_current_model, fake_computational_time = mds.executeInstanceOnlyAdaptiveRefinement_Wrapper(
                        pickled_coarse_model,
                        pickled_coarse_project_parameters,
                        pickled_custom_metric_refinement_parameters,
                        pickled_custom_remesh_refinement_parameters,
                        fake_sample, current_level, current_analysis,
                        fake_computational_time)
                # save if current level > 0
                if (current_level > 0):
                    # save pickled and serialized model and parameters
                    self.pickled_model.append(pickled_current_model)
                    self.serialized_model.append(
                        pickle.loads(pickled_current_model))
                del (pickled_current_model)