Example #1
def executeInstanceReadingFromFile_Wrapper(
        current_index, pickled_model, pickled_project_parameters,
        current_analysis, random_variable, time_for_qoi, mapping_flag,
        pickled_mapping_reference_model, print_to_file, current_contribution):
    if (current_index == 0):
        qoi_and_time_list = executeInstanceReadingFromFileAuxLev0_Task(
            pickled_model, pickled_project_parameters, current_analysis,
            random_variable, time_for_qoi, mapping_flag,
            pickled_mapping_reference_model, print_to_file, "filename_level_" +
            str(current_index) + "_contribution_" + str(current_contribution) +
            "_random_variable_" + str(random_variable[0]) + ".dat")
    elif (current_index == 1):
        qoi_and_time_list = executeInstanceReadingFromFileAuxLev1_Task(
            pickled_model, pickled_project_parameters, current_analysis,
            random_variable, time_for_qoi, mapping_flag,
            pickled_mapping_reference_model, print_to_file, "filename_level_" +
            str(current_index) + "_contribution_" + str(current_contribution) +
            "_random_variable_" + str(random_variable[0]) + ".dat")
    elif (current_index == 2):
        qoi_and_time_list = executeInstanceReadingFromFileAuxLev2_Task(
            pickled_model, pickled_project_parameters, current_analysis,
            random_variable, time_for_qoi, mapping_flag,
            pickled_mapping_reference_model, print_to_file, "filename_level_" +
            str(current_index) + "_contribution_" + str(current_contribution) +
            "_random_variable_" + str(random_variable[0]) + ".dat")
    else:
        raise Exception("Level not supported")
    if IsDistributedRun():
        # the whole XMC algorithm is being run with mpirun
        qoi, time_for_qoi = UnfoldQT(qoi_and_time_list)
    else:
        # running with a distributed-environment framework; only the Kratos tasks are run with MPI
        qoi, time_for_qoi = UnfoldFutureQT(qoi_and_time_list)
    return qoi, time_for_qoi
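The three level branches above differ only in which auxiliary task function they call. A minimal sketch of the same dispatch written as a lookup table, reusing the task functions from this example (the helper name is hypothetical):

# Hypothetical dict-based dispatch; the AuxLev*_Task functions are the ones
# called in the example above, and the error matches the if/elif chain.
_LEVEL_READING_TASKS = {
    0: executeInstanceReadingFromFileAuxLev0_Task,
    1: executeInstanceReadingFromFileAuxLev1_Task,
    2: executeInstanceReadingFromFileAuxLev2_Task,
}

def _SelectReadingTask(current_index):
    if current_index not in _LEVEL_READING_TASKS:
        raise Exception("Level not supported")
    return _LEVEL_READING_TASKS[current_index]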
Example #2
def SelectAndVerifyLinearSolver(settings, skiptest):
    # The mechanical solver automatically selects the fastest linear solver available.
    # This might not be appropriate for a test, so if nothing is specified,
    # the previous default linear solver is set.
    if not settings["solver_settings"].Has("linear_solver_settings"):
        # check if running in MPI because there we use a different default linear solver
        if IsDistributedRun():
            default_lin_solver_settings = KratosMultiphysics.Parameters("""{
                "solver_type" : "amesos",
                "amesos_solver_type" : "Amesos_Klu"
            }""")

        else:
            default_lin_solver_settings = KratosMultiphysics.Parameters("""{
                "solver_type": "EigenSolversApplication.sparse_lu"
            }""")
        settings["solver_settings"].AddValue("linear_solver_settings",
                                             default_lin_solver_settings)

    solver_type = settings["solver_settings"]["linear_solver_settings"][
        "solver_type"].GetString()
    solver_type_splitted = solver_type.split(".")
    if len(solver_type_splitted) == 2:
        # this means that we use a solver from an application
        # hence we have to check if it exists, otherwise skip the test
        app_name = solver_type_splitted[0]
        solver_name = solver_type_splitted[1]
        if not kratos_utils.CheckIfApplicationsAvailable(app_name):
            skiptest(
                'Application "{}" is needed for the specified solver "{}" but is not available'
                .format(app_name, solver_name))
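A minimal usage sketch, assuming a KratosUnittest test case that forwards its own skipTest method as the skiptest callback; the settings shown are illustrative:

import KratosMultiphysics
import KratosMultiphysics.KratosUnittest as KratosUnittest

class LinearSolverSelectionTest(KratosUnittest.TestCase):
    def test_solver_selection(self):
        # hypothetical settings; a real test reads them from a parameters file
        settings = KratosMultiphysics.Parameters("""{
            "solver_settings": {}
        }""")
        SelectAndVerifyLinearSolver(settings, self.skipTest)
        # afterwards, settings["solver_settings"]["linear_solver_settings"] is guaranteed to exist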
Example #3
    def setUp(self):
        # Within this location context:
        with KratosUnittest.WorkFolderScope(".", __file__):

            # Reading the ProjectParameters
            with open(self.file_name + "_parameters.json",
                      'r') as parameter_file:
                ProjectParameters = KratosMultiphysics.Parameters(
                    parameter_file.read())

            # The mechanical solver automatically selects the fastest linear solver available.
            # This might not be appropriate for a test, so if nothing is specified,
            # the previous default linear solver is set.
            if not ProjectParameters["solver_settings"].Has(
                    "linear_solver_settings"):
                # check if running in MPI because there we use a different default linear solver
                if IsDistributedRun():
                    default_lin_solver_settings = KratosMultiphysics.Parameters(
                        """{
                        "solver_type" : "amesos",
                        "amesos_solver_type" : "Amesos_Klu"
                    }""")

                else:
                    default_lin_solver_settings = KratosMultiphysics.Parameters(
                        """{
                        "solver_type": "EigenSolversApplication.sparse_lu"
                    }""")
                ProjectParameters["solver_settings"].AddValue(
                    "linear_solver_settings", default_lin_solver_settings)

            solver_type = ProjectParameters["solver_settings"][
                "linear_solver_settings"]["solver_type"].GetString()
            solver_type_splitted = solver_type.split(".")
            if len(solver_type_splitted) == 2:
                # this means that we use a solver from an application
                # hence we have to check if it exists, otherwise skip the test
                app_name = solver_type_splitted[0]
                solver_name = solver_type_splitted[1]
                if not kratos_utils.CheckIfApplicationsAvailable(app_name):
                    self.skipTest(
                        'Application "{}" is needed for the specified solver "{}" but is not available'
                        .format(app_name, solver_name))

            self.modify_parameters(ProjectParameters)

            # Avoid excessive output
            if ProjectParameters["problem_data"]["echo_level"].GetInt() == 0:
                KratosMultiphysics.Logger.GetDefaultOutput().SetSeverity(
                    KratosMultiphysics.Logger.Severity.WARNING)
            else:
                KratosMultiphysics.Logger.GetDefaultOutput().SetSeverity(
                    KratosMultiphysics.Logger.Severity.INFO)

            # Creating the test
            model = KratosMultiphysics.Model()
            self.test = StructuralMechanicsAnalysis(model, ProjectParameters)
            self.test.Initialize()
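This setUp relies on two members supplied by the concrete test class: file_name (the stem of "<file_name>_parameters.json") and the modify_parameters hook. A sketch of what such a subclass could look like; the class, base, and file names are hypothetical:

class MySmallDisplacementTest(StructuralMechanicsTestBase):  # hypothetical base holding the setUp above
    file_name = "small_displacement_test/my_case"

    def modify_parameters(self, project_parameters):
        # e.g. silence the run for CI
        project_parameters["problem_data"]["echo_level"].SetInt(0)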
Example #4
def GetKratosObjectType(type_name):
    type_dict = {
        "LinearSolverFactory": [
            "KratosMultiphysics.python_linear_solver_factory.ConstructSolver",
            "KratosMultiphysics.TrilinosApplication.trilinos_linear_solver_factory.ConstructSolver"
        ],
        "ResidualBasedNewtonRaphsonStrategy": [
            "KratosMultiphysics.ResidualBasedNewtonRaphsonStrategy",
            "KratosMultiphysics.TrilinosApplication.TrilinosNewtonRaphsonStrategy"
        ],
        "MixedGenericCriteria": [
            "KratosMultiphysics.MixedGenericCriteria",
            "KratosMultiphysics.TrilinosApplication.TrilinosMixedGenericCriteria"
        ],
        "ResidualBasedIncrementalUpdateStaticScheme": [
            "KratosMultiphysics.ResidualBasedIncrementalUpdateStaticScheme",
            "KratosMultiphysics.TrilinosApplication.TrilinosResidualBasedIncrementalUpdateStaticScheme"
        ],
        "SteadyScalarScheme": [
            "KratosMultiphysics.RANSApplication.SteadyScalarScheme",
            "KratosMultiphysics.RANSApplication.TrilinosExtension.MPISteadyScalarScheme"
        ],
        "AlgebraicFluxCorrectedSteadyScalarScheme": [
            "KratosMultiphysics.RANSApplication.AlgebraicFluxCorrectedSteadyScalarScheme",
            "KratosMultiphysics.RANSApplication.TrilinosExtension.MPIAlgebraicFluxCorrectedSteadyScalarScheme"
        ],
        "BossakRelaxationScalarScheme": [
            "KratosMultiphysics.RANSApplication.BossakRelaxationScalarScheme",
            "KratosMultiphysics.RANSApplication.TrilinosExtension.MPIBossakRelaxationScalarScheme"
        ],
        "RansWallDistanceCalculationProcess": [
            "KratosMultiphysics.RANSApplication.RansWallDistanceCalculationProcess",
            "KratosMultiphysics.RANSApplication.TrilinosExtension.TrilinosRansWallDistanceCalculationProcess"
        ],
        "ResidualBasedSimpleSteadyScheme": [
            "KratosMultiphysics.FluidDynamicsApplication.ResidualBasedSimpleSteadyScheme",
            "KratosMultiphysics.FluidDynamicsApplication.TrilinosExtension.TrilinosResidualBasedSimpleSteadyScheme"
        ],
        "ResidualBasedPredictorCorrectorVelocityBossakSchemeTurbulent": [
            "KratosMultiphysics.FluidDynamicsApplication.ResidualBasedPredictorCorrectorVelocityBossakSchemeTurbulent",
            "KratosMultiphysics.FluidDynamicsApplication.TrilinosExtension.TrilinosPredictorCorrectorVelocityBossakSchemeTurbulent"
        ]
    }

    if type_name not in type_dict:
        raise Exception(
            type_name +
            " not found in type_dict. The following type_names are allowed:\n\t" +
            "\n\t".join(sorted(type_dict.keys())))

    module_info = type_dict[type_name][IsDistributedRun()]
    index = module_info.rfind(".")
    module_name = module_info[:index]
    attribute_name = module_info[index + 1:]

    module = import_module(module_name)
    return getattr(module, attribute_name)
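Note how the lookup works: IsDistributedRun() returns a bool, which is used directly as a list index, so entry 0 (False) is the serial import path and entry 1 (True) the Trilinos/MPI one. A minimal usage sketch; the solver settings are illustrative, and an MPI run would need a Trilinos-compatible solver_type:

import KratosMultiphysics as Kratos

# Resolve the factory matching the current run mode and build a linear solver.
linear_solver_factory = GetKratosObjectType("LinearSolverFactory")
linear_solver = linear_solver_factory(Kratos.Parameters('{"solver_type": "amgcl"}'))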
Example #5
    def _CreateSolverSettings(self, *args):
        if (self.IsPeriodic()):
            solver_settings_type = GetKratosObjectPrototype("FractionalStepSettingsPeriodic")
        else:
            solver_settings_type = GetKratosObjectPrototype("FractionalStepSettings")

        if (IsDistributedRun()):
            return solver_settings_type(self.GetCommunicator(), *args)
        else:
            return solver_settings_type(*args)
Example #6
    def ImportModelPart(self):
        if (IsDistributedRun()):
            ## Construct the MPI import model part utility
            self.distributed_model_part_importer = DistributedImportModelPartUtility(
                self.main_model_part, self.settings)
            ## Execute the Metis partitioning and reading
            self.distributed_model_part_importer.ImportModelPart()
        else:
            # we can use the default implementation in the base class
            self._ImportModelPart(self.main_model_part,
                                  self.settings["model_import_settings"])
Example #7
    def AddVariables(self):
        self.formulation.AddVariables()

        if self.is_periodic:
            self.main_model_part.AddNodalSolutionStepVariable(
                KratosCFD.PATCH_INDEX)

        if (IsDistributedRun()):
            self.main_model_part.AddNodalSolutionStepVariable(
                Kratos.PARTITION_INDEX)

        Kratos.Logger.PrintInfo(self.__class__.__name__,
                                "Solver variables added correctly.")
Example #8
    def setUp(self):
        # Within this location context:
        with KratosUnittest.WorkFolderScope(".", __file__):

            # Reading the ProjectParameters
            with open(self.file_name + "_parameters.json",
                      'r') as parameter_file:
                ProjectParameters = KratosMultiphysics.Parameters(
                    parameter_file.read())

            # The mechanical solver automatically selects the fastest linear solver available.
            # This might not be appropriate for a test, so if nothing is specified,
            # the previous default linear solver is set.
            if not ProjectParameters["solver_settings"].Has(
                    "linear_solver_settings"):
                # check if running in MPI because there we use a different default linear solver
                if IsDistributedRun():
                    default_lin_solver_settings = KratosMultiphysics.Parameters(
                        """{
                        "solver_type" : "amesos",
                        "amesos_solver_type" : "Amesos_Klu"
                    }""")

                else:
                    default_lin_solver_settings = KratosMultiphysics.Parameters(
                        """{
                        "solver_type": "ExternalSolversApplication.super_lu",
                        "max_iteration": 500,
                        "tolerance": 1e-9,
                        "scaling": false,
                        "symmetric_scaling": true,
                        "verbosity": 0
                    }""")
                ProjectParameters["solver_settings"].AddValue(
                    "linear_solver_settings", default_lin_solver_settings)

            self.modify_parameters(ProjectParameters)

            # Avoid excessive output
            if ProjectParameters["problem_data"]["echo_level"].GetInt() == 0:
                KratosMultiphysics.Logger.GetDefaultOutput().SetSeverity(
                    KratosMultiphysics.Logger.Severity.WARNING)
            else:
                KratosMultiphysics.Logger.GetDefaultOutput().SetSeverity(
                    KratosMultiphysics.Logger.Severity.INFO)

            # Creating the test
            model = KratosMultiphysics.Model()
            self.test = StructuralMechanicsAnalysis(model, ProjectParameters)
            self.test.Initialize()
Example #9
def CreateBlockBuilderAndSolver(linear_solver, is_periodic, communicator):
    if (IsDistributedRun()):
        if (is_periodic):
            return TrilinosBlockBuilderAndSolverPeriodic(
                communicator, 30, linear_solver, KratosCFD.PATCH_INDEX)
        else:
            return TrilinosBlockBuilderAndSolver(communicator, 30,
                                                 linear_solver)
    else:
        if (is_periodic):
            return ResidualBasedBlockBuilderAndSolverPeriodic(
                linear_solver, KratosCFD.PATCH_INDEX)
        else:
            return ResidualBasedBlockBuilderAndSolver(linear_solver)
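A serial usage sketch; in an MPI run, communicator would instead be the Epetra communicator, and the linear solver settings shown are illustrative:

import KratosMultiphysics as Kratos
from KratosMultiphysics import python_linear_solver_factory

linear_solver = python_linear_solver_factory.ConstructSolver(
    Kratos.Parameters('{"solver_type": "amgcl"}'))
# Serial, non-periodic case: no communicator is needed.
builder_and_solver = CreateBlockBuilderAndSolver(linear_solver, False, None)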
Example #10
def executeInstanceStochasticAdaptiveRefinementAllAtOnce_Wrapper(
        current_index, pickled_coarse_model, pickled_coarse_project_parameters,
        pickled_custom_metric_refinement_parameters,
        pickled_custom_remesh_refinement_parameters, random_variable,
        current_analysis, time_for_qoi, mapping_flag,
        adaptive_refinement_jump_to_finest_level, print_to_file,
        current_contribution):
    if (current_index == 0):
        qoi_and_time_list = ExecuteInstanceStochasticAdaptiveRefinementAllAtOnceAuxLev0_Task(
            current_index, pickled_coarse_model,
            pickled_coarse_project_parameters,
            pickled_custom_metric_refinement_parameters,
            pickled_custom_remesh_refinement_parameters, random_variable,
            current_analysis, time_for_qoi, mapping_flag,
            adaptive_refinement_jump_to_finest_level, print_to_file,
            "filename_level_" + str(current_index) + "_contribution_" +
            str(current_contribution) + "_random_variable_" +
            str(random_variable[0]) + ".dat")
    elif (current_index == 1):
        qoi_and_time_list = ExecuteInstanceStochasticAdaptiveRefinementAllAtOnceAuxLev1_Task(
            current_index, pickled_coarse_model,
            pickled_coarse_project_parameters,
            pickled_custom_metric_refinement_parameters,
            pickled_custom_remesh_refinement_parameters, random_variable,
            current_analysis, time_for_qoi, mapping_flag,
            adaptive_refinement_jump_to_finest_level, print_to_file,
            "filename_level_" + str(current_index) + "_contribution_" +
            str(current_contribution) + "_random_variable_" +
            str(random_variable[0]) + ".dat")
    elif (current_index == 2):
        qoi_and_time_list = ExecuteInstanceStochasticAdaptiveRefinementAllAtOnceAuxLev2_Task(
            current_index, pickled_coarse_model,
            pickled_coarse_project_parameters,
            pickled_custom_metric_refinement_parameters,
            pickled_custom_remesh_refinement_parameters, random_variable,
            current_analysis, time_for_qoi, mapping_flag,
            adaptive_refinement_jump_to_finest_level, print_to_file,
            "filename_level_" + str(current_index) + "_contribution_" +
            str(current_contribution) + "_random_variable_" +
            str(random_variable[0]) + ".dat")
    else:
        raise Exception("Level not supported")
    if IsDistributedRun():
        # the whole XMC algorithm is being run with mpirun
        qoi, time_for_qoi = UnfoldQT(qoi_and_time_list)
    else:
        # running with a distributed-environment framework; only the Kratos tasks are run with MPI
        qoi, time_for_qoi = UnfoldFutureQT(qoi_and_time_list)
    return qoi, time_for_qoi
Example #11
    def PrepareModelPart(self):
        if not self.main_model_part.ProcessInfo[Kratos.IS_RESTARTED]:
            ## Set fluid properties from materials json file
            materials_imported = self._SetPhysicalProperties()
            if not materials_imported:
                Kratos.Logger.PrintWarning(
                    self.__class__.__name__,
                    "Material properties have not been imported. Check \'material_import_settings\' in your ProjectParameters.json."
                )
            ## Executes the check and prepare model process
            self._ExecuteCheckAndPrepare()
            ## Set buffer size
            self.main_model_part.SetBufferSize(self.min_buffer_size)

        if (IsDistributedRun()):
            self.distributed_model_part_importer.CreateCommunicators()

        self.formulation.PrepareModelPart()

        Kratos.Logger.PrintInfo(self.__class__.__name__,
                                "Model reading finished.")
Example #12
def executeInstanceStochasticAdaptiveRefinementMultipleTasks_Wrapper(
        current_index,
        pickled_coarse_model,
        pickled_coarse_project_parameters,
        pickled_custom_metric_refinement_parameters,
        pickled_custom_remesh_refinement_parameters,
        random_variable,
        current_local_index,
        current_analysis,
        time_for_qoi,
        mapping_flag,
        print_to_file,
        current_contribution,
        pickled_mapping_reference_model=None):
    if (current_index == 0):
        qoi_pickled_current_model_time_for_qoi_list = ExecuteInstanceStochasticAdaptiveRefinementMultipleTasksAuxLev0_Task(
            current_index, pickled_coarse_model,
            pickled_coarse_project_parameters,
            pickled_custom_metric_refinement_parameters,
            pickled_custom_remesh_refinement_parameters, random_variable,
            current_local_index, current_analysis, time_for_qoi, mapping_flag,
            pickled_mapping_reference_model, print_to_file, "filename_level_" +
            str(current_index) + "_contribution_" + str(current_contribution) +
            "_random_variable_" + str(random_variable[0]) + ".dat")
    else:
        # We cannot run with multiple tasks, since tasks of different levels are normally run with different numbers of processors,
        # and when running with MPI the model should be pickled with the number of processes of the task.
        # For example, to run with MPI and 4 processes, the model needs to be serialized within an MPI task of 4 processes.
        raise Exception(
            "Level not supported. You should set \"taskAllAtOnce\" to \"true\" to run multi-level algorithms with \"stochastic_adaptive_refinement\" as \"refinement_strategy\"."
        )
    if IsDistributedRun():
        # the whole XMC algorithm is being run with mpirun
        qoi, pickled_current_model, time_for_qoi = UnfoldQMT(
            qoi_pickled_current_model_time_for_qoi_list)
    else:
        # running with a distributed-environment framework; only the Kratos tasks are run with MPI
        qoi, pickled_current_model, time_for_qoi = UnfoldFutureQMT(
            qoi_pickled_current_model_time_for_qoi_list)
    return qoi, pickled_current_model, time_for_qoi
Example #13
    def Initialize(self):
        if (IsDistributedRun()):
            self.EpetraComm = KratosTrilinos.CreateCommunicator()
            self.formulation.SetCommunicator(self.EpetraComm)
        else:
            self.formulation.SetCommunicator(None)

        self.main_model_part.ProcessInfo[Kratos.STEP] = 0

        # If needed, create the estimate time step utility
        if (self.settings["time_stepping"]["automatic_time_step"].GetBool()):
            self.EstimateDeltaTimeUtility = self._GetAutomaticTimeSteppingUtility()

        RansVariableUtilities.AssignBoundaryFlagsToGeometries(
            self.main_model_part)
        self.formulation.Initialize()

        Kratos.Logger.PrintInfo(self.__class__.__name__,
                                self.formulation.GetInfo())

        Kratos.Logger.PrintInfo(self.__class__.__name__,
                                "Solver initialization finished.")
Example #14
    def CreateStrategy(self, solver_settings, scheme_settings, model_part,
                       scalar_variable, scalar_variable_rate,
                       relaxed_scalar_variable_rate):
        default_solver_settings = Kratos.Parameters(r'''{
                "is_periodic"           : false,
                "relative_tolerance"    : 1e-3,
                "absolute_tolerance"    : 1e-5,
                "max_iterations"        : 200,
                "relaxation_factor"     : 0.5,
                "echo_level"            : 0,
                "linear_solver_settings": {
                    "solver_type"  : "amgcl"
                },
                "reform_dofs_at_each_step": true,
                "move_mesh_strategy": 0,
                "move_mesh_flag": false,
                "compute_reactions": false
        }''')

        default_scheme_settings = Kratos.Parameters(r'''{
            "scheme_type": "bossak",
            "alpha_bossak": -0.3
        }''')

        solver_settings.ValidateAndAssignDefaults(default_solver_settings)
        scheme_settings.ValidateAndAssignDefaults(default_scheme_settings)

        linear_solver = linear_solver_factory.ConstructSolver(
            solver_settings["linear_solver_settings"])

        is_periodic = solver_settings["is_periodic"].GetBool()

        if is_periodic:
            self.__InitializePeriodicConditions(model_part, scalar_variable)

        # TODO:
        if is_periodic and IsDistributedRun():
            msg = "\nCurrently periodic conditions in MPI are not supported due to the following reasons:\n\n"
            msg += "    1. TrilinosResidualCriteria [ConvergenceCriterion]\n"
            msg += "PeriodicConditions duplicates one patch's equation ids to the counter patch. "
            msg += "The node and its corresponding dof might not fall into the same partition, raising an error in the convergence calculation.\n\n"
            msg += "    2. ConnectivityPreserveModeller\n"
            msg += "Currently the connectivity preserve modeller replaces all the conditions in an mdpa with the given new condition. "
            msg += "This modeller is used to create model parts having k-epsilon elements and conditions while sharing the same nodes as in the VMS solution. "
            msg += "In the case of MPI, it is essential to have the PeriodicConditions in the mdpa file in order to properly distribute nodes to partitions using MetisApplication. "
            msg += "But if this is the case, PeriodicConditions will also be replaced by k-epsilon specific conditions, causing a segmentation fault.\n"
            msg += "    3. TrilinosBlockBuilderAndSolverPeriodic\n"
            msg += "In the case of MPI periodic in 2D, the problem uses the TrilinosBlockBuilderAndSolverPeriodic builder and solver, which identifies "
            msg += "periodic conditions by the number of nodes in the condition. In 2D, all wall conditions and PeriodicConditions have only 2 nodes, so all will be "
            msg += "considered as PeriodicConditions and the global assembly will be made accordingly, which is wrong. "
            msg += "Therefore this error msg is printed in order to avoid confusion."
            raise Exception(msg)

        builder_and_solver = self.__CreateBuilderAndSolver(
            linear_solver, is_periodic)

        if (scheme_settings["scheme_type"].GetString() == "bossak"):
            convergence_criteria_type = scalar_convergence_criteria
        elif (scheme_settings["scheme_type"].GetString() == "steady"):
            convergence_criteria_type = residual_criteria
        else:
            # fail early; otherwise convergence_criteria_type would be unbound below
            raise Exception("Unknown scheme_type = \"" +
                            scheme_settings["scheme_type"].GetString() + "\"")

        convergence_criteria = convergence_criteria_type(
            solver_settings["relative_tolerance"].GetDouble(),
            solver_settings["absolute_tolerance"].GetDouble())

        if (scheme_settings["scheme_type"].GetString() == "bossak"):
            time_scheme = dynamic_scheme(
                scheme_settings["alpha_bossak"].GetDouble(),
                solver_settings["relaxation_factor"].GetDouble(),
                scalar_variable, scalar_variable_rate,
                relaxed_scalar_variable_rate)
        elif (scheme_settings["scheme_type"].GetString() == "steady"):
            time_scheme = steady_scheme(
                solver_settings["relaxation_factor"].GetDouble())
            self.fluid_model_part.ProcessInfo[Kratos.BOSSAK_ALPHA] = \
                scheme_settings["alpha_bossak"].GetDouble()
            self.fluid_model_part.ProcessInfo[
                KratosRANS.IS_CO_SOLVING_PROCESS_ACTIVE] = True
            if (self.fluid_model_part.ProcessInfo[Kratos.DYNAMIC_TAU] != 0.0):
                Kratos.Logger.PrintWarning(
                    self.__class__.__name__,
                    "Steady solution doesn't have zero DYNAMIC_TAU [ DYNAMIC_TAU = " +
                    str(self.fluid_model_part.ProcessInfo[Kratos.DYNAMIC_TAU]) + " ].")
        else:
            raise Exception("Unknown scheme_type = \"" +
                            scheme_settings["scheme_type"].GetString() + "\"")

        strategy = newton_raphson_strategy(
            model_part, time_scheme, linear_solver, convergence_criteria,
            builder_and_solver, solver_settings["max_iterations"].GetInt(),
            solver_settings["compute_reactions"].GetBool(),
            solver_settings["reform_dofs_at_each_step"].GetBool(),
            solver_settings["move_mesh_flag"].GetBool())

        strategy.SetEchoLevel(solver_settings["echo_level"].GetInt() - 2)
        builder_and_solver.SetEchoLevel(
            solver_settings["echo_level"].GetInt() - 3)
        convergence_criteria.SetEchoLevel(
            solver_settings["echo_level"].GetInt() - 1)

        if (is_periodic):
            Kratos.Logger.PrintInfo(
                self.__class__.__name__,
                "Successfully created periodic solving strategy for " +
                scalar_variable.Name() + ".")
        else:
            Kratos.Logger.PrintInfo(
                self.__class__.__name__,
                "Successfully created solving strategy for " +
                scalar_variable.Name() + ".")

        return strategy
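A sketch of the settings such a call might receive; ValidateAndAssignDefaults fills every omitted key from the defaults above, and the values shown are illustrative:

import KratosMultiphysics as Kratos

solver_settings = Kratos.Parameters(r'''{
    "relative_tolerance"    : 1e-4,
    "linear_solver_settings": { "solver_type": "amgcl" }
}''')
scheme_settings = Kratos.Parameters(r'''{
    "scheme_type": "steady"
}''')
# "alpha_bossak" is still filled in from the defaults, which the steady
# branch writes into ProcessInfo[Kratos.BOSSAK_ALPHA].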
Example #15
from __future__ import print_function, absolute_import, division  # makes KratosMultiphysics backward compatible with python 2.6 and 2.7

from KratosMultiphysics.kratos_utilities import CheckIfApplicationsAvailable
from KratosMultiphysics import IsDistributedRun

from KratosMultiphysics.FluidDynamicsApplication.fluid_dynamics_analysis import FluidDynamicsAnalysis
if (IsDistributedRun() and CheckIfApplicationsAvailable("TrilinosApplication")):
    from KratosMultiphysics.RANSApplication.trilinos_fluid_solver_no_replace import TrilinosFluidSolverNoReplace as fluid_solver_no_replace
elif (not IsDistributedRun()):
    from KratosMultiphysics.RANSApplication.fluid_solver_no_replace import FluidSolverNoReplace as fluid_solver_no_replace
else:
    raise Exception("Distributed run requires TrilinosApplication")


class PeriodicFluidDynamicsAnalysis(FluidDynamicsAnalysis):
    def _CreateSolver(self):
        return fluid_solver_no_replace(self.model, self.project_parameters["solver_settings"])
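A hedged driver sketch for the analysis class above; the parameter file name is illustrative:

import KratosMultiphysics

if __name__ == "__main__":
    with open("ProjectParameters.json", 'r') as parameter_file:
        parameters = KratosMultiphysics.Parameters(parameter_file.read())

    model = KratosMultiphysics.Model()
    PeriodicFluidDynamicsAnalysis(model, parameters).Run()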
Example #16
# Application dependent names and paths
from KratosMultiphysics import _ImportApplication
from KratosMultiphysics import IsDistributedRun
if IsDistributedRun():
    import KratosMultiphysics.mpi  # importing the MPI-Core
from KratosMeshingApplication import *

application = KratosMeshingApplication()
application_name = "KratosMeshingApplication"

_ImportApplication(application, application_name)
Example #17
    def setUp(self):
        if (IsDistributedRun()):
            self.skipTest(
                "Skipping since Periodic tests are not designed to be run in MPI."
            )
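The complementary guard, skipping a test unless it runs under MPI, follows the same pattern; a minimal sketch:

    def setUp(self):
        if not IsDistributedRun():
            self.skipTest("This test is designed to be run in MPI.")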
Example #18
from importlib import import_module

import KratosMultiphysics as Kratos
import KratosMultiphysics.FluidDynamicsApplication as KratosCFD
import KratosMultiphysics.RANSApplication as KratosRANS

from KratosMultiphysics import IsDistributedRun
from KratosMultiphysics import VariableUtils
from KratosMultiphysics.kratos_utilities import CheckIfApplicationsAvailable

from KratosMultiphysics.RANSApplication import RansVariableUtilities

if (IsDistributedRun()
        and CheckIfApplicationsAvailable("TrilinosApplication")):
    from KratosMultiphysics.TrilinosApplication import TrilinosBlockBuilderAndSolverPeriodic
    from KratosMultiphysics.TrilinosApplication import TrilinosBlockBuilderAndSolver
elif (not IsDistributedRun()):
    from KratosMultiphysics import ResidualBasedBlockBuilderAndSolver
    from KratosMultiphysics.FluidDynamicsApplication import ResidualBasedBlockBuilderAndSolverPeriodic
else:
    raise Exception("Distributed run requires TrilinosApplication")


def GetKratosObjectPrototype(type_name):
    type_dict = {
        "LinearSolverFactory": [
            "KratosMultiphysics.python_linear_solver_factory.ConstructSolver",
            "KratosMultiphysics.TrilinosApplication.trilinos_linear_solver_factory.ConstructSolver"
        ],
        "ResidualBasedNewtonRaphsonStrategy": [
            "KratosMultiphysics.ResidualBasedNewtonRaphsonStrategy",
Example #19
class TestMPIParMmg(KratosUnittest.TestCase):

    @KratosUnittest.skipUnless(IsDistributedRun() and ParallelEnvironment.GetDefaultSize() <= 4,  "Test designed to be run with max. 4 ranks.")
    def test_mpi_sphere(self):
        KratosMultiphysics.Logger.GetDefaultOutput().SetSeverity(KratosMultiphysics.Logger.Severity.WARNING)

        # We create the model part
        current_model = KratosMultiphysics.Model()
        main_model_part = current_model.CreateModelPart("MainModelPart")
        main_model_part.ProcessInfo.SetValue(KratosMultiphysics.DOMAIN_SIZE, 3)

        # We add the variables needed
        main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.DISTANCE)
        main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.DISTANCE_GRADIENT)


        # We import the model main_model_part
        file_path = GetFilePath("/parmmg_eulerian_test/background_mesh_sphere")
        ReadModelPart(file_path, main_model_part)

        communicator = main_model_part.GetCommunicator().GetDataCommunicator()

        for node in main_model_part.Nodes:
            distance = math.sqrt(node.X**2 + node.Y**2 + node.Z**2) - 1.0/2.0
            node.SetSolutionStepValue(KratosMultiphysics.DISTANCE, distance)

        ## COMPUTE DISTANCE GRADIENT AND NODAL_H
        local_gradient = KratosMultiphysics.ComputeNodalGradientProcess3D(
            main_model_part,
            KratosMultiphysics.DISTANCE,
            KratosMultiphysics.DISTANCE_GRADIENT,
            KratosMultiphysics.NODAL_AREA)

        local_gradient.Execute()
        find_nodal_h = KratosMultiphysics.FindNodalHNonHistoricalProcess(main_model_part)
        find_nodal_h.Execute()

        ## COMPUTE LEVEL SET METRIC
        metric_parameters = KratosMultiphysics.Parameters("""
        {
            "minimal_size"                             : 0.5,
            "sizing_parameters": {
                "reference_variable_name"               : "DISTANCE",
                "boundary_layer_max_distance"           : 2.0,
                "interpolation"                         : "constant"
            },
            "enforce_current"                      : false,
            "anisotropy_remeshing"                 : false
        }
        """)
        metric_process = KratosMultiphysics.MeshingApplication.ComputeLevelSetSolMetricProcess3D(
            main_model_part,
            KratosMultiphysics.DISTANCE_GRADIENT,
            metric_parameters)
        metric_process.Execute()

        ## PERFORM REMESHING
        pmmg_parameters = KratosMultiphysics.Parameters("""
        {
            "filename"                         : "output",
            "save_external_files"              : true,
            "save_colors_files"                : true,
            "initialize_entities"              : false,
            "preserve_flags"                   : false,
            "echo_level"                       : 0,
            "advanced_parameters" : {
                "number_of_iterations"         : 4
            }
        }
        """)
        pmmg_parameters["filename"].SetString(GetFilePath(pmmg_parameters["filename"].GetString()))
        pmmg_process = KratosMultiphysics.MeshingApplication.ParMmgProcess3D(main_model_part.GetRootModelPart(), pmmg_parameters)
        pmmg_process.Execute()

        reference_file_name = GetFilePath("parmmg_eulerian_test/cond_ref_map.json")
        result_file_name = GetFilePath("output_step=0_"+str(communicator.Rank())+".cond.ref.json")
        self._CompareColorFiles(reference_file_name, result_file_name)

        reference_file_name = GetFilePath("parmmg_eulerian_test/elem_ref_map.json")
        result_file_name = GetFilePath("output_step=0_"+str(communicator.Rank())+".elem.ref.json")
        self._CompareColorFiles(reference_file_name, result_file_name)

        result_dict_file_name = GetFilePath("parmmg_eulerian_test/reference_parmmg_spehere_mdpa_hierarchy.json")
        with open(result_dict_file_name, 'r') as f:
            reference_hierarchy = json.load(f)
        self.CheckModelPartHierarchie(main_model_part, reference_hierarchy[str(communicator.Size())])

        for file_name in os.listdir(GetFilePath("")):
            if file_name.endswith((".json", ".mdpa", ".mesh", ".sol")):
                kratos_utilities.DeleteFileIfExisting(GetFilePath(file_name))
        kratos_utilities.DeleteTimeFiles(os.getcwd())

    def _CompareColorFiles(self, ref_dict_filename, result_dict_file_name):

        with open(ref_dict_filename, 'r') as f:
            reference_values = json.load(f)

        with open(result_dict_file_name, 'r') as f:
            result_values = json.load(f)

        self.assertEqual(len(reference_values.keys()), len(result_values.keys()))
        for key_ref, key_result in zip(reference_values.keys(), result_values.keys()):
            self.assertEqual(reference_values[key_ref], result_values[key_result])

    def _CheckModelPart(self, ref_model_part, result_model_part):
        self.assertEqual(ref_model_part.NumberOfNodes(), result_model_part.NumberOfNodes())
        self.assertEqual(ref_model_part.NumberOfElements(), result_model_part.NumberOfElements())
        self.assertEqual(ref_model_part.NumberOfConditions(), result_model_part.NumberOfConditions())

    def CheckModelPartHierarchie(self, model_part, hierarchie):
        """Check whether the hierarchy of a ModelPart matches the expected one.
        This is intended for checking larger models, where it is not feasible
        to save large mdpa-files as references.
        hierarchie is a dict mirroring the structure of the ModelPart, e.g.:
        {
            "name_model_part" : {
                "nodes": 15,
                "elements": 11,
                "conditions": 5,
                "properties": 2,
                "sub_model_parts" : {
                    "domain" : {
                        "nodes": 15,
                        "elements" : 11,
                        "properties" : 1,
                        "sub_model_parts" : {
                            "sub_domain" : {
                                "nodes" : 3,
                                "elements" : 2
                            }
                        }
                    },
                    "boundary" : {
                        "nodes": 6,
                        "conditions" : 5,
                        "properties" : 1
                    }
                }
            }
        }
        """
        def CheckModelPartHierarchieNumbers(smp, smp_hierarchie):
            comm = smp.GetCommunicator().GetDataCommunicator()
            local_number_of_nodes = smp.GetCommunicator().LocalMesh().NumberOfNodes()
            local_number_of_elem = smp.GetCommunicator().LocalMesh().NumberOfElements()
            local_number_of_cond = smp.GetCommunicator().LocalMesh().NumberOfConditions()
            local_number_of_prop = smp.GetCommunicator().LocalMesh().NumberOfProperties()

            exp_num = smp_hierarchie.get("nodes", 0)
            self.assertEqual(comm.SumAll(local_number_of_nodes), exp_num, msg='ModelPart "{}" is expected to have {} nodes but has {}'.format(smp.FullName(), exp_num, smp.NumberOfNodes()))

            exp_num = smp_hierarchie.get("elements", 0)
            self.assertEqual(comm.SumAll(local_number_of_elem), exp_num, msg='ModelPart "{}" is expected to have {} elements but has {}'.format(smp.FullName(), exp_num, smp.NumberOfElements()))

            exp_num = smp_hierarchie.get("conditions", 0)
            self.assertEqual(comm.SumAll(local_number_of_cond), exp_num, msg='ModelPart "{}" is expected to have {} conditions but has {}'.format(smp.FullName(), exp_num, smp.NumberOfConditions()))

            exp_num = smp_hierarchie.get("properties", 0)
            self.assertEqual(comm.SumAll(local_number_of_prop), exp_num, msg='ModelPart "{}" is expected to have {} properties but has {}'.format(smp.FullName(), exp_num, smp.NumberOfProperties()))

            if "sub_model_parts" in smp_hierarchie:
                smp_hierarchie = smp_hierarchie["sub_model_parts"]
                for name_smp in smp_hierarchie:
                    self.assertTrue(smp.HasSubModelPart(name_smp), msg='ModelPart "{}" does not have SubModelPart with name "{}"'.format(smp.FullName(), name_smp))
                    CheckModelPartHierarchieNumbers(smp.GetSubModelPart(name_smp), smp_hierarchie[name_smp])

        # check the name of the MainModelPart
        self.assertEqual(len(hierarchie), 1)
        name_main_model_part = next(iter(hierarchie))
        self.assertEqual(model_part.Name, name_main_model_part)

        CheckModelPartHierarchieNumbers(model_part, hierarchie[name_main_model_part])
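A minimal sketch, from within a test method, of the expected-hierarchy dict this check consumes; the counts are illustrative, and omitted keys ("conditions", "properties", ...) default to 0:

# Hypothetical expected hierarchy for a model part named "MainModelPart".
expected_hierarchy = {
    "MainModelPart": {
        "nodes": 125,
        "elements": 96,
        "sub_model_parts": {
            "Skin": {"nodes": 98, "conditions": 192}
        }
    }
}
self.CheckModelPartHierarchie(main_model_part, expected_hierarchy)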
Example #20
# makes KratosMultiphysics backward compatible with python 2.6 and 2.7
from __future__ import print_function, absolute_import, division

# Application dependent names and paths
from KratosMultiphysics.kratos_utilities import CheckIfApplicationsAvailable
from KratosMultiphysics import IsDistributedRun
if (IsDistributedRun()
        and CheckIfApplicationsAvailable("TrilinosApplication")):
    from KratosMultiphysics.TrilinosApplication import *
from KratosRANSApplication import *

from KratosMultiphysics import _ImportApplicationAsModule
application = KratosRANSApplication()
application_name = "KratosRANSApplication"
application_folder = "RANSApplication"

_ImportApplicationAsModule(application, application_name, application_folder,
                           __path__)
Example #21
class TestDataCommunicatorFactory(UnitTest.TestCase):
    def setUp(self):
        self.registered_comms = []
        self.default_data_communicator = ParallelEnvironment.GetDefaultDataCommunicator()
        self.original_default = ParallelEnvironment.GetDefaultDataCommunicatorName()

    def tearDown(self):
        if len(self.registered_comms) > 0:
            ParallelEnvironment.SetDefaultDataCommunicator(
                self.original_default)
            for comm_name in self.registered_comms:
                ParallelEnvironment.UnregisterDataCommunicator(comm_name)

    def markForCleanUp(self, comm_name):
        self.registered_comms.append(comm_name)

    @UnitTest.skipUnless(IsDistributedRun(), "Test is distributed.")
    def testDataCommunicatorDuplication(self):
        duplicate_comm = DataCommunicatorFactory.DuplicateAndRegister(
            self.default_data_communicator, "Duplicate")
        self.markForCleanUp("Duplicate")  # to clean up during tearDown

        self.assertEqual(duplicate_comm.Rank(),
                         self.default_data_communicator.Rank())
        self.assertEqual(duplicate_comm.Size(),
                         self.default_data_communicator.Size())

    @UnitTest.skipUnless(IsDistributedRun(), "Test is distributed.")
    def testDataCommunicatorSplit(self):
        rank = self.default_data_communicator.Rank()
        size = self.default_data_communicator.Size()
        split_comm = DataCommunicatorFactory.SplitAndRegister(
            self.default_data_communicator, rank % 2, 0, "EvenOdd")
        self.markForCleanUp("EvenOdd")  # to clean up during tearDown

        expected_rank = rank // 2
        if rank % 2 == 0:
            expected_size = math.ceil(size / 2)
        else:
            expected_size = math.floor(size / 2)

        self.assertEqual(split_comm.Rank(), expected_rank)
        self.assertEqual(split_comm.Size(), expected_size)

    @UnitTest.skipUnless(IsDistributedRun()
                         and ParallelEnvironment.GetDefaultSize() > 1,
                         "Test requires at least two ranks.")
    def testDataCommunicatorCreateFromRange(self):
        rank = self.default_data_communicator.Rank()
        size = self.default_data_communicator.Size()

        # Create a communicator using all ranks except the first
        ranks = [i for i in range(1, size)]
        range_comm = DataCommunicatorFactory.CreateFromRanksAndRegister(
            self.default_data_communicator, ranks, "AllExceptFirst")
        self.markForCleanUp("AllExceptFirst")  # to clean up during tearDown

        if rank == 0:
            self.assertTrue(range_comm.IsNullOnThisRank())
            self.assertFalse(range_comm.IsDefinedOnThisRank())
        else:
            self.assertEqual(range_comm.Rank(), rank - 1)
            self.assertEqual(range_comm.Size(), size - 1)

    @UnitTest.skipUnless(IsDistributedRun()
                         and ParallelEnvironment.GetDefaultSize() > 2,
                         "Test requires at least three ranks.")
    def testDataCommunicatorCreateUnion(self):
        rank = self.default_data_communicator.Rank()
        size = self.default_data_communicator.Size()

        # Create a communicator using all ranks except the first
        all_except_first = DataCommunicatorFactory.CreateFromRanksAndRegister(
            self.default_data_communicator, [i for i in range(1, size)],
            "AllExceptFirst")
        self.markForCleanUp("AllExceptFirst")  # to clean up during tearDown
        all_except_last = DataCommunicatorFactory.CreateFromRanksAndRegister(
            self.default_data_communicator, [i for i in range(0, size - 1)],
            "AllExceptLast")
        self.markForCleanUp("AllExceptLast")  # to clean up during tearDown

        # Create union communicator (should contain all ranks)
        union_comm = DataCommunicatorFactory.CreateUnionAndRegister(
            all_except_first, all_except_last, self.default_data_communicator,
            "Union")
        self.markForCleanUp("Union")  # to clean up during tearDown

        self.assertFalse(union_comm.IsNullOnThisRank())
        self.assertEqual(union_comm.Rank(), rank)
        self.assertEqual(union_comm.Size(), size)

    @UnitTest.skipUnless(IsDistributedRun()
                         and ParallelEnvironment.GetDefaultSize() > 2,
                         "Test requires at least three ranks.")
    def testDataCommunicatorCreateIntersection(self):
        rank = self.default_data_communicator.Rank()
        size = self.default_data_communicator.Size()

        # Create a communicator using all ranks except the first
        all_except_first = DataCommunicatorFactory.CreateFromRanksAndRegister(
            self.default_data_communicator, [i for i in range(1, size)],
            "AllExceptFirst")
        self.markForCleanUp("AllExceptFirst")  # to clean up during tearDown
        all_except_last = DataCommunicatorFactory.CreateFromRanksAndRegister(
            self.default_data_communicator, [i for i in range(0, size - 1)],
            "AllExceptLast")
        self.markForCleanUp("AllExceptLast")  # to clean up during tearDown

        intersection_comm = DataCommunicatorFactory.CreateIntersectionAndRegister(
            all_except_first, all_except_last, self.default_data_communicator,
            "Intersection")
        self.markForCleanUp("Intersection")  # to clean up during tearDown

        if rank == 0 or rank == size - 1:
            # The first and last ranks do not participate in the intersection communicator
            self.assertTrue(intersection_comm.IsNullOnThisRank())
        else:
            self.assertEqual(intersection_comm.Rank(), rank - 1)
            self.assertEqual(intersection_comm.Size(), size - 2)
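All of these tests are guarded by IsDistributedRun(), so they only exercise anything when launched under MPI. A hypothetical runner sketch, assuming an MPI-enabled Kratos build:

# Launched e.g. as: mpiexec -np 4 python3 test_data_communicator_factory.py
import KratosMultiphysics.KratosUnittest as UnitTest

if __name__ == '__main__':
    UnitTest.main()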