    def test_HDF5NodalSolutionStepDataIO(self):
        with ControlledExecutionScope(os.path.dirname(os.path.realpath(__file__))):
            current_model = Model()
            write_model_part = current_model.CreateModelPart("write")
            KratosMPI.ModelPartCommunicatorUtilities.SetMPICommunicator(write_model_part)
            self._initialize_model_part(write_model_part)
            hdf5_file = self._get_file()
            hdf5_model_part_io = self._get_model_part_io(hdf5_file)
            hdf5_model_part_io.WriteModelPart(write_model_part)
            read_model_part = current_model.CreateModelPart("read")
            KratosMPI.ModelPartCommunicatorUtilities.SetMPICommunicator(read_model_part)
            hdf5_model_part_io.ReadModelPart(read_model_part)
            KratosMPI.ParallelFillCommunicator(read_model_part.GetRootModelPart()).Execute()
            hdf5_nodal_solution_step_data_io = self._get_nodal_solution_step_data_io(hdf5_file)
            hdf5_nodal_solution_step_data_io.WriteNodalResults(write_model_part.Nodes, 0)
            hdf5_nodal_solution_step_data_io.ReadNodalResults(read_model_part.Nodes, read_model_part.GetCommunicator(), 0)
            read_model_part.GetCommunicator().SynchronizeNodalSolutionStepsData()
            # Check data.
            for read_node, write_node in zip(read_model_part.Nodes, write_model_part.Nodes):
                self.assertEqual(read_node.GetSolutionStepValue(DISPLACEMENT_X), write_node.GetSolutionStepValue(DISPLACEMENT_X))
                self.assertEqual(read_node.GetSolutionStepValue(DISPLACEMENT_Y), write_node.GetSolutionStepValue(DISPLACEMENT_Y))
                self.assertEqual(read_node.GetSolutionStepValue(DISPLACEMENT_Z), write_node.GetSolutionStepValue(DISPLACEMENT_Z))
                self.assertEqual(read_node.GetSolutionStepValue(VELOCITY_X), write_node.GetSolutionStepValue(VELOCITY_X))
                self.assertEqual(read_node.GetSolutionStepValue(VELOCITY_Y), write_node.GetSolutionStepValue(VELOCITY_Y))
                self.assertEqual(read_node.GetSolutionStepValue(VELOCITY_Z), write_node.GetSolutionStepValue(VELOCITY_Z))
                self.assertEqual(read_node.GetSolutionStepValue(ACCELERATION_X), write_node.GetSolutionStepValue(ACCELERATION_X))
                self.assertEqual(read_node.GetSolutionStepValue(ACCELERATION_Y), write_node.GetSolutionStepValue(ACCELERATION_Y))
                self.assertEqual(read_node.GetSolutionStepValue(ACCELERATION_Z), write_node.GetSolutionStepValue(ACCELERATION_Z))
                self.assertEqual(read_node.GetSolutionStepValue(PRESSURE), write_node.GetSolutionStepValue(PRESSURE))
                self.assertEqual(read_node.GetSolutionStepValue(VISCOSITY), write_node.GetSolutionStepValue(VISCOSITY))
                self.assertEqual(read_node.GetSolutionStepValue(DENSITY), write_node.GetSolutionStepValue(DENSITY))
                self.assertEqual(read_node.GetSolutionStepValue(ACTIVATION_LEVEL), write_node.GetSolutionStepValue(ACTIVATION_LEVEL))
                self.assertEqual(read_node.GetSolutionStepValue(PARTITION_INDEX), write_node.GetSolutionStepValue(PARTITION_INDEX))
            kratos_utilities.DeleteFileIfExisting("test_hdf5_model_part_io_mpi.h5")
Example #2
    def setUp(self):
        self.model = KM.Model()
        self.model_part = self.model.CreateModelPart("default")
        self.model_part.AddNodalSolutionStepVariable(KM.PRESSURE)
        self.model_part.AddNodalSolutionStepVariable(KM.PARTITION_INDEX)
        self.dimension = 3
        self.model_part.ProcessInfo[KM.DOMAIN_SIZE] = self.dimension

        self.my_pid = KM.DataCommunicator.GetDefault().Rank()
        self.num_nodes = self.my_pid % 5 + 3  # num_nodes in range (3 ... 7)
        if self.my_pid == 4:
            self.num_nodes = 0  # in order to emulate one partition not having local nodes

        for i in range(self.num_nodes):
            node = self.model_part.CreateNewNode(
                i, 0.1 * i, 0.0, 0.0
            )  # this creates the same coords in different ranks, which does not matter for this test

            node.SetSolutionStepValue(KM.PARTITION_INDEX, self.my_pid)
            node.SetSolutionStepValue(KM.PRESSURE, uniform(-10, 50))

        if KM.IsDistributedRun():
            KratosMPI.ParallelFillCommunicator(self.model_part).Execute()

        data_settings = KM.Parameters("""{
            "model_part_name" : "default",
            "variable_name"   : "PRESSURE"
        }""")
        self.interface_data = CouplingInterfaceData(data_settings, self.model)
        self.interface_data.Initialize()

        self.dummy_solver_wrapper = DummySolverWrapper(
            {"data_4_testing": self.interface_data})
Example #3
def executeComputeArea_Task(pickled_model):
    # Unpickling model
    serialized_model = pickle.loads(pickled_model)

    # Unserializing model
    deserialized_model = KratosMultiphysics.Model()
    serialized_model.Load("ModelSerialization", deserialized_model)

    model_part = deserialized_model.GetModelPart("ModelPart").GetRootModelPart()
    ## Construct and execute the Parallel fill communicator
    ParallelFillCommunicator = KratosMPI.ParallelFillCommunicator(model_part)
    ParallelFillCommunicator.Execute()
    communicator = model_part.GetCommunicator()

    # Computing local areas
    for node in model_part.Nodes:
        node.SetValue(KratosMultiphysics.NODAL_AREA, 0.0)

    for elem in model_part.Elements:
        for node in elem.GetNodes():
            current_nodal_area = node.GetValue(KratosMultiphysics.NODAL_AREA)
            current_nodal_area += 1/3*elem.GetGeometry().Area()
            node.SetValue(KratosMultiphysics.NODAL_AREA, current_nodal_area)

    # Assembling nodal values
    communicator.AssembleNonHistoricalData(KratosMultiphysics.NODAL_AREA)

    # Computing sum of total area to check results.
    local_sum = sum(node.GetValue(KratosMultiphysics.NODAL_AREA) for node in model_part.Nodes if node.GetSolutionStepValue(KratosMultiphysics.PARTITION_INDEX) == communicator.GetDataCommunicator().Rank())
    total_sum = communicator.GetDataCommunicator().SumAll(local_sum)

    return total_sum
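
For reference, a minimal caller-side sketch showing how pickled_model can be produced; it assumes the model was serialized with KratosMultiphysics.StreamSerializer under the same "ModelSerialization" tag that the task loads above:

def pickleModelForTask(model):
    # Serialize the whole Model under the tag the task expects; StreamSerializer
    # objects are picklable, which is what allows shipping them to a remote task.
    serializer = KratosMultiphysics.StreamSerializer()
    serializer.Save("ModelSerialization", model)
    return pickle.dumps(serializer, 2)

# total_area = executeComputeArea_Task(pickleModelForTask(model))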
    def test_HDF5NodalFlagIO(self):
        with ControlledExecutionScope(
                os.path.dirname(os.path.realpath(__file__))):
            current_model = Model()
            write_model_part = current_model.CreateModelPart("write")
            KratosMPI.ModelPartCommunicatorUtilities.SetMPICommunicator(
                write_model_part)
            self._initialize_model_part(write_model_part)
            hdf5_file = self._get_file()
            hdf5_model_part_io = self._get_model_part_io(hdf5_file)
            hdf5_model_part_io.WriteModelPart(write_model_part)
            read_model_part = current_model.CreateModelPart("read")
            KratosMPI.ModelPartCommunicatorUtilities.SetMPICommunicator(
                read_model_part)
            hdf5_model_part_io.ReadModelPart(read_model_part)
            KratosMPI.ParallelFillCommunicator(
                read_model_part.GetRootModelPart()).Execute()
            hdf5_nodal_flag_io = self._get_nodal_flag_io(hdf5_file)
            hdf5_nodal_flag_io.WriteNodalFlags(write_model_part.Nodes)
            hdf5_nodal_flag_io.ReadNodalFlags(
                read_model_part.Nodes, read_model_part.GetCommunicator())

            # Check flags.
            for read_node, write_node in zip(read_model_part.Nodes,
                                             write_model_part.Nodes):
                self.assertEqual(read_node.Is(SLIP), write_node.Is(SLIP))
                self.assertEqual(read_node.Is(ACTIVE), write_node.Is(ACTIVE))
            kratos_utilities.DeleteFileIfExisting(
                "test_hdf5_model_part_io_mpi.h5")
Example #5
def ImportChimeraModelparts(main_modelpart, chimera_mp_import_settings_list, material_file="", parallel_type="OpenMP"):
    '''
        This function extends the functionality of the
        mdpa_manipulator from: https://github.com/philbucher/mdpa-manipulator

        main_modelpart      : The model part to which the new model parts are appended.
        chimera_mp_import_settings_list : The list of import settings for all chimera model parts.
        Each entry holds the "model_import_settings" read below (see the usage sketch after this function):
        {
            "input_type": "mdpa",
            "input_filename": "SOME"
        }
    '''
    if parallel_type == "OpenMP":
        for mp_import_setting in chimera_mp_import_settings_list:
            mdpa_file_name = mp_import_setting["input_filename"].GetString()
            if mdpa_file_name.endswith('.mdpa'):
                mdpa_file_name = mdpa_file_name[:-5]

            model = KratosMultiphysics.Model()
            model_part = model.CreateModelPart("new_modelpart")
            KratosChimera.TransferSolutionStepData(main_modelpart, model_part)

            ReadModelPart(mdpa_file_name, model_part, material_file)
            AddModelPart(main_modelpart, model_part)
    elif parallel_type == "MPI":
        input_settings = KratosMultiphysics.Parameters("""{
        "model_import_settings":{
            "input_type": "mdpa",
            "input_filename": "SOME"
        },
        "echo_level":1
        }""")

        for mp_import_setting in chimera_mp_import_settings_list:
            model = KratosMultiphysics.Model()
            model_part = model.CreateModelPart("new_modelpart")
            KratosChimera.TransferSolutionStepData(main_modelpart, model_part)
            model_part.AddNodalSolutionStepVariable(KratosMultiphysics.PARTITION_INDEX)
            mdpa_file_name = mp_import_setting["input_filename"].GetString()
            if mdpa_file_name.endswith('.mdpa'):
                mdpa_file_name = mdpa_file_name[:-5]
            mp_import_setting["input_filename"].SetString(mdpa_file_name)
            input_settings["model_import_settings"] = mp_import_setting

            from KratosMultiphysics.mpi import distributed_import_model_part_utility

            mpi_import_utility = distributed_import_model_part_utility.DistributedImportModelPartUtility(model_part, input_settings)
            mpi_import_utility.ImportModelPart()
            #mpi_import_utility.CreateCommunicators()
            AddModelPart(main_modelpart, model_part, is_mpi=True)

            ## Construct and execute the Parallel fill communicator (also sets the MPICommunicator)
            import KratosMultiphysics.mpi as KratosMPI
            ParallelFillCommunicator = KratosMPI.ParallelFillCommunicator(main_modelpart.GetRootModelPart())
            ParallelFillCommunicator.Execute()
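
A hedged usage sketch for ImportChimeraModelparts (file names are hypothetical); each list entry carries the import settings the function reads, and main_modelpart is assumed to already hold its solution-step variables so that TransferSolutionStepData can copy them over:

model = KratosMultiphysics.Model()
main_model_part = model.CreateModelPart("main")
chimera_mp_import_settings_list = [
    KratosMultiphysics.Parameters("""{
        "input_type"     : "mdpa",
        "input_filename" : "background_mesh"
    }"""),
    KratosMultiphysics.Parameters("""{
        "input_type"     : "mdpa",
        "input_filename" : "patch_mesh"
    }""")
]
ImportChimeraModelparts(main_model_part, chimera_mp_import_settings_list, parallel_type="OpenMP")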
    def CreateCommunicators(self):
        ## Construct and execute the Parallel fill communicator (also sets the MPICommunicator)
        ParallelFillCommunicator = KratosMPI.ParallelFillCommunicator(
            self.main_model_part.GetRootModelPart())
        ParallelFillCommunicator.Execute()

        KratosMultiphysics.Logger.PrintInfo(
            "::[DistributedImportModelPartUtility]::",
            "MPI communicators constructed.")
    def test_HDF5ModelPartIO(self):
        with ControlledExecutionScope(os.path.dirname(os.path.realpath(__file__))):
            current_model = Model()
            write_model_part = current_model.CreateModelPart("write")
            KratosMPI.ModelPartCommunicatorUtilities.SetMPICommunicator(write_model_part)
            self._initialize_model_part(write_model_part)
            hdf5_file = self._get_file()
            hdf5_model_part_io = self._get_model_part_io(hdf5_file)
            hdf5_model_part_io.WriteModelPart(write_model_part)
            read_model_part = current_model.CreateModelPart("read")
            KratosMPI.ModelPartCommunicatorUtilities.SetMPICommunicator(read_model_part)
            hdf5_model_part_io.ReadModelPart(read_model_part)
            KratosMPI.ParallelFillCommunicator(read_model_part.GetRootModelPart()).Execute()
            read_model_part.GetCommunicator().SynchronizeNodalSolutionStepsData()
            # Check nodes (node order should be preserved on read/write to ensure consistency with nodal results).
            self.assertEqual(read_model_part.NumberOfNodes(), write_model_part.NumberOfNodes())
            for read_node, write_node in zip(read_model_part.Nodes, write_model_part.Nodes):
                self.assertEqual(read_node.Id, write_node.Id)
                self.assertEqual(read_node.X, write_node.X)
                self.assertEqual(read_node.Y, write_node.Y)
                self.assertEqual(read_node.Z, write_node.Z)
            # Check elements.
            self.assertEqual(read_model_part.NumberOfElements(), write_model_part.NumberOfElements())
            first_elem_id = next(iter(read_model_part.Elements)).Id
            read_model_part.GetElement(first_elem_id)  # Force a sort, since the ordering is scrambled by OpenMP.
            for read_elem, write_elem in zip(read_model_part.Elements, write_model_part.Elements):
                self.assertEqual(read_elem.Id, write_elem.Id)
                self.assertEqual(read_elem.Properties.Id, write_elem.Properties.Id)
                self.assertEqual(len(read_elem.GetNodes()), len(write_elem.GetNodes()))
                for read_elem_node, write_elem_node in zip(read_elem.GetNodes(), write_elem.GetNodes()):
                    self.assertEqual(read_elem_node.Id, write_elem_node.Id)
            # Check conditions.
            self.assertEqual(read_model_part.NumberOfConditions(), write_model_part.NumberOfConditions())
            first_cond_id = next(iter(read_model_part.Conditions)).Id
            read_model_part.GetCondition(first_cond_id)  # Force a sort, since the ordering is scrambled by OpenMP.
            for read_cond, write_cond in zip(read_model_part.Conditions, write_model_part.Conditions):
                self.assertEqual(read_cond.Id, write_cond.Id)
                self.assertEqual(read_cond.Properties.Id, write_cond.Properties.Id)
                self.assertEqual(len(read_cond.GetNodes()), len(write_cond.GetNodes()))
                for read_cond_node, write_cond_node in zip(read_cond.GetNodes(), write_cond.GetNodes()):
                    self.assertEqual(read_cond_node.Id, write_cond_node.Id)
            # Check process info.
            self.assertEqual(read_model_part.ProcessInfo[DOMAIN_SIZE], write_model_part.ProcessInfo[DOMAIN_SIZE])
            self.assertEqual(read_model_part.ProcessInfo[TIME], write_model_part.ProcessInfo[TIME])
            read_vector = read_model_part.ProcessInfo[INITIAL_STRAIN]
            write_vector = write_model_part.ProcessInfo[INITIAL_STRAIN]
            self.assertEqual(read_vector.Size(), write_vector.Size())
            for i in range(len(read_vector)):
                self.assertEqual(read_vector[i], write_vector[i])
            read_matrix = read_model_part.ProcessInfo[GREEN_LAGRANGE_STRAIN_TENSOR]
            write_matrix = write_model_part.ProcessInfo[GREEN_LAGRANGE_STRAIN_TENSOR]
            self.assertEqual(read_matrix.Size1(), write_matrix.Size1())
            self.assertEqual(read_matrix.Size2(), write_matrix.Size2())
            for i in range(read_matrix.Size1()):
                for j in range(read_matrix.Size2()):
                    self.assertEqual(read_matrix[i,j], write_matrix[i,j])
            kratos_utilities.DeleteFileIfExisting("test_hdf5_model_part_io_mpi.h5")
    def _read_model_part_mpi(self, main_model_part):

        if self.communicator.Size() == 1:
            self.skipTest(
                "Test can be run only using more than one mpi process")

        ## Add variables to the model part
        main_model_part.AddNodalSolutionStepVariable(
            KratosMultiphysics.DENSITY)
        main_model_part.AddNodalSolutionStepVariable(
            KratosMultiphysics.VISCOSITY)
        main_model_part.AddNodalSolutionStepVariable(
            KratosMultiphysics.DISPLACEMENT)
        main_model_part.AddNodalSolutionStepVariable(
            KratosMultiphysics.PARTITION_INDEX)

        ## Serial partition of the original .mdpa file
        input_filename = "test_mpi_communicator"
        if self.communicator.Rank() == 0:

            # Original .mdpa file reading
            model_part_io = KratosMultiphysics.ModelPartIO(input_filename)

            # Partition of the original .mdpa file
            number_of_partitions = self.communicator.Size()  # Number of partitions equals the number of processors
            domain_size = main_model_part.ProcessInfo[
                KratosMultiphysics.DOMAIN_SIZE]
            verbosity = 0
            sync_conditions = True  # Make sure that each condition goes to the same partition as the element it is a face of

            partitioner = KratosMetis.MetisDivideHeterogeneousInputProcess(
                model_part_io, number_of_partitions, domain_size, verbosity,
                sync_conditions)
            partitioner.Execute()

            KratosMultiphysics.Logger.PrintInfo("TestMPICommunicator",
                                                "Metis divide finished.")

        self.communicator.Barrier()

        ## Read the partitioned .mdpa files
        mpi_input_filename = input_filename + "_" + str(self.communicator.Rank())
        model_part_io = KratosMultiphysics.ModelPartIO(mpi_input_filename)
        model_part_io.ReadModelPart(main_model_part)

        ## Construct and execute the Parallel fill communicator
        ParallelFillCommunicator = KratosMPI.ParallelFillCommunicator(
            main_model_part.GetRootModelPart())
        ParallelFillCommunicator.Execute()

        ## Check submodelpart of each main_model_part of each processor
        self.assertTrue(main_model_part.HasSubModelPart("Skin"))
        skin_sub_model_part = main_model_part.GetSubModelPart("Skin")
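
For completeness, a hedged sketch (test name hypothetical) of how this helper could be driven; DOMAIN_SIZE must be set in the ProcessInfo before the call, because the Metis partitioner reads it from there:

    def test_skin_partitioning(self):
        model = KratosMultiphysics.Model()
        main_model_part = model.CreateModelPart("main")
        main_model_part.ProcessInfo[KratosMultiphysics.DOMAIN_SIZE] = 2
        self._read_model_part_mpi(main_model_part)
        # ... further checks on the partitioned "Skin" sub-model-part go here.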
Example #9
    def setUp(self):
        self.model = KM.Model()
        self.root_model_part = self.model.CreateModelPart("root_mp")
        self.root_model_part.AddNodalSolutionStepVariable(KM.PARTITION_INDEX)
        self.dimension = 3

        self.root_model_part.CreateNewProperties(0)

        data_comm = KM.DataCommunicator.GetDefault()
        my_pid = data_comm.Rank()
        self.num_nodes = my_pid % 5 + 4  # num_nodes in range (4 ... 8)
        if my_pid == 4:
            self.num_nodes = 0  # in order to emulate one partition not having local nodes

        smp_nodes_1 = self.root_model_part.CreateSubModelPart("smp_nodes_1")
        smp_nodes_2 = self.root_model_part.CreateSubModelPart("smp_nodes_2")
        smp_nodes_3 = self.root_model_part.CreateSubModelPart("smp_nodes_3")

        scan_sum_num_nodes = data_comm.ScanSum(self.num_nodes)

        if self.num_nodes > 0:
            for i_node, id_node in enumerate(
                    range(scan_sum_num_nodes - self.num_nodes,
                          scan_sum_num_nodes)):
                # this creates the same coords in different ranks, which does not matter for this test
                if i_node < self.num_nodes - 2:  # last two nodes go to other sub-model-part
                    smp_nodes_1.CreateNewNode(id_node + 1, 0.1 * i_node, 0.0,
                                              0.0)
                else:
                    smp_nodes_2.CreateNewNode(id_node + 1, 1.3, 0.1 * i_node,
                                              0.0)

        for node in self.root_model_part.Nodes:
            node.SetSolutionStepValue(KM.PARTITION_INDEX, my_pid)

        # adding the first node of each smp to another smp to emulate an "overlapping" interface
        for node in smp_nodes_1.Nodes:
            smp_nodes_3.AddNodes([node.Id])
            break
        for node in smp_nodes_2.Nodes:
            smp_nodes_3.AddNodes([node.Id])
            break

        if KM.IsDistributedRun():
            KratosMPI.ParallelFillCommunicator(self.root_model_part).Execute()
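
For orientation, DataCommunicator.ScanSum returns the inclusive prefix sum of num_nodes over the ranks, which is what makes the generated node ids contiguous and globally unique; a hypothetical three-rank illustration:

# rank 0: num_nodes = 4 -> ScanSum = 4  -> creates node ids 1..4
# rank 1: num_nodes = 5 -> ScanSum = 9  -> creates node ids 5..9
# rank 2: num_nodes = 6 -> ScanSum = 15 -> creates node ids 10..15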
    def ReadModelPart(self, filename):
        model = KratosMultiphysics.Model()
        model_part = model.CreateModelPart("Test ModelPart")

        model_part.AddNodalSolutionStepVariable(
            KratosMultiphysics.PARTITION_INDEX)
        model_part.AddNodalSolutionStepVariable(
            KratosMultiphysics.DISPLACEMENT)
        model_part.AddNodalSolutionStepVariable(KratosMultiphysics.FORCE)
        model_part.AddNodalSolutionStepVariable(KratosMultiphysics.REACTION)
        model_part.AddNodalSolutionStepVariable(
            KratosMultiphysics.FSI_INTERFACE_RESIDUAL)
        KratosMultiphysics.DataCommunicator.GetDefault().Barrier()

        model_part_io = KratosMultiphysics.ModelPartIO(filename)
        model_part_io.ReadModelPart(model_part)
        model_part.SetBufferSize(2)

        KratosMPI.ParallelFillCommunicator(model_part).Execute()

        return model_part
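
A minimal call-site sketch, as it would appear inside a test of the same class (file name hypothetical); the helper returns a fully partitioned model part with a buffer size of 2:

        model_part = self.ReadModelPart("test_input_file")
        self.assertEqual(model_part.GetBufferSize(), 2)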
    def _ExecuteAfterLoad(self):
        if self.set_mpi_communicator:
            KratosMPI.ParallelFillCommunicator(self.main_model_part.GetRootModelPart()).Execute()
    def _initialize_model_part(self, model_part):
        # Add variables.
        model_part.AddNodalSolutionStepVariable(DISPLACEMENT) # array_1d
        model_part.AddNodalSolutionStepVariable(VELOCITY)
        model_part.AddNodalSolutionStepVariable(ACCELERATION)
        model_part.AddNodalSolutionStepVariable(PRESSURE) # double
        model_part.AddNodalSolutionStepVariable(VISCOSITY)
        model_part.AddNodalSolutionStepVariable(DENSITY)
        model_part.AddNodalSolutionStepVariable(ACTIVATION_LEVEL) # int
        model_part.AddNodalSolutionStepVariable(PARTITION_INDEX)
        # Make a mesh out of two structured rings (inner triangles, outer quads).
        num_proc = DataCommunicator.GetDefault().Size()
        my_pid = DataCommunicator.GetDefault().Rank()
        my_num_quad = 20 # user-defined.
        my_num_tri = 2 * my_num_quad # splits each quad into 2 triangles.
        num_local_nodes = 3 * my_num_quad
        num_ghost_nodes = 3
        local_start_index = num_local_nodes * my_pid + 1
        ghost_start_index = local_start_index + num_local_nodes
        local_node_ids = list(range(local_start_index, local_start_index + num_local_nodes))
        ghost_node_ids = list(range(ghost_start_index, ghost_start_index + num_ghost_nodes))
        partition_index = dict()
        for i in local_node_ids:
            partition_index[i] = my_pid
        if my_pid == num_proc - 1: # Connect ring start and ring end.
            ghost_node_ids = [1, 2, 3]
        for i in ghost_node_ids:
            partition_index[i] = (my_pid + 1) % num_proc
        node_ids = local_node_ids + ghost_node_ids
        # Create nodes.
        for i in node_ids:
            radius = 0.5 + 0.5 * ((i - 1) % 3) / 2.0
            phase = 2.0 * math.pi * ((i - 1) // 3) / float(my_num_quad * num_proc)
            x = radius * math.cos(phase)
            y = radius * math.sin(phase)
            model_part.CreateNewNode(i, x, y, 0.0)
        # Create elements and conditions.
        for i in range(0, num_local_nodes, 3):
            prop_id = 1
            prop = model_part.GetProperties()[prop_id]
            # First triangle.
            eid = local_start_index + 3 * (i // 3)
            nids = [node_ids[i], node_ids[i + 1], node_ids[i + 4]]
            model_part.CreateNewElement("Element2D3N", eid, nids, prop)
            model_part.CreateNewCondition("SurfaceCondition3D3N", eid, nids, prop)
            # Second triangle.
            eid = eid + 1
            nids = [node_ids[i], node_ids[i + 4], node_ids[i + 3]]
            model_part.CreateNewElement("Element2D3N", eid, nids, prop)
            model_part.CreateNewCondition("SurfaceCondition3D3N", eid, nids, prop)
            # Quad.
            eid = eid + 1
            nids = [node_ids[i + 1], node_ids[i + 2], node_ids[i + 5], node_ids[i + 4]]
            model_part.CreateNewElement("Element2D4N", eid, nids, prop)
            model_part.CreateNewCondition("SurfaceCondition3D4N", eid, nids, prop)
        if my_pid == 0:
            # Here we create a special condition that only exists on the first
            # process. This is to test the collective write when at least one
            # process has an empty set.
            model_part.CreateNewCondition("LineCondition2D2N", eid + 1, [node_ids[i + 1], node_ids[i + 2]], prop)
        model_part.SetBufferSize(2)
        # Write some data to the nodal solution steps variables.
        for node in model_part.Nodes:
            node.SetSolutionStepValue(PARTITION_INDEX, partition_index[node.Id])
            node.SetSolutionStepValue(DISPLACEMENT_X, random.random())
            node.SetSolutionStepValue(DISPLACEMENT_Y, random.random())
            node.SetSolutionStepValue(DISPLACEMENT_Z, random.random())
            node.SetSolutionStepValue(VELOCITY_X, random.random())
            node.SetSolutionStepValue(VELOCITY_Y, random.random())
            node.SetSolutionStepValue(VELOCITY_Z, random.random())
            node.SetSolutionStepValue(ACCELERATION_X, random.random())
            node.SetSolutionStepValue(ACCELERATION_Y, random.random())
            node.SetSolutionStepValue(ACCELERATION_Z, random.random())
            node.SetSolutionStepValue(PRESSURE, random.random())
            node.SetSolutionStepValue(VISCOSITY, random.random())
            node.SetSolutionStepValue(DENSITY, random.random())
            node.SetSolutionStepValue(ACTIVATION_LEVEL, random.randint(-100, 100))
        KratosMPI.ParallelFillCommunicator(model_part.GetRootModelPart()).Execute()
        model_part.GetCommunicator().SynchronizeNodalSolutionStepsData()
        # Set some process info variables.
        model_part.ProcessInfo[DOMAIN_SIZE] = 3 # int
        model_part.ProcessInfo[TIME] = 1.2345 # float
        initial_strain = Vector(6)
        for i in range(6):
            initial_strain[i] = math.cos(i)
        model_part.ProcessInfo[INITIAL_STRAIN] = initial_strain # vector
        gl_strain_tensor = Matrix(5,5)
        for i in range(5):
            for j in range(5):
                gl_strain_tensor[i,j] = math.cos(i + j)
        model_part.ProcessInfo[GREEN_LAGRANGE_STRAIN_TENSOR] = gl_strain_tensor # matrix
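
To make the ring construction above easier to follow, a short annotation of the node numbering implied by the radius/phase formulas (illustration only, not additional test code):

# Node ids advance by 3 per radial line:
#   (i - 1) % 3 == 0 -> radius 0.50 (inner ring)
#   (i - 1) % 3 == 1 -> radius 0.75 (middle ring)
#   (i - 1) % 3 == 2 -> radius 1.00 (outer ring)
# Each rank owns my_num_quad = 20 radial lines (60 local nodes) and reuses the
# first radial line of the next rank as its 3 ghost nodes; the last rank closes
# the ring by taking ghost ids [1, 2, 3].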