Example #1
def test_simple_remote_workflow(simple_bar, local_server):
    data_sources1 = core.DataSources(simple_bar)
    wf = core.Workflow()
    op = ops.result.displacement(data_sources=data_sources1)
    average = core.operators.math.norm_fc(op)

    wf.add_operators([op, average])
    wf.set_output_name("out", average.outputs.fields_container)

    local_wf = core.Workflow()
    min_max = ops.min_max.min_max_fc()
    local_wf.add_operator(min_max)
    local_wf.set_input_name("in", min_max.inputs.fields_container)
    local_wf.set_output_name("tot_output", min_max.outputs.field_max)

    grpc_stream_provider = ops.metadata.streams_provider()
    grpc_data_sources = core.DataSources()
    grpc_data_sources.set_result_file_path(
        local_server.ip + ":" + str(local_server.port), "grpc")
    grpc_stream_provider.inputs.data_sources(grpc_data_sources)

    remote_workflow_prov = core.Operator("remote_workflow_instantiate")
    remote_workflow_prov.connect(3, grpc_stream_provider, 0)
    remote_workflow_prov.connect(0, wf)

    remote_workflow = remote_workflow_prov.get_output(0, core.types.workflow)

    local_wf.connect_with(remote_workflow, ("out", "in"))
    max = local_wf.get_output("tot_output", core.types.field)
    assert np.allclose(max.data, [2.52368345e-05])
Example #2
def test_multi_process_chain_remote_workflow():
    files = examples.download_distributed_files()
    wf = core.Workflow()
    op = ops.result.displacement()
    average = core.operators.math.norm_fc(op)

    wf.add_operators([op, average])
    wf.set_input_name("data_sources", op.inputs.data_sources)
    wf.set_output_name("distrib", average.outputs.fields_container)
    workflows = []
    for i in files:
        data_sources1 = core.DataSources(files[i])

        grpc_stream_provider = ops.metadata.streams_provider()
        grpc_data_sources = core.DataSources()
        grpc_data_sources.set_result_file_path(
            local_servers[i].ip + ":" + str(local_servers[i].port), "grpc")
        grpc_stream_provider.inputs.data_sources(grpc_data_sources)

        remote_workflow_prov = core.Operator("remote_workflow_instantiate")
        remote_workflow_prov.connect(3, grpc_stream_provider, 0)
        remote_workflow_prov.connect(0, wf)
        remote_workflow = remote_workflow_prov.get_output(
            0, core.types.workflow)

        remote_workflow.connect("data_sources", data_sources1)
        workflows.append(remote_workflow)

    local_wf = core.Workflow()
    merge = ops.utility.merge_fields_containers()
    min_max = ops.min_max.min_max_fc(merge)
    local_wf.add_operator(merge)
    local_wf.add_operator(min_max)
    local_wf.set_output_name("tot_output", min_max.outputs.field_max)
    for i, wf in enumerate(workflows):
        local_wf.set_input_name("distrib" + str(i), merge, i)
    grpc_stream_provider = ops.metadata.streams_provider()
    grpc_data_sources = core.DataSources()
    grpc_data_sources.set_result_file_path(
        local_servers[2].ip + ":" + str(local_servers[2].port), "grpc")
    grpc_stream_provider.inputs.data_sources(grpc_data_sources)

    remote_workflow_prov = core.Operator("remote_workflow_instantiate")
    remote_workflow_prov.connect(3, grpc_stream_provider, 0)
    remote_workflow_prov.connect(0, local_wf)
    remote_workflow = remote_workflow_prov.get_output(0, core.types.workflow)

    for i, wf in enumerate(workflows):
        remote_workflow.connect_with(wf, ("distrib", "distrib" + str(i)))

    max = remote_workflow.get_output("tot_output", core.types.field)
    assert np.allclose(max.data, [10.03242272])
Example #3
def test_create_op_in_chain(plate_msup):
    ds = core.DataSources(plate_msup)
    s = op.result.stress(time_scoping=0.015, data_sources=ds)
    eqv1 = op.invariant.von_mises_eqv_fc(op.averaging.to_nodal_fc(s))
    fc1 = eqv1.outputs.fields_container()
    eqv2 = op.invariant.von_mises_eqv_fc(
        op.result.stress(
            time_scoping=0.015, data_sources=ds, requested_location=core.locations.nodal
        )
    )
    fc2 = eqv2.outputs.fields_container()
    identical = op.logic.identical_fc(fc1, fc2)
    out = identical.outputs.boolean()
    assert out == True
    identical = op.logic.identical_fc(eqv2, eqv1)
    out = identical.outputs.boolean()
    assert out == True
    identical = op.logic.identical_fc(eqv2.outputs, eqv1.outputs)
    out = identical.outputs.boolean()
    assert out == True
    identical = op.logic.identical_fc(
        eqv2.outputs.fields_container, eqv1.outputs.fields_container
    )
    out = identical.outputs.boolean()
    assert out == True
Example #4
def cff_data_sources():
    """Create a data sources with a cas and a dat file of fluent"""
    ds = core.DataSources()
    files = examples.download_fluent_files()
    ds.set_result_file_path(files["cas"], "cas")
    ds.add_file_path(files["dat"], "dat")
    return ds
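
# A hypothetical usage sketch (the test name is invented for illustration):
# pytest resolves the fixture above, and the resulting data sources feed a
# Model whose metadata lists the available Fluent results.
def test_cff_model_results(cff_data_sources):
    model = core.Model(cff_data_sources)
    print(model.metadata.result_info)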
Example #5
def test_multi_process_transparent_api_create_on_local_remote_ith_address_workflow(
):
    files = examples.download_distributed_files()
    wf = core.Workflow()
    op = ops.result.displacement()
    average = core.operators.math.norm_fc(op)

    wf.add_operators([op, average])
    wf.set_output_name("distrib", average.outputs.fields_container)
    wf.set_input_name("ds", op.inputs.data_sources)

    local_wf = core.Workflow()
    merge = ops.utility.merge_fields_containers()
    min_max = ops.min_max.min_max_fc(merge)
    local_wf.add_operator(merge)
    local_wf.add_operator(min_max)
    local_wf.set_output_name("tot_output", min_max.outputs.field_max)

    for i in files:
        data_sources1 = core.DataSources(files[i])
        remote_wf = wf.create_on_other_server(ip=local_servers[i].ip,
                                              port=local_servers[i].port)
        remote_wf.connect("ds", data_sources1)
        local_wf.set_input_name("distrib" + str(i), merge, i)
        local_wf.connect_with(remote_wf, ("distrib", "distrib" + str(i)))

    max = local_wf.get_output("tot_output", core.types.field)
    assert np.allclose(max.data, [10.03242272])
Example #6
def test_multi_process_transparent_api_connect_local_op_remote_workflow():
    files = examples.download_distributed_files()
    workflows = []
    for i in files:
        wf = core.Workflow(server=local_servers[i])
        op = ops.result.displacement(server=local_servers[i])
        average = core.operators.math.norm_fc(op, server=local_servers[i])

        wf.add_operators([op, average])
        wf.set_output_name("distrib" + str(i),
                           average.outputs.fields_container)
        wf.set_input_name("ds", op.inputs.data_sources)
        workflows.append(wf)

    local_wf = core.Workflow()
    merge = ops.utility.merge_fields_containers()
    min_max = ops.min_max.min_max_fc(merge)
    local_wf.add_operator(merge)
    local_wf.add_operator(min_max)
    local_wf.set_output_name("tot_output", min_max.outputs.field_max)

    for i, wf in enumerate(workflows):
        data_sources1 = core.DataSources(files[i])
        forward = ops.utility.forward(data_sources1)
        wf.connect("ds", forward, 0)
        local_wf.set_input_name("distrib" + str(i), merge, i)
        local_wf.connect_with(wf)

    max = local_wf.get_output("tot_output", core.types.field)
    assert np.allclose(max.data, [10.03242272])
Example #7
def test_workflowwithgeneratedcode(allkindofcomplexity):
    disp = core.operators.result.displacement()
    ds = core.DataSources(allkindofcomplexity)
    nodes = [1]
    scop = core.Scoping()
    scop.ids = nodes
    scop.location = "Nodal"
    disp.inputs.data_sources.connect(ds)
    disp.inputs.mesh_scoping.connect(scop)
    a = disp.outputs.fields_container.get_data()
    assert a[0].data[0][0] == 7.120546307743541e-07
    assert len(a[0].data[0]) == 3
    assert len(a[0].data) == 1
    norm = core.operators.math.norm()
    norm.inputs.field.connect(disp.outputs.fields_container)
    b = norm.outputs.field()
    assert b.data[0] == 1.26387078548793e-06
    filt = core.operators.filter.scoping_high_pass()
    filt.inputs.field.connect(norm.outputs.field)
    filt.inputs.threshold.connect(1e-05)
    pow_op = core.operators.math.pow()
    pow_op.inputs.factor.connect(3.0)
    pow_op.inputs.field.connect(norm.outputs.field)
    d = pow_op.outputs.field.get_data()
    assert d.data[0] == 2.0188684707833254e-18
Example #8
def engineering_data_sources():
    """Resolve the path of the "model_with_ns.rst" result file."""
    ds = core.DataSources(resolve_test_file("file.rst", "engineeringData"))
    ds.add_file_path(resolve_test_file("MatML.xml", "engineeringData"),
                     "EngineeringData")
    ds.add_file_path(resolve_test_file("ds.dat", "engineeringData"), "dat")
    return ds
Example #9
def test_calloperators(allkindofcomplexity):
    my_data_sources = core.DataSources(allkindofcomplexity)
    my_model = core.Model(my_data_sources)
    displacement_op = my_model.results.displacement()
    assert isinstance(displacement_op, ansys.dpf.core.dpf_operator.Operator)
    norm_op = my_model.operator("norm_fc")
    assert isinstance(norm_op, ansys.dpf.core.dpf_operator.Operator)
    square_op = core.operators.math.sqr()
    assert isinstance(square_op, ansys.dpf.core.operators.math.sqr)
Example #10
def test_lsdyna(d3plot):
    dpf.load_library("Ans.Dpf.LSDYNA.dll", "lsdyna")
    ds = dpf.DataSources()
    ds.set_result_file_path(d3plot, "d3plot")
    streams = dpf.operators.metadata.streams_provider(ds)
    u = dpf.operators.result.displacement()
    u.inputs.streams_container(streams)
    fc = u.outputs.fields_container()
    assert len(fc[0]) == 3195
Example #11
def test_get_result(allkindofcomplexity):
    stress = core.operators.result.stress_X()
    ds = core.DataSources(allkindofcomplexity)
    stress.inputs.data_sources.connect(ds)
    stress.inputs.requested_location.connect("Nodal")
    avg = core.operators.averaging.to_elemental_fc()
    avg.inputs.fields_container.connect(stress.outputs.fields_container)
    out = avg.outputs.fields_container()
    assert len(out) == 2
    assert len(out[0]) == 1281
    assert np.isclose(out[0].data[3], 9328792.294959497)
Example #12
def test_physics_type_cache(simple_bar):
    ds = dpf.DataSources(simple_bar)
    provider = dpf.operators.metadata.result_info_provider(data_sources=ds)
    res_info = provider.outputs.result_info()
    assert len(res_info._cache.cached) == 0
    res_info.unit_system
    assert len(res_info._cache.cached) == 1
    res_info.physics_type
    if server_meet_version("3.0", ds._server):
        assert len(res_info._cache.cached) == 2
    else:
        assert len(res_info._cache.cached) == 1
Example #13
def test_plot_meshes_container_1(multishells):
    model = core.Model(multishells)
    mesh = model.metadata.meshed_region
    split_mesh_op = core.Operator("split_mesh")
    split_mesh_op.connect(7, mesh)
    split_mesh_op.connect(13, "mat")
    meshes_cont = split_mesh_op.get_output(0, core.types.meshes_container)
    disp_op = core.Operator("U")
    disp_op.connect(7, meshes_cont)
    ds = core.DataSources(multishells)
    disp_op.connect(4, ds)
    disp_fc = disp_op.outputs.fields_container()
    meshes_cont.plot(disp_fc)
Example #14
def test_makeconnections(allkindofcomplexity):
    my_data_sources = core.DataSources(allkindofcomplexity)
    my_model = core.Model(my_data_sources)
    displacement_op = my_model.results.displacement()
    norm_op = my_model.operator("norm")
    square_op = core.operators.math.sqr()
    assert len(displacement_op.inputs._connected_inputs) == 2
    assert len(norm_op.inputs._connected_inputs) == 0
    # assert len(square_op.inputs._connected_inputs)==0
    norm_op.inputs.connect(displacement_op.outputs)
    square_op.inputs.field.connect(norm_op.outputs.field)
    assert len(displacement_op.inputs._connected_inputs) == 2
    assert len(norm_op.inputs._connected_inputs) == 1
    assert len(square_op.inputs._connected_inputs) == 1
    square_op.inputs.connect(norm_op.outputs.field)
    assert len(square_op.inputs._connected_inputs) == 1
    square_op.inputs.field.connect(norm_op.outputs)
    assert len(square_op.inputs._connected_inputs) == 1
Example #15
def test_remote_workflow_info(local_server):
    wf = core.Workflow()
    op = ops.result.displacement()
    average = core.operators.math.norm_fc(op)

    wf.add_operators([op, average])
    wf.set_input_name("data_sources", op.inputs.data_sources)
    wf.set_output_name("distrib", average.outputs.fields_container)
    grpc_stream_provider = ops.metadata.streams_provider()
    grpc_data_sources = core.DataSources()
    grpc_data_sources.set_result_file_path(
        local_server.ip + ":" + str(local_server.port), "grpc")
    grpc_stream_provider.inputs.data_sources(grpc_data_sources)
    remote_workflow_prov = core.Operator("remote_workflow_instantiate")
    remote_workflow_prov.connect(3, grpc_stream_provider, 0)
    remote_workflow_prov.connect(0, wf)
    remote_workflow = remote_workflow_prov.get_output(0, core.types.workflow)
    assert "data_sources" in remote_workflow.input_names
    assert "distrib" in remote_workflow.output_names
Example #16
def test_create_op_with_inputs(plate_msup):
    ds = core.DataSources(plate_msup)
    u = core.operators.result.displacement(time_scoping=0.015, data_sources=ds)
    norm = core.operators.math.norm_fc(u)
    fc = norm.outputs.fields_container()
    assert len(fc) == 1
    assert np.allclose(fc[0].data[0], [0.00036435444541115566])
    u = core.operators.result.displacement(time_scoping=0.025, data_sources=ds)
    fc = u.outputs.fields_container()
    assert len(fc) == 1
    assert np.allclose(fc[0].data[0], [1.50367127e-13, 8.96539310e-04, 1.62125644e-05])
    u = core.operators.result.displacement(time_scoping=[0.015, 0.025], data_sources=ds)
    fc = u.outputs.fields_container()
    assert len(fc) == 2
    assert np.allclose(fc[0].data[0], [5.12304110e-14, 3.64308310e-04, 5.79805917e-06])
    assert np.allclose(fc[1].data[0], [1.50367127e-13, 8.96539310e-04, 1.62125644e-05])
    u = core.operators.result.displacement(time_scoping=1, data_sources=ds)
    fc = u.outputs.fields_container()
    assert len(fc) == 1
    assert np.allclose(fc[0].data[0], [1.62364553e-14, 1.47628321e-04, 1.96440004e-06])
Example #17
def test_plot_meshes_container_2(multishells):
    from ansys.dpf import core
    model = core.Model(multishells)
    mesh = model.metadata.meshed_region
    split_mesh_op = core.Operator("split_mesh")
    split_mesh_op.connect(7, mesh)
    split_mesh_op.connect(13, "mat")
    meshes_cont = split_mesh_op.get_output(0, core.types.meshes_container)
    disp_op = core.Operator("U")
    disp_op.connect(7, meshes_cont)
    ds = core.DataSources(multishells)
    disp_op.connect(4, ds)
    disp_fc = disp_op.outputs.fields_container()
    meshes_cont_2 = core.MeshesContainer()
    meshes_cont_2.labels = meshes_cont.labels
    disp_fc_2 = core.FieldsContainer()
    disp_fc_2.labels = meshes_cont.labels
    for i in range(len(meshes_cont) - 10):
        lab = meshes_cont.get_label_space(i)
        meshes_cont_2.add_mesh(lab, meshes_cont.get_mesh(lab))
        disp_fc_2.add_field(lab, disp_fc.get_field(lab))
    meshes_cont_2.plot(disp_fc_2)
Example #18
os.remove(file_path)

###############################################################################
# Download CSV Result File
# ~~~~~~~~~~~~~~~~~~~~~~~~~
# Download the file ``simple_bar_fc.csv``:

downloaded_client_file_path = os.path.join(os.getcwd(), "simple_bar_fc_downloaded.csv")
core.download_file(server_file_path, downloaded_client_file_path)

###############################################################################
# Load CSV Result File as Operator Input
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Load the fields container contained in the CSV file as an operator input:

my_data_sources = core.DataSources(downloaded_client_file_path)
import_csv_operator = core.operators.serialization.csv_to_field()
import_csv_operator.inputs.data_sources.connect(my_data_sources)
downloaded_fc_out = import_csv_operator.outputs.fields_container()
mesh.plot(downloaded_fc_out)

# Remove file to avoid polluting.
os.remove(downloaded_client_file_path)

###############################################################################
# Make Operations Over the Imported Fields Container
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Use this fields container:

min_max_op = core.operators.min_max.min_max_fc()
min_max_op.inputs.fields_container.connect(downloaded_fc_out)
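
# The snippet stops here in the source; a minimal completion sketch, using
# the ``min_max_fc`` output pins (``field_min``/``field_max``) seen in the
# other examples:
min_field = min_max_op.outputs.field_min()
max_field = min_max_op.outputs.field_max()
print("Min:", min_field.data, "Max:", max_field.data)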
Example #19

files_aux = [base_path + r'/file0.rst', base_path + r'/file1.rst']

###############################################################################
# Create the operators on the servers
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# On each server, we create two new operators, one for 'displacement'
# computations and one 'mesh_provider' operator, and then define their data
# sources. The displacement and mesh_provider operators receive data from
# their respective data files on each server.
remote_displacement_operators = []
remote_mesh_operators = []
for i, server in enumerate(remote_servers):
    displacement = ops.result.displacement(server=server)
    mesh = ops.mesh.mesh_provider(server=server)
    remote_displacement_operators.append(displacement)
    remote_mesh_operators.append(mesh)
    ds = dpf.DataSources(files[i], server=server)
    ds.add_file_path(files_aux[i])
    displacement.inputs.data_sources(ds)
    mesh.inputs.data_sources(ds)

###############################################################################
# Create a local operators chain for expansion
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# In the following series of operators, we merge the modal basis and the
# meshes, read the modal response, and expand the modal response with the
# modal basis.

merge_fields = ops.utility.merge_fields_containers()
merge_mesh = ops.utility.merge_meshes()

ds = dpf.DataSources(base_path + r'/file_load_1.rfrq')
response = ops.result.displacement(data_sources=ds)
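
# The fragment is truncated here. In the full distributed MSUP expansion
# example, the remote operators are merged and the response is expanded
# roughly as follows (the ``modal_superposition`` operator and its pin
# names are assumptions):
for i, server in enumerate(remote_servers):
    merge_fields.connect(i, remote_displacement_operators[i], 0)
    merge_mesh.connect(i, remote_mesh_operators[i], 0)
expansion = ops.math.modal_superposition(
    solution_in_modal_space=response, modal_basis=merge_fields)
fc = expansion.outputs.fields_container()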
Example #20
reduces the size of the result files.
"""

from ansys.dpf import core as dpf
from ansys.dpf.core import examples

###############################################################################
# Create the data sources
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# First create a data sources object with the mode shapes and the modal response.
# The expansion is recursive in DPF: first the modal response is read;
# then "upstream" mode shapes are found in the data sources, so they
# are read and expanded (mode shapes x modal response).

msup_files = examples.download_msup_files_to_dict()
data_sources = dpf.DataSources(msup_files["rfrq"])
up_stream_data_sources = dpf.DataSources(msup_files["mode"])
up_stream_data_sources.add_file_path(msup_files["rst"])

data_sources.add_upstream(up_stream_data_sources)

###############################################################################
# Compute displacements
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Once the upstream structure is set in the data sources (with ``add_upstream``),
# computing displacements with or without expansion, in a harmonic, transient,
# or modal analysis, has the exact same syntax.

model = dpf.Model(data_sources)
disp = model.results.displacement.on_all_time_freqs.eval()
Example #21
###############################################################################
# Import dpf module and its examples files, and create a temporary directory

import os
import tempfile

from ansys.dpf import core as dpf
from ansys.dpf.core import examples
from ansys.dpf.core import operators as ops

tmpdir = tempfile.mkdtemp()

###############################################################################
# Create the operator and connect the data sources

ds = dpf.DataSources(examples.download_sub_file())

matrices_provider = ops.result.cms_matrices_provider()
matrices_provider.inputs.data_sources.connect(ds)

###############################################################################
# Get result fields container that contains the reduced matrices

fields = matrices_provider.outputs.fields_container()

len(fields)

fields[0].data

###############################################################################
# Export the result fields container to HDF5 format
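
# The snippet stops before the export itself; a minimal sketch, assuming the
# ``serialize_to_hdf5`` operator from the serialization namespace:
h5_op = ops.serialization.serialize_to_hdf5()
h5_op.inputs.data1.connect(matrices_provider.outputs)
h5_op.inputs.file_path.connect(os.path.join(tmpdir, "matrices.h5"))
h5_op.run()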
Example #22
###############################################################################
# Plot the displacement:
print(model.metadata.meshed_region.plot(disp_op.outputs.fields_container()))

###############################################################################
# Scripting Operator Syntax
# ~~~~~~~~~~~~~~~~~~~~~~~~~~
# Because DPF provides a scripting syntax, knowing
# an operator's "string name" is not mandatory.
# While this example is similar to the above script, it uses the DPF
# scripting syntax.

###############################################################################
# Instead of using a ``Model`` class instance, use a ``DataSources`` object
# directly. The ``DataSources`` constructor input is a path.
ds = dpf.DataSources(examples.static_rst)
print(examples.static_rst)

###############################################################################
# Instantiate the operators and connect them:

disp_op = dpf.operators.result.displacement()
disp_op.inputs.data_sources.connect(ds)
norm_op = dpf.operators.math.norm_fc()
norm_op.inputs.connect(disp_op.outputs)
mm_op = dpf.operators.min_max.min_max_fc()
mm_op.inputs.connect(norm_op.outputs)

###############################################################################
# Get the output and print the result data:
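# A minimal sketch of the step announced above, using the ``min_max_fc``
# output pins as in the other examples:
min_field = mm_op.outputs.field_min()
max_field = mm_op.outputs.field_max()
print(min_field.data)
print(max_field.data)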

files = examples.download_distributed_files()
server_file_paths = [
    dpf.upload_file_in_tmp_folder(files[0], server=remote_servers[0]),
    dpf.upload_file_in_tmp_folder(files[1], server=remote_servers[1])
]

###############################################################################
# First operator chain.

remote_operators = []

stress1 = ops.result.stress(server=remote_servers[0])
remote_operators.append(stress1)
ds = dpf.DataSources(server_file_paths[0], server=remote_servers[0])
stress1.inputs.data_sources(ds)

###############################################################################
# Second operator chain.

stress2 = ops.result.stress(server=remote_servers[1])
mul = stress2 * 2.0
remote_operators.append(mul)
ds = dpf.DataSources(server_file_paths[1], server=remote_servers[1])
stress2.inputs.data_sources(ds)

###############################################################################
# Local merge operator.

merge = ops.utility.merge_fields_containers()
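
# The snippet is truncated here; a sketch of the remaining step, mirroring
# the connection pattern used in the other distributed examples (the merged
# output pin name is an assumption):
for i, remote_operator in enumerate(remote_operators):
    merge.connect(i, remote_operator, 0)
fc = merge.outputs.merged_fields_container()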
Example #23

4th step: compare the nodal stress from the reference data source with the
nodal stress computed by the extrapolation method.

"""

from ansys.dpf import core as dpf
from ansys.dpf.core import examples

###############################################################################
# Get the data sources for the integration-point results and for the reference results
datafile = examples.download_extrapolation_3d_result()

# integration points (Gaussian points)
data_integration_points = datafile["file_integrated"]
data_sources_integration_points = dpf.DataSources(data_integration_points)

# reference
dataSourceref = datafile["file_ref"]
data_sources_ref = dpf.DataSources(dataSourceref)

# get the mesh
model = dpf.Model(data_integration_points)
mesh = model.metadata.meshed_region

# Instantiate the scoping operator
op_scoping = dpf.operators.scoping.split_on_property_type()
op_scoping.inputs.mesh.connect(mesh)
op_scoping.inputs.requested_location.connect("Elemental")
mesh_scoping = op_scoping.outputs.mesh_scoping()
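
# The fragment stops here. A hedged sketch of the comparison itself,
# assuming the ``gauss_to_node_fc`` extrapolation operator and its pin
# names, and reusing the ``identical_fc`` pattern from Example #3:
stress_op = dpf.operators.result.stress(
    data_sources=data_sources_integration_points)
ex_stress = dpf.operators.averaging.gauss_to_node_fc()
ex_stress.inputs.fields_container.connect(stress_op.outputs.fields_container)
ex_stress.inputs.mesh.connect(mesh)
fc_extrapolated = ex_stress.outputs.fields_container()

stress_ref = dpf.operators.result.stress(
    data_sources=data_sources_ref, requested_location="Nodal")
fc_ref = stress_ref.outputs.fields_container()

identical = dpf.operators.logic.identical_fc(fc_ref, fc_extrapolated)
print(identical.outputs.boolean())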
Example #25
case one file is written by spatial or temporal domains. The capability of
reading one result from distributed files has been implemented in DPF. This
makes it possible to skip the merging of files on the solver side, which is
time consuming and often duplicates the memory used.
"""

from ansys.dpf import core as dpf
from ansys.dpf.core import examples

###############################################################################
# Create the data sources
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# First create a data sources object with one result file per domain

distributed_file_path = examples.download_distributed_files()
data_sources = dpf.DataSources()
data_sources.set_domain_result_file_path(distributed_file_path[0], 0)
data_sources.set_domain_result_file_path(distributed_file_path[1], 1)

###############################################################################
# Compute displacements
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Once the file architecture is set in the data sources,
# computing displacements with or without domains has the exact same syntax.
# DPF reads parts of the result on each domain and re-merges the results in
# the output fields. The output is identical whether combined or
# distributed files are used.

model = dpf.Model(data_sources)
disp = model.results.displacement.on_all_time_freqs.eval()
Example #26

###############################################################################
# Create the operators on the servers
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# On each server, we create two new operators for the 'displacement' and
# 'norm' computations and define their data sources. The displacement
# operator receives data from the data file on its respective server; the
# norm operator, chained to the displacement operator, takes its input from
# the displacement operator's output.
remote_operators = []
for i, server in enumerate(remote_servers):
    displacement = ops.result.displacement(server=server)
    norm = ops.math.norm_fc(displacement, server=server)
    remote_operators.append(norm)
    ds = dpf.DataSources(server_file_paths[i], server=server)
    displacement.inputs.data_sources(ds)

###############################################################################
# Create a merge_fields_containers operator able to merge the results
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

merge = ops.utility.merge_fields_containers()

###############################################################################
# Connect the operators together and get the output
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

for i, server in enumerate(remote_servers):
    merge.connect(i, remote_operators[i], 0)
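
# The snippet stops after the connections; a one-line completion retrieving
# the merged result (the output pin name is an assumption):
fc = merge.outputs.merged_fields_container()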