Beispiel #1
0
def TestFetchDataRanks(producer, ranks, allGather=False):
    """Exercise FetchData() with an explicit `SourceRanks` selection.

    Returns True on success; False when unexpected/invalid data is seen.
    """
    print_info("TestFetchDataRanks %s" % repr(ranks))
    dataMap = FetchData(producer,
                        SourceRanks=ranks,
                        GatherOnAllRanks=allGather)

    onSatellite = not allGather and GetIsSymmetric() and GetRank() > 0
    if onSatellite:
        # satellite ranks must not receive any data in this mode
        if dataMap:
            print_error("FetchData should not deliver any data on satellites!")
            return False
        return True

    dataRanks = list(dataMap.keys())
    numRanks = GetNumberOfRanks()
    # only ranks that actually exist in this run can deliver data
    expectedRanks = [r for r in ranks if r < numRanks]

    if dataRanks != expectedRanks:
        print_error("ranks mismatch %s != %s", repr(dataRanks),
                    repr(expectedRanks))
        return False

    # ensure we got valid data from all ranks
    for rank, data in dataMap.items():
        if not ValidateData(producer, data):
            print_error("bad data from rank %d", rank)
            return False
    return True
Beispiel #2
0
def catalyst_finalize():
    """Verify the hook-invocation counts recorded in the global `tracker`."""
    print_info("in '%s::catalyst_finalize'", __name__)
    # initialize/finalize must each run exactly once; execute at least once.
    # (reading the module-global `tracker` needs no `global` statement)
    for hook in ("catalyst_initialize", "catalyst_finalize"):
        assert tracker[hook] == 1
    assert tracker["catalyst_execute"] >= 1
    print_info("All's ok")
Beispiel #3
0
def TestFetchDataBasic(producer, allGather=False):
    """Exercise FetchData() without rank restrictions.

    Returns True on success; False when data is missing or invalid.
    """
    print_info("TestFetchDataBasic")
    dataMap = FetchData(producer, GatherOnAllRanks=allGather)

    onSatellite = not allGather and GetIsSymmetric() and GetRank() > 0
    if onSatellite:
        # satellite ranks must not receive any data in this mode
        if dataMap:
            print_error("FetchData should not deliver any data on satellites!")
            return False
        return True

    dataRanks = list(dataMap.keys())

    # every rank in the run is expected to have delivered data
    numRanks = GetNumberOfRanks()
    if len(dataRanks) != numRanks:
        print_error("rank mismatch len(%s) != %d", repr(dataRanks),
                    numRanks)
        return False

    # ensure we got valid data from all ranks
    for rank, data in dataMap.items():
        if not ValidateData(producer, data):
            print_error("bad data from rank %d", rank)
            return False

    return True
Beispiel #4
0
def main(args):
    """The main loop: glob the input files, then feed each timestep to Catalyst.

    `args` must provide: glob, script, script_version, delay, channel.
    """

    # this globbing logic is copied from `filedriver.py`. It may be worth
    # cleaning this up to ensure it handles typical use-cases we encounter.
    files = glob.glob(args.glob)

    # In case the filenames aren't padded we sort first by shorter length and then
    # alphabetically. This is a slight modification based on the question by Adrian and answer by
    # Jochen Ritzel at:
    # https://stackoverflow.com/questions/4659524/how-to-sort-by-length-of-string-followed-by-alphabetical-order
    files.sort(key=lambda item: (len(item), item))

    # initialize Catalyst
    from paraview.catalyst import bridge
    from paraview import print_info, print_warning
    bridge.initialize()

    # add analysis script
    for script in args.script:
        bridge.add_pipeline(script, args.script_version)

    # Some MPI related stuff to figure out if we're running with MPI
    # and if so, on how many ranks.
    try:
        from mpi4py import MPI
        comm = MPI.COMM_WORLD
        rank = comm.Get_rank()
        num_ranks = comm.Get_size()
    except ImportError:
        print_warning("missing mpi4py, running in serial (non-distributed) mode")
        rank = 0
        num_ranks = 1

    reader = create_reader(files)
    timesteps = reader.TimestepValues[:]
    step = 0
    numsteps = len(timesteps)
    # BUGFIX: the loop variable used to be named `time`; the `import time`
    # below then rebound it to the module, so the module object -- not the
    # timestep value -- was passed to read_dataset()/coprocess() whenever
    # args.delay > 0. Use a distinct name for the timestep value.
    for ts in timesteps:
        if args.delay > 0:
            import time
            time.sleep(args.delay)

        if rank == 0:
            print_info("timestep: {0} of {1}".format((step+1), numsteps))

        dataset, wholeExtent = read_dataset(reader, ts, rank, num_ranks)

        # "perform" coprocessing.  results are outputted only if
        # the passed in script says we should at time/step
        bridge.coprocess(ts, step, dataset, name=args.channel, wholeExtent=wholeExtent)

        del dataset
        del wholeExtent
        step += 1

    # finalize Catalyst
    bridge.finalize()
Beispiel #5
0
def main(args):
    """The main loop"""

    # initialize Catalyst
    from paraview.catalyst import bridge
    from paraview import print_info, print_warning
    bridge.initialize()

    # add analysis script
    for script in args.script:
        bridge.add_pipeline(script, args.script_version)

    # Figure out whether we're running distributed under MPI and,
    # if so, on how many ranks.
    try:
        from mpi4py import MPI
    except ImportError:
        print_warning(
            "missing mpi4py, running in serial (non-distributed) mode")
        rank = 0
        num_ranks = 1
    else:
        comm = MPI.COMM_WORLD
        rank = comm.Get_rank()
        num_ranks = comm.Get_size()

    numsteps = args.timesteps
    for step in range(numsteps):
        if args.delay > 0:
            import time
            time.sleep(args.delay)

        if rank == 0:
            print_info("timestep: {0}/{1}".format(step + 1, numsteps))

        # simulation time is assumed to start at 0; use a name distinct
        # from the `time` module imported above
        sim_time = step / float(numsteps)

        dataset, wholeExtent = create_dataset(step, args, rank, num_ranks)

        # "perform" coprocessing.  results are outputted only if
        # the passed in script says we should at time/step
        bridge.coprocess(sim_time,
                         step,
                         dataset,
                         name=args.channel,
                         wholeExtent=wholeExtent)

        del dataset
        del wholeExtent

    # finalize Catalyst
    bridge.finalize()
Beispiel #6
0
import sys
from paraview import smtesting

# Parse the standard smtesting command-line arguments (data dir, temp dir, ...).
smtesting.ProcessCommandLineArguments()

# Load the SurfaceLIC plugin into the current (global) namespace.
LoadDistributedPlugin('SurfaceLIC', ns=globals())

filename = smtesting.DataDir + '/Testing/Data/disk_out_ref.ex2'
data = OpenDataFile(filename)
UpdatePipeline()
view = CreateRenderView()
rep = Show(view=view, representationType="UnstructuredGridRepresentation")

# Ensure that loading the SurfaceLIC led to adding the SelectLICVectors
# property correctly.
# NOTE(review): the comment names SelectLICVectors but the code queries
# "SelectInputVectors" -- confirm which property name is intended.
print_info(rep.GetProperty("SelectInputVectors"))
print_info(rep.SelectInputVectors)

# Loading a non-existent plugin must fail; reaching print_error/sys.exit
# below therefore indicates a test failure.
try:
    LoadDistributedPlugin("NonExistantPluginName", ns=globals())
    print_error(
        "Error: LoadDistributedPlugin should have thrown a RunTimeError!!!")
    sys.exit(1)
except:
    # We expect an error.
    pass

try:
    MomentVectors()
    print_error(
        "Error: MomentVectors should not exist before loading the plugin")
Beispiel #7
0
import paraview
from paraview import print_info

# Tally, on the paraview module itself, how many times this pipeline
# script has been imported.
try:
    paraview.repeated_pipeline_count += 1
except AttributeError:
    paraview.repeated_pipeline_count = 1

# If you change the txt here, don't forget to update the CMakeLists.txt
# for `PASS_REGULAR_EXPRESSION`.
print_info("%s: Importing 'repeated_pipeline' for %d-th time!" %
           (__name__, paraview.repeated_pipeline_count))
"""This is a test to test the paraview proxy manager API."""
from paraview import servermanager, print_info, print_error

import sys

servermanager.Connect()

for source in dir(servermanager.sources):
    try:
        print_info('Creating %s ...' % source)
        if source in [
                "GenericIOReader", 'EnsembleDataReader', 'openPMDReader'
        ]:
            print_info("...skipping (in exclusion list)")
        else:
            s = getattr(servermanager.sources, source)()
            s.UpdateVTKObjects()
            print_info("...... ok")
    except:
        print_error("failed!")
        raise RuntimeError('Failed to create %s' % source)
Beispiel #9
0
from paraview.simple import *
from paraview import servermanager, print_info, print_error

import time

# Make sure the test driver knows that the process has properly started
print_info ("Process started")
# running tally of failures recorded by the checks below
errors = 0

#-------------------- Helper methods ----------------
def getHost(url):
    """Return the host component of a url shaped like 'scheme://host:port'."""
    # the second ':'-field is '//host'; drop the two leading slashes
    host_field = url.split(':')[1]
    return host_field[2:]

def getScheme(url):
    """Return the scheme (text before the first ':') of *url*."""
    scheme, _, _ = url.partition(':')
    return scheme

def getPort(url):
    """Return the port (third ':'-separated field) of *url* as an int."""
    fields = url.split(':')
    return int(fields[2])
#--------------------
import os

def findInSubdirectory(filename, subdirectory=''):
    """Locate a shared library whose name contains *filename*.

    Walks *subdirectory* (or the current working directory when empty) and
    returns the full path of the first file whose name contains *filename*
    and one of the shared-library markers '.dll', '.so' or '.dylib'
    (substring match, matching the original behavior).

    Raises RuntimeError when no such file is found.
    """
    path = subdirectory or os.getcwd()
    for root, _dirs, names in os.walk(path):
        for name in names:
            if filename in name and any(
                    ext in name for ext in ('.dll', '.so', '.dylib')):
                return os.path.join(root, name)
    raise RuntimeError('File not found')
from paraview import smtesting
from paraview.simple import *
from paraview import print_info

smtesting.ProcessCommandLineArguments()
fnames = ["can.e.4.0", "can.e.4.1", "can.e.4.2", "can.e.4.3"]
fnames = [ "%s/Testing/Data/can.e.4/%s" % (smtesting.DataDir, x) for x in fnames]

reader = OpenDataFile(fnames)
extractSurface = ExtractSurface(reader)
extractSurface.UpdatePipeline()

tempDir = smtesting.GetUniqueTempDirectory(smtesting.TempDir + "/ParallelSerialWriterWithIOSS-")
print_info("Generating results in '%s'", tempDir)
SaveData(tempDir + "/can.stl", extractSurface)

# the parallel serial writer emits one file per piece; read them back
# and group them for rendering
block0 = OpenDataFile(tempDir +"/can0.stl")
block1 = OpenDataFile(tempDir +"/can1.stl")
GroupDatasets(Input=[block0, block1])
Show()
view = Render()
camera = GetActiveCamera()
camera.Azimuth(90)

# BUGFIX: DoRegressionTesting was previously called twice back-to-back
# (the first result was discarded), running the image comparison twice;
# call it exactly once and check the result.
if not smtesting.DoRegressionTesting(view.SMProxy):
    raise smtesting.TestError('Test failed.')
Beispiel #11
0
def catalyst_execute(info):
    """Log that this script's per-timestep Catalyst hook was invoked.

    `info` is the execution-state object Catalyst passes to this hook;
    it is unused here.
    """
    print_info("in '%s::catalyst_execute'", __name__)
Beispiel #12
0
def catalyst_initialize():
    """Log that this script's Catalyst initialization hook was invoked."""
    print_info("in '%s::catalyst_initialize'", __name__)
Beispiel #13
0
# filename: __init__.py
# used to test that Catalyst can load Packages
# correctly.

from paraview.simple import *
from paraview import print_info

# print start marker
print_info("begin '%s'", __name__)

# per-function-name invocation counts, filled in by the `count` decorator below
tracker = {}


def count(f):
    """Decorator: tally invocations of *f* in the module-global `tracker`.

    Counts are keyed by the wrapped function's name so the finalize hook
    can assert how often each Catalyst hook ran.
    """
    import functools

    # functools.wraps preserves f's __name__/__doc__ on the wrapper,
    # which the original version lost.
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        global tracker
        tracker[f.__name__] = tracker.get(f.__name__, 0) + 1
        return f(*args, **kwargs)

    return wrapper


@count
def catalyst_initialize():
    """Log the Catalyst initialization hook; invocation counted by @count."""
    print_info("in '%s::catalyst_initialize'", __name__)


@count
def catalyst_execute(info):
    """Log the per-timestep Catalyst hook; invocation counted by @count.

    `info` is the execution-state object Catalyst passes in; unused here.
    """
    print_info("in '%s::catalyst_execute'", __name__)
Beispiel #14
0
                        repr(expectedRanks))
            return False

        # ensure we got valid data from all ranks
        for rank, data in dataMap.items():
            if not ValidateData(producer, data):
                print_error("bad data from rank %d", rank)
                return False
    return True


if __name__ == "__main__":
    wavelet = Wavelet()
    UpdatePipeline()
    if not TestFetchDataBasic(wavelet):
        raise RuntimeError("TestFetchDataBasic failed")
    if not TestFetchDataRanks(wavelet, [0]):
        raise RuntimeError("TestFetchDataRanks([0]) failed")
    if not TestFetchDataRanks(wavelet, [1, 2]):
        raise RuntimeError("TestFetchDataRanks([1,2]) failed")
    if GetIsSymmetric():
        print_info("In symmetric mode, so try gather on all ranks now")
        if not TestFetchDataBasic(wavelet, True):
            raise RuntimeError("TestFetchDataBasic(allGather = True) failed")
        if not TestFetchDataRanks(wavelet, [0], True):
            raise RuntimeError(
                "TestFetchDataRanks([0], allGather=True) failed")
        if not TestFetchDataRanks(wavelet, [1, 2], True):
            raise RuntimeError(
                "TestFetchDataRanks([1,2], allGather=True) failed")