Example #1
def main(args):
    """The main loop"""

    # this globbing logic is copied from `filedriver.py`. It may be worth
    # cleaning this up to ensure it handles typical use-cases we encounter.
    import glob
    files = glob.glob(args.glob)

    # In case the filenames aren't zero-padded, we sort first by length (shorter first) and then
    # alphabetically. This is a slight modification based on the question by Adrian and answer by
    # Jochen Ritzel at:
    # https://stackoverflow.com/questions/4659524/how-to-sort-by-length-of-string-followed-by-alphabetical-order
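    # (e.g. ["step10.png", "step2.png"] sorts to ["step2.png", "step10.png"])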
    files.sort(key=lambda item: (len(item), item))

    # initialize Catalyst
    from paraview.catalyst import bridge
    from paraview import print_info, print_warning
    bridge.initialize()

    # add analysis script
    for script in args.script:
        bridge.add_pipeline(script, args.script_version)

    # Figure out if we're running with MPI and, if so, on how many ranks.
    try:
        from mpi4py import MPI
        comm = MPI.COMM_WORLD
        rank = comm.Get_rank()
        num_ranks = comm.Get_size()
    except ImportError:
        print_warning("missing mpi4py, running in serial (non-distributed) mode")
        rank = 0
        num_ranks = 1

    reader = create_reader(files)
    timesteps = reader.TimestepValues[:]
    step = 0
    numsteps = len(timesteps)
    for time in timesteps:
        if args.delay > 0:
            # import sleep under its own name so we don't shadow the loop variable `time`
            from time import sleep
            sleep(args.delay)

        if rank == 0:
            print_info("timestep: {0} of {1}".format((step+1), numsteps))

        dataset, wholeExtent = read_dataset(reader, time, rank, num_ranks)

        # "perform" coprocessing.  results are outputted only if
        # the passed in script says we should at time/step
        bridge.coprocess(time, step, dataset, name=args.channel, wholeExtent=wholeExtent)

        del dataset
        del wholeExtent
        step += 1

    # finalize Catalyst
    bridge.finalize()
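
For context, here is a minimal sketch of the argument parsing this `main(args)` appears to expect, based only on the attributes the snippet reads (`args.glob`, `args.script`, `args.script_version`, `args.delay`, `args.channel`). The flag names and defaults are illustrative assumptions, not taken from the original driver.

import argparse

def make_parser():
    # illustrative only: flag names and defaults are assumptions inferred from
    # the attributes used in main() above, not the original driver's CLI
    parser = argparse.ArgumentParser(description="Catalyst file-driven example")
    parser.add_argument("--glob", required=True,
                        help="glob pattern matching the files to read")
    parser.add_argument("--script", action="append", default=[],
                        help="Catalyst analysis script (may be repeated)")
    parser.add_argument("--script-version", type=int, default=0,
                        help="script version passed to bridge.add_pipeline()")
    parser.add_argument("--delay", type=float, default=0.0,
                        help="optional delay (in seconds) per timestep")
    parser.add_argument("--channel", default="input",
                        help="channel name passed to bridge.coprocess()")
    return parser

if __name__ == "__main__":
    main(make_parser().parse_args())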
Example #2
def main(args):
    """The main loop"""

    # initialize Catalyst
    from paraview.catalyst import bridge
    from paraview import print_info, print_warning
    bridge.initialize()

    # add analysis script
    for script in args.script:
        bridge.add_pipeline(script, args.script_version)

    # Figure out if we're running with MPI and, if so, on how many ranks.
    try:
        from mpi4py import MPI
        comm = MPI.COMM_WORLD
        rank = comm.Get_rank()
        num_ranks = comm.Get_size()
    except ImportError:
        print_warning(
            "missing mpi4py, running in serial (non-distributed) mode")
        rank = 0
        num_ranks = 1

    numsteps = args.timesteps
    for step in range(numsteps):
        if args.delay > 0:
            # import sleep directly so the name `time` stays free for the simulation time below
            from time import sleep
            sleep(args.delay)

        if rank == 0:
            print_info("timestep: {0}/{1}".format(step + 1, numsteps))

        # assume simulation time starts at 0
        time = step / float(numsteps)

        dataset, wholeExtent = create_dataset(step, args, rank, num_ranks)

        # "perform" coprocessing.  results are outputted only if
        # the passed in script says we should at time/step
        bridge.coprocess(time,
                         step,
                         dataset,
                         name=args.channel,
                         wholeExtent=wholeExtent)

        del dataset
        del wholeExtent

    # finalize Catalyst
    bridge.finalize()
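
The `create_dataset()` helper isn't shown in this snippet. Below is a minimal sketch of what such a helper could look like, reusing the wavelet-source calls that appear in Example #4 further down; the use of `vtkRTAnalyticSource` and `GetWholeExtent()` here is an assumption for illustration, not necessarily how the original helper is written.

import math
from vtkmodules.vtkImagingCore import vtkRTAnalyticSource

def create_dataset(step, args, rank, num_ranks):
    # illustrative sketch: generate a time-varying wavelet as stand-in
    # "simulation" data (args is unused in this sketch)
    wavelet = vtkRTAnalyticSource()
    # vary the point data with the timestep
    wavelet.SetMaximum(255 + 200 * math.sin(step))
    # produce only this rank's piece so the same code works with and without MPI
    wavelet.UpdatePiece(rank, num_ranks, 0)
    dataset = wavelet.GetOutputDataObject(0)
    wholeExtent = wavelet.GetWholeExtent()
    return dataset, wholeExtent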
Example #3
def finalize():
    # finalize ParaView Catalyst.
    bridge.finalize()
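
For symmetry, here is a sketch of the setup this `finalize()` helper assumes has already run. `bridge.initialize()` and `bridge.add_pipeline()` are the calls used in Examples #1 and #2, but this `initialize()` wrapper itself is illustrative, not taken from the original module.

from paraview.catalyst import bridge

def initialize(scripts, script_version):
    # initialize ParaView Catalyst and register the analysis scripts
    # (illustrative wrapper; mirrors the calls in Examples #1 and #2)
    bridge.initialize()
    for script in scripts:
        bridge.add_pipeline(script, script_version)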
Example #4
        print("timestep: {0}/{1}".format(step + 1, numsteps))

    # assume simulation time starts at 0
    time = step / float(numsteps)

    # put in some variation in the point data that changes with time
    wavelet.SetMaximum(255 + 200 * math.sin(step))

    # using 'UpdatePiece' lets us generate a subextent based on the
    # 'rank' and 'num_ranks'; thus works seamlessly in distributed and
    # non-distributed modes
    wavelet.UpdatePiece(rank, num_ranks, 0)

    # typically, here you'll have some adaptor code that converts your
    # simulation data into a vtkDataObject subclass. In this example,
    # there's nothing to do since we're directly generating a
    # vtkDataObject.
    dataset = wavelet.GetOutputDataObject(0)

    # "perform" coprocessing.  results are outputted only if
    # the passed in script says we should at time/step
    bridge.coprocess(time,
                     step,
                     dataset,
                     name=args.channel,
                     wholeExtent=wholeExtent)

    del dataset

bridge.finalize()
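
As the "adaptor code" comment in Example #4 notes, a real simulation would convert its own arrays into a vtkDataObject instead of using the wavelet output directly. Here is a minimal sketch of such an adaptor, assuming the simulation data is a 3D NumPy array on a uniform grid; the array name and layout are made up for illustration.

import numpy as np
from vtkmodules.vtkCommonDataModel import vtkImageData
from vtkmodules.util.numpy_support import numpy_to_vtk

def numpy_to_image_data(array3d, name="pressure"):
    # illustrative adaptor: wrap a 3D NumPy array (indexed [x, y, z]) as
    # vtkImageData point data
    nx, ny, nz = array3d.shape
    image = vtkImageData()
    image.SetDimensions(nx, ny, nz)
    # flatten with x varying fastest, which is the point order vtkImageData expects
    flat = np.asfortranarray(array3d).ravel(order="F")
    vtk_array = numpy_to_vtk(flat, deep=True)
    vtk_array.SetName(name)
    image.GetPointData().SetScalars(vtk_array)
    return image

The returned vtkImageData could then be handed to bridge.coprocess() in place of the wavelet output.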