Example #1
    def setUp(self):
        NX = 10
        size = 2
        t = np.array(range(NX*size), dtype=np.float64)
        tt = t.reshape((size, NX))

        self.temp = TempFile()

        ad.init_noxml()
        ad.allocate_buffer(ad.BUFFER_ALLOC_WHEN.NOW, 10)

        fw = ad.writer(self.temp.path)
        fw.declare_group('group', method='POSIX1')

        fw['NX'] = NX
        fw['size'] = size
        fw['temperature'] = tt
        fw.attrs['/temperature/description'] = "Global array written from 'size' processes"
        fw.attrs["/someSubGroup/anOtherGroup/anOtherAttribute"] = 99
        fw["/someSubGroup/anOtherGroup/anOtherVariable"] = 77
        fw.close()

        ad.finalize()

        self.f = ad.file(self.temp.path)
Example #2
def open_variable(filename, variable_path, distaxis=0):
    """Create a pyDive.adios.ad_ndarray instance from file.

    :param filename: name of adios file.
    :param variable_path: path within adios file to a single variable.
    :param int distaxis: distributed axis
    :return: pyDive.adios.ad_ndarray instance
    """
    fileHandle = ad.file(filename)
    variable = fileHandle.var[variable_path]
    dtype = variable.type
    shape = tuple(variable.dims)
    fileHandle.close()

    result = ad_ndarray(shape, dtype, distaxis, None, None, True)

    target_shapes = result.target_shapes()
    target_offset_vectors = result.target_offset_vectors()

    view = com.getView()
    view.scatter("shape", target_shapes, targets=result.target_ranks)
    view.scatter("offset", target_offset_vectors, targets=result.target_ranks)
    view.execute("{0} = pyDive.arrays.local.ad_ndarray.ad_ndarray('{1}','{2}',shape=shape[0],offset=offset[0])"\
        .format(result.name, filename, variable_path), targets=result.target_ranks)

    return result
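A hypothetical usage sketch of open_variable (the file name 'data.bp' and variable path '/temperature' are assumptions for illustration, not part of the API):

# Hypothetical sketch: open a single on-disk variable as an array
# distributed along axis 0; shape and dtype come from the file's metadata.
temperature = open_variable("data.bp", "/temperature", distaxis=0)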
Example #3
    def setUp(self):
        self.temp = TempFile()

        ad.init_noxml()

        ad.allocate_buffer(ad.BUFFER_ALLOC_WHEN.NOW, 10)
        g = ad.declare_group("temperature", "", ad.FLAG.YES)
        ad.define_var(g, "NX", "", ad.DATATYPE.integer, "", "", "")
        ad.define_var(g, "size", "", ad.DATATYPE.integer, "", "", "")
        ad.define_var(g, "temperature", "", ad.DATATYPE.double, "size,NX", "size,NX", "0,0")
        ad.define_var(g, "temperature", "", ad.DATATYPE.double, "size,NX", "size,NX", "0,0")
        self.msg = "this is a test"
        ad.define_attribute(g, "desc", "", ad.DATATYPE.string, self.msg, "")
        ad.define_attribute(g, "temperature/unit", "", ad.DATATYPE.string, "C", "")
        ad.define_attribute(g, "temperature/desc", "", ad.DATATYPE.string, "description", "")
        ad.define_attribute(g, "/subgroup/subsubgroup/otherattr", "", ad.DATATYPE.string, "another", "")
        ad.define_var(g, "/subgroup/subsubgroup/othervar", "", ad.DATATYPE.integer, "", "", "")
        ad.select_method(g, "POSIX1", "verbose=3", "")

        fd = ad.open("temperature", self.temp.path, "w")
        self.NX = 10
        self.size = 2
        groupsize = 4 + 4 + 8 * self.size * self.NX + 4
        t = np.array(range(self.NX * self.size), dtype=np.float64)
        self.tt = t.reshape((self.size, self.NX))
        ad.set_group_size(fd, groupsize)
        ad.write_int(fd, "NX", self.NX)
        ad.write_int(fd, "size", self.size)
        ad.write(fd, "temperature", self.tt)
        ad.write_int(fd, "/subgroup/subsubgroup/othervar", 99)
        ad.close(fd)

        ad.finalize()

        self.f = ad.file(self.temp.path)
Example #4
    def setUp(self):
        self.temp = TempFile()

        ad.init_noxml()

        ad.allocate_buffer(ad.BUFFER_ALLOC_WHEN.NOW, 10)
        g = ad.declare_group("temperature", "", ad.FLAG.YES)
        ad.define_var(g, "NX", "", ad.DATATYPE.integer, "", "", "")
        ad.define_var(g, "size", "", ad.DATATYPE.integer, "", "", "")
        ad.define_var(g, "temperature", "", ad.DATATYPE.double, "size,NX", "size,NX", "0,0")
        self.msg = "this is a test"
        self.unit = "C"
        ## attr must be <varpath> + / + something without / (line 857, common_read.c)
        ad.define_attribute(g, "temperature/desc", "", ad.DATATYPE.string, self.msg, "")
        ad.define_attribute(g, "temperature/unit", "", ad.DATATYPE.string, self.unit, "")
        ad.select_method(g, "POSIX1", "verbose=3", "")

        fd = ad.open("temperature", self.temp.path, "w")
        self.NX = 10
        self.size = 2
        groupsize = 4 + 4 + 8 * self.size * self.NX
        t = np.array(range(self.NX * self.size), dtype=np.float64)
        self.tt = t.reshape((self.size, self.NX))
        ad.set_group_size(fd, groupsize)
        ad.write_int(fd, "NX", self.NX)
        ad.write_int(fd, "size", self.size)
        ad.write(fd, "temperature", self.tt)
        ad.close(fd)

        ad.finalize()

        self.f = ad.file(self.temp.path)
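Since the attribute paths above follow the <varpath>/<name> pattern, the values can be read back either through the variable's attrs or through the full path, as a later example shows. A minimal read-back sketch, assuming the file written by this setUp:

f = ad.file(self.temp.path)
v = f['temperature']
# Both lookups resolve to the same attribute value:
print(v.attrs['unit'].value)
print(f['temperature/unit'].value)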
Example #5
    def test_adios_groupname(self):
        f = ad.file(self.temp.path)

        # Leading '/' is omitted from the lookup on purpose
        g = f["someSubGroup/anOtherGroup"]

        # now match: f.attrs["/someSubGroup/anOtherGroup/anOtherAttribute"]
        self.assertEqual(g.attrs["anOtherAttribute"], f.attrs["/someSubGroup/anOtherGroup/anOtherAttribute"])

        # now match: f["/someSubGroup/anOtherGroup/anOtherVariable"]
        self.assertEqual(g["anOtherVariable"], f["/someSubGroup/anOtherGroup/anOtherVariable"])
Example #6
    def test_adios_group(self):
        f = ad.file(self.temp.path)
        t = f["temperature"]
        # t could again expose an attrs member dictionary
        # which looks up all attributes starting with t.name

        # now match: f.attrs["/temperature/description"]
        self.assertEqual(t.attrs["description"], f.attrs["/temperature/description"])

        # the same should be possible for groups
        g = f["/someSubGroup/anOtherGroup"]

        # now match: f.attrs["/someSubGroup/anOtherGroup/anOtherAttribute"]
        self.assertEqual(g.attrs["anOtherAttribute"], f.attrs["/someSubGroup/anOtherGroup/anOtherAttribute"])

        # now match: f["/someSubGroup/anOtherGroup/anOtherVariable"]
        self.assertEqual(g["anOtherVariable"], f["/someSubGroup/anOtherGroup/anOtherVariable"])
Example #7
    def test_writer_undefined_var(self):
        self.temp = TempFile()

        NX = 10
        val1 = np.array(list(range(NX)), dtype=np.int32)
        val2 = np.array(list(range(5)), dtype='f8')

        fw = ad.writer(self.temp.path)
        fw.declare_group("group", method="POSIX1")

        fw['NX'] = NX
        fw['val1'] = val1
        fw['val2'] = val2
        fw.close()

        f = ad.file(self.temp.path)
        self.assertEqual(f['NX'][...], NX)
        self.assertTrue((f['val1'][:] == val1).all())
        self.assertTrue((f['val2'][:] == val2).all())
Example #8
    def test_writer_undefined_var(self):
        self.temp = TempFile()

        NX = 10
        val1 = np.array(range(NX), dtype=np.int32)
        val2 = np.array(range(5), dtype="f8")

        fw = ad.writer(self.temp.path)
        fw.declare_group("group", method="POSIX1")

        fw["NX"] = NX
        fw["val1"] = val1
        fw["val2"] = val2
        fw.close()

        f = ad.file(self.temp.path)
        self.assertEqual(f["NX"][:], NX)
        self.assertTrue((f["val1"][:] == val1).all())
        self.assertTrue((f["val2"][:] == val2).all())
Example #9
    def test_writer_transform(self):
        self.temp = TempFile()

        NX = 10
        val1 = np.array(list(range(NX)), dtype=np.int32)
        val2 = np.array(list(range(5)), dtype='f8')

        fw = ad.writer(self.temp.path, method="POSIX1")

        fw['NX'] = NX
        fw['val1'] = val1
        fw['val2'] = val2
        fw['val1'].transform = 'identity'
        fw.close()

        f = ad.file(self.temp.path)
        self.assertEqual(f['NX'][...], NX)
        self.assertTrue((f['val1'][:] == val1).all())
        self.assertTrue((f['val2'][:] == val2).all())
Example #10
def main():
    fname = ""
    if len(sys.argv) < 2:
        usage()
        sys.exit(1)
    else:
        fname = sys.argv[1]

    f = ad.file(fname)

    print "File info:"
    print "  %-18s %d" % ("of variables:", f.nvars)
    print "  %-18s %d - %d" % ("time steps:", f.current_step, f.last_step)
    print "  %-18s %d" % ("file size:", f.file_size)
    print "  %-18s %d" % ("bp version:", f.version)
    print ""
    
    for k in sorted(f.var.keys()):
        v = f.var[k]
        print "  %-17s  %-12s  %d*%s" % (np.typename(np.sctype2char(v.dtype)), v.name, v.nsteps, v.dims)
Example #11
    def test_writer_timeaggregation(self):
        self.temp = TempFile()

        NX = 10
        val1 = np.array(list(range(NX)), dtype=np.int32)
        val2 = np.array(list(range(5)), dtype='f8')

        fw = ad.writer(self.temp.path, method="POSIX1")
        fw.set_time_aggregation(3200)

        fw['NX'] = NX
        fw['val1'] = val1
        fw['val2'] = val2
        fw.close()
        ad.finalize()

        f = ad.file(self.temp.path)
        self.assertEqual(f['NX'][...], NX)
        self.assertTrue((f['val1'][:] == val1).all())
        self.assertTrue((f['val2'][:] == val2).all())
Example #14
    def __init__(self, filename, variable_path, shape=None, window=None, offset=None):
        self.filename = filename
        self.variable_path = variable_path

        fileHandle = ad.file(filename)
        variable = fileHandle.var[variable_path]
        self.dtype = variable.type
        if shape is None:
            shape = variable.dims
        self.shape = tuple(shape)
        fileHandle.close()

        if window is None:
            window = [slice(0, s, 1) for s in shape]
        self.window = tuple(window)
        if offset is None:
            offset = (0,) * len(shape)
        self.offset = tuple(offset)

        #: total bytes consumed by the elements of the array.
        self.nbytes = np.dtype(self.dtype).itemsize * np.prod(self.shape)
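The nbytes line above is plain itemsize-times-element-count arithmetic; a quick standalone check with numpy:

import numpy as np
# float64 is 8 bytes per element and a (2, 10) array holds 20 elements,
# so the array occupies 8 * 20 = 160 bytes.
print(np.dtype(np.float64).itemsize * np.prod((2, 10)))  # 160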
Example #15
    def test_writer_attr2(self):
        self.temp = TempFile()

        NX = 10
        val1 = np.array(list(range(NX)), dtype=np.int32)
        val2 = np.array(list(range(5)), dtype='f8')

        single_string = "ABCD"
        three_string = ("AA", "BBB", "CCCC")
        single_int = 10
        five_int = np.array(list(range(5)), dtype=np.int32)
        single_double = 1.1
        five_double = np.array(list(range(5)), dtype='double') * 1.1
        unicode_string = u"unicode"
        bytes_string = u"bytes"

        fw = ad.writer(self.temp.path, method="POSIX1")

        fw.attrs['single_string'] = single_string
        fw.attrs['three_string'] = three_string
        fw.attrs['single_int'] = single_int
        fw.attrs['five_int'] = five_int
        fw.attrs['single_double'] = single_double
        fw.attrs['five_double'] = five_double
        fw.attrs['unicode_string'] = unicode_string
        fw.attrs['bytes_string'] = bytes_string
        fw.close()

        f = ad.file(self.temp.path)
        self.assertEqual(f['single_string'].value, single_string.encode())
        ##self.assertTrue((f['three_string'].value == three_string).all())
        self.assertEqual(f['three_string'].value[0], three_string[0].encode())
        self.assertEqual(f['three_string'].value[1], three_string[1].encode())
        self.assertEqual(f['three_string'].value[2], three_string[2].encode())
        self.assertEqual(f['single_int'].value, single_int)
        self.assertTrue((f['five_int'].value == five_int).all())
        self.assertEqual(f['single_double'].value, single_double)
        self.assertTrue((f['five_double'].value == five_double).all())
        self.assertEqual(f['unicode_string'].value, unicode_string.encode())
        self.assertEqual(f['bytes_string'].value, bytes_string.encode())
Example #16
    def test_writer_attr(self):
        self.temp = TempFile()

        NX = 10
        val1 = np.array(range(NX), dtype=np.int32)
        val2 = np.array(range(5), dtype="f8")

        single_string = "ABCD"
        three_string = ("AA", "BBB", "CCCC")
        single_int = 10
        five_int = np.array(range(5), dtype=np.int32)
        single_double = 1.1
        five_double = np.array(range(5), dtype="double") * 1.1

        fw = ad.writer(self.temp.path)
        fw.declare_group("group", method="POSIX1")

        fw.define_attr("single_string")
        fw.define_attr("three_string")
        fw.define_attr("single_int")
        fw.define_attr("five_int")
        fw.define_attr("single_double")
        fw.define_attr("five_double")

        fw["single_string"] = single_string
        fw["three_string"] = three_string
        fw["single_int"] = single_int
        fw["five_int"] = five_int
        fw["single_double"] = single_double
        fw["five_double"] = five_double
        fw.close()

        f = ad.file(self.temp.path)
        self.assertEqual(f["single_string"].value, single_string)
        self.assertTrue((f["three_string"].value == three_string).all())
        self.assertEqual(f["single_int"].value, single_int)
        self.assertTrue((f["five_int"].value == five_int).all())
        self.assertEqual(f["single_double"].value, single_double)
        self.assertTrue((f["five_double"].value == five_double).all())
Example #17
    def write_scalar(self, adtype, val, varname='val'):
        ad.init_noxml()
        g = ad.declare_group("group", "", ad.FLAG.YES)
        ad.define_var(g, varname, "", adtype, "", "", "")
        ad.select_method(g, "POSIX1", "", "")

        if adtype == ad.DATATYPE.string:
            dtype = ad.adios2npdtype(adtype, len(str(val)))
            npval = np.array(val, dtype=dtype)
        else:
            dtype = ad.adios2npdtype(adtype)
            npval = np.array(val, dtype=dtype)

        fd = ad.open("group", self.temp.path, "w")
        ##ad.set_group_size(fd, npval.nbytes)
        ad.write(fd, varname, val, dtype)
        ad.close(fd)
        ad.finalize()

        f = ad.file(self.temp.path)
        v = f.vars['val']
        self.assertEqual(v.read(), npval)
Example #18
    def __getitem__(self, item):
        sd_format = SDidentifier.SDidentifier(self.filename)
        if sd_format == "h5":
            with h5.File(self.filename) as sdfile:
                dataset = list(sdfile.keys())
                sd_file = sdfile[dataset[0]]
                return sd_file[item]

        elif sd_format == "nc":
            with xr.open_dataset(self.filename) as sdfile:
                dataset = list(sdfile.keys())
                sd_file = sdfile[dataset[0]]
                return sd_file.values[item]

        elif sd_format == "sgy":
            sdfile = sg.read(self.filename)
            for i in range(item[0]):
                return sdfile[i].data[item[1]]

        elif sd_format == "fits":
            with pyfits.open(self.filename) as sdfile:
                sd_file = sdfile[0].data
                return sd_file[item]

        elif sd_format == "adios":
            with ad.file(self.filename) as sdfile:
                dataset = list(sdfile.keys())
                sd_file = sdfile.var[dataset[3]].read()
                return sd_file[item]

        elif sd_format == "csv":
            sdfile = np.genfromtxt(self.filename, delimiter=",")
            return sdfile[item]

        elif sd_format == "json":
            jfile = open(self.filename).read()
            jdata = json.loads(jfile)
            sdfile = np.array(jdata)
            return sdfile[item]
Example #19
    def load(self):
        begin = []
        size = []
        for o, w in zip(self.offset, self.window):
            if type(w) is int:
                begin.append(o + w)
                size.append(1)
            else:
                begin.append(o + w.start)
                size.append(w.stop - w.start)

        fileHandle = ad.file(self.filename)
        variable = fileHandle.var[self.variable_path]
        result = variable.read(tuple(begin), tuple(size))
        fileHandle.close()

        # remove all single-dimensional axes unless they result from slicing, i.e. a[n:n+1]
        single_dim_axes = [axis for axis in range(len(self.window)) if type(self.window[axis]) is int]
        if single_dim_axes:
            result = np.squeeze(result, axis=single_dim_axes)

        return result
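The squeeze step removes exactly the axes that were indexed with a plain integer; axes selected with a slice keep their length-1 dimension. A minimal numpy illustration of the distinction:

import numpy as np
a = np.arange(20).reshape(2, 10)
chunk = a[0:1, :]                # slicing keeps the axis: shape (1, 10)
row = np.squeeze(chunk, axis=0)  # squeezing drops it: shape (10,)
print(chunk.shape, row.shape)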
Example #20
    def test_writer_attr(self):
        self.temp = TempFile()

        NX = 10
        val1 = np.array(range(NX), dtype=np.int32)
        val2 = np.array(range(5), dtype='f8')

        single_string = "ABCD"
        three_string = ("AA", "BBB", "CCCC")
        single_int = 10
        five_int = np.array(range(5), dtype=np.int32)
        single_double = 1.1
        five_double = np.array(range(5), dtype='double') * 1.1

        fw = ad.writer(self.temp.path)
        fw.declare_group("group", method="POSIX1")

        fw.define_attr("single_string")
        fw.define_attr("three_string")
        fw.define_attr("single_int")
        fw.define_attr("five_int")
        fw.define_attr("single_double")
        fw.define_attr("five_double")

        fw['single_string'] = single_string
        fw['three_string'] = three_string
        fw['single_int'] = single_int
        fw['five_int'] = five_int
        fw['single_double'] = single_double
        fw['five_double'] = five_double
        fw.close()

        f = ad.file(self.temp.path)
        self.assertEqual(f['single_string'].value, single_string)
        self.assertTrue((f['three_string'].value == three_string).all())
        self.assertEqual(f['single_int'].value, single_int)
        self.assertTrue((f['five_int'].value == five_int).all())
        self.assertEqual(f['single_double'].value, single_double)
        self.assertTrue((f['five_double'].value == five_double).all())
Example #21
    def test_writer_empty_define_(self):
        self.temp = TempFile()

        NX = 10
        val1 = np.array(list(range(NX)), dtype=np.int32)
        val2 = np.array(list(range(5)), dtype='f8')

        fw = ad.writer(self.temp.path, method="POSIX1")

        fw.define_var("NX")
        fw.define_var("val1", "NX")
        fw.define_var("val2", val2.shape)
        fw.define_var("extra")

        fw['NX'] = NX
        fw['val1'] = val1
        fw['val2'] = val2
        fw.close()

        f = ad.file(self.temp.path)
        self.assertEqual(f['NX'][...], NX)
        self.assertTrue((f['val1'][:] == val1).all())
        self.assertTrue((f['val2'][:] == val2).all())
Example #22
    def write_scalar(self, adtype, val, varname='val'):
        ad.init_noxml()
        ad.allocate_buffer(ad.BUFFER_ALLOC_WHEN.NOW, 10)
        g = ad.declare_group("group", "", ad.FLAG.YES)
        ad.define_var(g, varname, "", adtype, "", "", "")
        ad.select_method(g, "POSIX1", "", "")

        if adtype == ad.DATATYPE.string:
            dtype = ad.adios2npdtype(adtype, len(str(val)))
            npval = np.array(val, dtype=dtype)
        else:
            dtype = ad.adios2npdtype(adtype)
            npval = np.array(val, dtype=dtype)

        fd = ad.open("group", self.temp.path, "w")
        ad.set_group_size(fd, npval.nbytes)
        ad.write(fd, varname, val, dtype)
        ad.close(fd)
        ad.finalize()

        f = ad.file(self.temp.path)
        v = f.vars['val']
        self.assertEqual(v.read(), npval)
Example #23
    def test_writer_var(self):
        self.temp = TempFile()
        
        NX = 10
        val1 = np.array(range(NX), dtype=np.int32)
        val2 = np.array(range(5), dtype='f8')

        fw = ad.writer(self.temp.path)
        fw.declare_group("group", method="POSIX1")
        
        fw.define_var("NX")
        fw.define_var("val1", "NX")
        fw.define_var("val2", val2.shape)

        fw['NX'] = NX
        fw['val1'] = val1
        fw['val2'] = val2
        fw.close()

        f = ad.file(self.temp.path)
        self.assertEqual(f['NX'][:], NX)
        self.assertTrue((f['val1'][:] == val1).all())
        self.assertTrue((f['val2'][:] == val2).all())
Example #24
def open(filename, datapath, distaxis=0):
    """Create a pyDive.adios.ad_ndarray instance respectively a structure of
    pyDive.adios.ad_ndarray instances from file.

    :param filename: name of adios file.
    :param datapath: path within adios file to a single variable or a group of variables.
    :param int distaxis: distributed axis
    :return: pyDive.adios.ad_ndarray instance
    """
    fileHandle = ad.file(filename)
    variable_paths = fileHandle.var.keys()
    fileHandle.close()

    def update_tree(tree, variable_path, variable_path_iter, leaf):
        node = next(variable_path_iter)
        if node == leaf:
            tree[leaf] = open_variable(filename, variable_path, distaxis)
            return
        tree[node] = {}
        update_tree(tree[node], variable_path, variable_path_iter, leaf)

    n = len(datapath.split("/"))

    structOfArrays = {}
    for variable_path in variable_paths:
        if not variable_path.startswith(datapath):
            continue
        path_nodes = variable_path.split("/")
        path_nodes_it = iter(path_nodes)

        # advance 'path_nodes_it' n times
        next(islice(path_nodes_it, n, n), None)

        update_tree(structOfArrays, variable_path, path_nodes_it, path_nodes[-1])

    return structured.structured(structOfArrays)
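A hypothetical usage sketch for open (the file name and group path are assumptions): every variable found under the group path becomes a leaf built by open_variable.

# Hypothetical: 'data.bp' holds the variables '/fields/Ex' and '/fields/Ey'.
fields = open("data.bp", "/fields", distaxis=0)
# 'fields' is a structured.structured tree with one ad_ndarray leaf per
# variable found under '/fields'.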
Example #25
print ">>> Method:", method

## Writing
for i in range(5):
    fd = ad.open("temperature", "temp.bp", "a")

    NX = 10
    size = 2
    groupsize = 4 + 4 + 8 * size * NX
    t = np.array(range(NX*size), dtype=np.float64) + 100*i
    tt = t.reshape((size, NX))
    ad.set_group_size(fd, groupsize)
    ad.write_int(fd, "NX", NX)
    ad.write_int(fd, "size", size)
    ad.write(fd, "temperature", tt)
    ad.close(fd)

ad.finalize()
print ">>> Done."

f = ad.file('temp.bp')
v = f['temperature']
print(v.attrs['unit'].value)
print(f['temperature/unit'].value)

print(v.attrs['unit/desc'].value)
print(f['temperature/unit/desc'].value)

print(v.attrs['type'].value)
print(f['temperature/type'].value)
Example #26
def main():
    params = ini.parse(open('input.ini').read())

    # Input parameters
    directory   =   str(params['fileHierarchy']['directory'])
    inDir       =   str(params['fileHierarchy']['inDir'])
    outDir      =   str(params['fileHierarchy']['outDir'])
    imgDir      =   str(params['fileHierarchy']['imgDir'])
    contDir     =   str(params['fileHierarchy']['contDir'])

    fstart      =   int(params['fileSequence']['start'])
    fend        =   int(params['fileSequence']['end'])
    dt          =   int(params['fileSequence']['interval'])

    thresholdDensity = float(params['contourParams']['threshold'])

    show_anim = bool(params['animation']['show'])
    save_anim = bool(params['animation']['save'])
    fps       = float(params['animation']['fps'])

    INDIR = directory+"/"+inDir+"/"
    OUTDIR = directory+"/"+outDir+"/"
    IMGDIR = directory+"/"+outDir+"/"+imgDir+"/"
    CONTDIR = directory+"/"+outDir+"/"+contDir+"/"

    print("===========File Hierarchy===========")
    print("Raw data directory: "+INDIR)
    print("Processed Blob property data directory: "+OUTDIR)
    print("Blob images directory: "+IMGDIR)
    print("Blob contour data directory: "+CONTDIR)



    #========== Blob Data Directory Setup =============
    if os.path.exists(directory):
        if os.path.exists(OUTDIR):
            os.system('rm '+OUTDIR+"*.txt 2>/dev/null")
            if os.path.exists(IMGDIR) and os.path.exists(CONTDIR):
                os.system('rm '+IMGDIR+"* 2>/dev/null")
                os.system('rm '+CONTDIR+"* 2>/dev/null")
            else:
                os.system('mkdir '+IMGDIR)
                os.system('mkdir '+CONTDIR)
        else:
            os.system('mkdir '+OUTDIR)
            os.system('mkdir '+IMGDIR)
            os.system('mkdir '+CONTDIR)
    else:
        os.system('mkdir '+directory)
        os.system('mkdir '+OUTDIR)
        os.system('mkdir '+IMGDIR)
        os.system('mkdir '+CONTDIR)
    ############################################
    data_num = np.arange(start=fstart, stop=fend, step=dt, dtype=int)
    f = ad.file(INDIR+'asdex_phi_%d'%data_num[0]+'.bp')

    blob_size_file = open(OUTDIR+"/blob_size.txt", "w")
    
    Nx = f['numCells'][0]
    Ny = f['numCells'][1]
    Nz = f['numCells'][2]

    Xmin = f['lowerBounds'][0]
    Ymin = f['lowerBounds'][1]
    Zmin = f['lowerBounds'][2]

    Xmax = f['upperBounds'][0]
    Ymax = f['upperBounds'][1]
    Zmax = f['upperBounds'][2]

    dx = (Xmax - Xmin) / Nx
    dy = (Ymax - Ymin) / Ny

    z_slice = 10
    cnum = 100
    cnumout = 30
    color = 'jet'


    ################### INTERPOLATION ###########################

    def interpTestPoint(xWeight,yWeight,dx,dy,probeDensity):
        testDensity00 = probeDensity[0,0] * (dx-xWeight) * (dy-yWeight)
        testDensity01 = probeDensity[0,1] * xWeight * (dy-yWeight)
        testDensity10 = probeDensity[1,0] * (dx-xWeight) * yWeight
        testDensity11 = probeDensity[1,1] * xWeight * yWeight
        testDensity = ( testDensity00 + testDensity01 + testDensity10 + testDensity11 ) / (dx*dy)
        return testDensity

    ################### Shoelace formula to find polygon Area ###########################

    def PolyArea(x,y):
        return 0.5*np.abs(np.dot(x,np.roll(y,1))-np.dot(y,np.roll(x,1)))
    ####################################################################################
    #################### RAY TRACING ALGORITHM #########################################
    ####################################################################################

    # A Python3 program to check if a given point lies inside a given polygon
    # Refer https://www.geeksforgeeks.org/check-if-two-given-line-segments-intersect/
    # for explanation of functions onSegment(), orientation() and doIntersect()

    # Define Infinite (Using INT_MAX caused overflow problems)
    INF = 10000

    class Point:
        def __init__(self, x, y):
            self.x = x
            self.y = y

    # Given three colinear points p, q, r, the function checks if
    # point q lies on line segment 'pr'
    def onSegment(p, q, r):
        if ( (q.x <= max(p.x, r.x)) and (q.x >= min(p.x, r.x)) and
               (q.y <= max(p.y, r.y)) and (q.y >= min(p.y, r.y))):
            return True
        return False

    def orientation(p, q, r):
        # to find the orientation of an ordered triplet (p,q,r)
        # function returns the following values:
        # 0 : Colinear points
        # 1 : Clockwise points
        # 2 : Counterclockwise

        # See https://www.geeksforgeeks.org/orientation-3-ordered-points/amp/
        # for details of below formula.

        val = (float(q.y - p.y) * (r.x - q.x)) - (float(q.x - p.x) * (r.y - q.y))
        if (val > 0):
            # Clockwise orientation
            return 1
        elif (val < 0):
            # Counterclockwise orientation
            return 2
        else:
            # Colinear orientation
            return 0

    # The main function that returns true if
    # the line segment 'p1q1' and 'p2q2' intersect.
    def doIntersect(p1,q1,p2,q2):

        # Find the 4 orientations required for
        # the general and special cases
        o1 = orientation(p1, q1, p2)
        o2 = orientation(p1, q1, q2)
        o3 = orientation(p2, q2, p1)
        o4 = orientation(p2, q2, q1)

        # General case
        if ((o1 != o2) and (o3 != o4)):
            return True

        # Special Cases

        # p1 , q1 and p2 are colinear and p2 lies on segment p1q1
        if ((o1 == 0) and onSegment(p1, p2, q1)):
            return True

        # p1 , q1 and q2 are colinear and q2 lies on segment p1q1
        if ((o2 == 0) and onSegment(p1, q2, q1)):
            return True

        # p2 , q2 and p1 are colinear and p1 lies on segment p2q2
        if ((o3 == 0) and onSegment(p2, p1, q2)):
            return True

        # p2 , q2 and q1 are colinear and q1 lies on segment p2q2
        if ((o4 == 0) and onSegment(p2, q1, q2)):
            return True

        # If none of the cases
        return False

    # Returns true if the point p lies inside the polygon[] with n vertices
    def isInside(polygon, n, p):
        # There must be at least 3 vertices in polygon[]
        if (n < 3):
            return False

        # Create a point for line segment from p to infinite
        extreme = Point(INF, p.y)

        # Count intersections of the above line with sides of polygon
        count = 0
        i = 0

        # To initialize i for the first iteration of do-while loop of C++ type
        next = (i+1)%n
        # Check if the line segment from 'p' to 'extreme' intersects
        # with the line segment from 'polygon[i]' to 'polygon[next]'
        if (doIntersect(polygon[i], polygon[next], p, extreme)):
            # If the point 'p' is colinear with line segment 'i-next',
            # then check if it lies on segment. If it lies, return true,
            # otherwise false
            if (orientation(polygon[i], p, polygon[next]) == 0):
                return onSegment(polygon[i], p, polygon[next])
            count = count + 1
        i = next

        while (i != 0):
            next = (i+1)%n
            # Check if the line segment from 'p' to 'extreme' intersects
            # with the line segment from 'polygon[i]' to 'polygon[next]'
            if (doIntersect(polygon[i], polygon[next], p, extreme)):
                # If the point 'p' is colinear with line segment 'i-next',
                # then check if it lies on segment. If it lies, return true,
                # otherwise false
                if (orientation(polygon[i], p, polygon[next]) == 0):
                    return onSegment(polygon[i], p, polygon[next])
                count = count + 1
            i = next
            if (i == 0):
                break
        # Return true if count is odd, false otherwise
        if (count%2 == 1):
            return True
        else:
            return False

    ####################################################################################
    ####################################################################################
    ####################################################################################

    def func_data(ionDensityData, phiData):
        ionDensityInterp = pg.data.GInterpModal(ionDensityData, 1, 'ms')
        phiInterp = pg.data.GInterpModal(phiData, 1, 'ms')
        interpGrid, ionDensityValues = ionDensityInterp.interpolate()
        interpGrid, phiValues = phiInterp.interpolate()

        #exValues = - np.gradient(phiValues,dx,axis = 0)
        #dexdxValues = np.gradient(exValues,dx,axis = 0)
        eyValues = - np.gradient(phiValues, dy, axis=1)

        # get cell center coordinates
        CCC = []
        for j in range(0, len(interpGrid)):
            CCC.append((interpGrid[j][1:] + interpGrid[j][:-1]) / 2)

        x_vals = CCC[0]
        y_vals = CCC[1]
        z_vals = CCC[2]
        X, Y = np.meshgrid(x_vals, y_vals)
        ionDensityGrid = np.transpose(ionDensityValues[:, :, z_slice, 0])
        eyGrid = np.transpose(eyValues[:, :, z_slice, 0])
        return x_vals, y_vals, X, Y, ionDensityGrid, eyGrid

    def animate(i):
            blob_counter = 0
            ionDensity=INDIR+'asdex_ion_GkM0_%d'%data_num[i]+'.bp'
            phi=INDIR+'asdex_phi_%d'%data_num[i]+'.bp'
            ionDensityData = pg.data.GData(ionDensity)
            phiData = pg.data.GData(phi)

            x_vals,y_vals,X,Y,ionDensityGrid,eyGrid = func_data(ionDensityData,phiData)

            Nx = len(x_vals)
            Ny = len(y_vals)

            ax1.cla()
            ax1.set_title('Time = %d'%i+' $\\mu$s')

            cp1 = ax1.contourf(X, Y, ionDensityGrid, cnum, cmap=color)
            #cp2 = ax1.contour(X, Y, eyGrid, cnum, linewidths=0.1, colors='black', linestyles='solid')
            cp3 = ax1.contour(X, Y, ionDensityGrid, cnumout, linewidths=0.1, colors='black', linestyles='solid')
            #cp3 = ax1.contour(X, Y, ionDensityGrid, cnumout, linewidths=1, cmap=color)
            cp4 = ax1.contour(X, Y, ionDensityGrid, [thresholdDensity], linewidths=1, colors='black', linestyles='solid')
            # #plt.grid()
            # ax1.set_xticks(x_vals , minor=True)
            # ax1.set_yticks(y_vals , minor=True)
            # #ax1.grid(which='both')
            # ax1.grid(which='minor', alpha=0.9, color='k', linestyle='-')

            p = cp4.collections[0].get_paths()
            contour_number = len(p)
            imageCounter = 0
            for j in range(contour_number):
                p_new = cp4.collections[0].get_paths()[j]
                v = p_new.vertices
                x = v[:,0]
                y = v[:,1]
                x_min = np.min(x)
                x_max = np.max(x)
                y_min = np.min(y)
                y_max = np.max(y)
                blobMidX = (x_min + x_max)/2
                blobMidY = (y_min + y_max)/2
                blobLimX = abs(x_max - x_min)
                blobLimY = abs(y_max - y_min)
                if (abs(x[0]-x[len(x)-1]) <= 1e-10) and blobLimX > 2*dx and blobLimY > 2*dy:
                    polygon = []
                    for plgn in range(len(x)):
                        polygon.append(Point(x[plgn],y[plgn]))
                    npoly = len(polygon)
                    numTrial = 100
                    blobConfidence = 0
                    insideTrialPoints = 0
                    for numT in range(numTrial):
                        xT = 0.5*(x_max+x_min) - 0.5*(x_max-x_min)*(random()-0.5)
                        yT = 0.5*(y_max+y_min) - 0.5*(y_max-y_min)*(random()-0.5)
                        #print("Trial point",numT,"with",round(xT,4),round(yT,4),'for contour number %d'%j)
                        trialPoint = Point(xT,yT)
                        if isInside(polygon, npoly, trialPoint):
                            insideTrialPoints = insideTrialPoints + 1
                            #print("Trial point", numT, "is INSIDE for contour number %d"%j)
                            xd = abs(x_vals-xT)
                            yd = abs(y_vals-yT)
                            idx = np.where(xd <= 0.5*dx)
                            idy = np.where(yd <= 0.5*dy)
                            ionDensityFind = np.reshape(ionDensityGrid,Nx*Ny)
                            probeDensity = np.zeros((2,2))
                            for id in range(len(idx[0])):
                                for jd in range(len(idy[0])):
                                    probeDensity[id,jd] = ionDensityFind[(idy[0][jd] * Nx) + (idx[0][id] + 1)]

                            xGrid = np.zeros(2)
                            yGrid = np.zeros(2)
                            for id in range(len(idx[0])):
                                xGrid[id] = x_vals[idx[0][id]]
                            for jd in range(len(idy[0])):
                                yGrid[jd] = y_vals[idy[0][jd]]

                            xWeight = abs(xGrid[0]-xT)
                            yWeight = abs(yGrid[0]-yT)
                            testDensity = interpTestPoint(xWeight,yWeight,dx,dy,probeDensity)
                            if (testDensity >= thresholdDensity):
                                    #print("Interpolated point",numT,"with",round(xInterp,4),round(yInterp,4)," for Contour number %d"%j+" is INSIDE & truly a BLOB! Yeyy...")
                                    blobConfidence = blobConfidence + 1

                            else:
                                    None
                        else:
                            None
                            #print("Trial point", numT, " lies Outside before interpolation")

                    confidence = blobConfidence/insideTrialPoints
                    #print("Confidence = ",confidence*100,"%")
                    if (confidence > 0.80):
                        blob_counter = blob_counter + 1
                        polyArea = PolyArea(x,y)
                        #print(polyArea)
                        # print('File number = %d'%data_num[i]+', contour number %d'%j+' = It is TRULY a blob with confidence',confidence*100,"%")
                        blob_size_file.write('%d'%data_num[i]+'\t%d'%j+'\t%.8f'%blobLimX+'\t%.8f'%blobLimY+'\t%.8f'%blobMidX+'\t%.8f'%blobMidY+'\t%.8f'%polyArea+'\n')
                        if imageCounter == 0:
                            plt.savefig(IMGDIR+"/file_number%d"%data_num[i]+"_blob_snap.png")   # save the figure to file
                        imageCounter = imageCounter + 1
                        #print("blobConfidence=",blobConfidence,"insideTrialPoints=",insideTrialPoints)
                        blob_file = open(CONTDIR+"/file_number%d"%data_num[i]+"_contour_number_%d"%j+".txt", "w")
                        for k in range(len(x)):
                            blob_file.write('%.8f'%x[k]+'\t%.8f'%y[k]+'\n')
                        blob_file.close()
                elif (abs(x[0]-x[len(x)-1]) <= 1e-10):
                    None
                    # print('File number = %d'%data_num[i]+', contour number %d'%j+' = It is a sub-grid-sized closed contour')
                else:
                    None
                    # print('File number = %d'%data_num[i]+', contour number %d'%j+' = It is open line & NOT a blob')
                    #print(x,y)
                    #for k in range(len(x)):
                        #print(round(x[k],7),round(y[k],7))

            if blob_counter == 0:
                None
                # print("No blob found for file number = %d"%data_num[i])
            sleep(0.1)
            pbar.update(pstep)
            #plt.grid(True)
            ax1.set_xlabel("X",fontsize=14)
            ax1.set_ylabel("Y",fontsize=14)
            #ax1.tick_params(axis='both', which='major', labelsize=12)
            del ionDensityData
            del phiData

    if (show_anim == True):
        fig,ax1 = plt.subplots(1,1,figsize=(8,5),dpi=150)
        plt.rcParams["font.size"] = "12"
        plt.rcParams["font.family"] = "Times New Roman"
        #To keep the colorbar static:
        ionDensity=INDIR+'asdex_ion_GkM0_%d'%data_num[0]+'.bp'
        phi=INDIR+'asdex_phi_%d'%data_num[0]+'.bp'
        ionDensityData = pg.data.GData(ionDensity)
        phiData = pg.data.GData(phi)
        x_vals,y_vals,X,Y,ionDensityGrid,eyGrid = func_data(ionDensityData,phiData)
        cp1 = ax1.contourf(X, Y, ionDensityGrid, cnum, cmap=color)
        fig.colorbar(cp1)
        #TColorbar fixing completed:
        pstep = 1  # len(data_num)/100
        pbar = tqdm(total=len(data_num))
        ani = animation.FuncAnimation(fig,animate,frames=len(data_num),interval=(1/fps)*1e+3,blit=False,repeat=False)
        ax1.set_xticks(x_vals , minor=True)
        ax1.set_yticks(y_vals , minor=True)
        ax1.grid(which='both')
        ax1.grid(which='minor', alpha=0.2, color='b', linestyle='--')
        #ax1.grid(b=True, which='major', color='b', linestyle='-')
        plt.show()
        if(save_anim == True):
            try:
                Writer = animation.writers['ffmpeg']
                writer = Writer(fps=fps, metadata=dict(artist='Me'), bitrate=1800)
            except RuntimeError:
                print("ffmpeg not available trying ImageMagickWriter")
                writer = animation.ImageMagickWriter(fps=fps)
            ani.save('animation.mp4')
    pbar.close()
    blob_size_file.close()
    return 0
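A quick sanity check of the shoelace helper (PolyArea) used above, on the unit square, where the expected area is exactly 1.0 (standalone sketch, numpy only):

import numpy as np

def PolyArea(x, y):
    return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))

# Unit square traversed counterclockwise: the shoelace formula gives 1.0.
x = np.array([0.0, 1.0, 1.0, 0.0])
y = np.array([0.0, 0.0, 1.0, 1.0])
print(PolyArea(x, y))  # 1.0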
Example #28
    def _loadSequence(self):
        # Sequence load typically concatenates multiple files
        files = glob('{:s}*'.format(self.fileName))
        if not files:
            raise NameError("File(s) '{:s}' not found or empty.".format(
                self.fileName))

        cnt = 0  # Counter for the number of loaded files
        for fileName in files:
            extension = fileName.split('.')[-1]
            if extension == 'h5':
                self.fileType = 'hdf5'
                fh = tables.open_file(fileName, 'r')
                if '/DataStruct/data' in fh and \
                   '/DataStruct/timeMesh' in fh:
                    grid = fh.root.DataStruct.timeMesh.read()
                    values = fh.root.DataStruct.data.read()
                    fh.close()
                else:
                    fh.close()
                    continue
                #end
            elif extension == 'bp':
                self.fileType = 'adios'
                fh = adios.file(fileName)
                timeMeshList = [
                    key for key, val in fh.vars.items() if 'TimeMesh' in key
                ]
                dataList = [
                    key for key, val in fh.vars.items() if 'Data' in key
                ]
                if len(dataList) > 0:
                    for i in range(len(dataList)):
                        if i == 0:
                            values = adios.var(fh, dataList[i]).read()
                            grid = adios.var(fh, timeMeshList[i]).read()
                        else:
                            newvals = adios.var(fh, dataList[i]).read()
                            # deal with weird behavior after restart where some data doesn't have second dimension
                            if len(newvals.shape) < 2:
                                newvals = np.expand_dims(newvals, axis=1)
                            #end
                            values = np.append(values, newvals, axis=0)
                            grid = np.append(grid,
                                             adios.var(fh,
                                                       timeMeshList[i]).read(),
                                             axis=0)
                        #end
                    #end
                    fh.close()
                else:
                    fh.close()
                    continue
                #end
            else:
                continue
            #end

            if cnt > 0:
                self._grid = np.append(self._grid, grid, axis=0)
                self._values = np.append(self._values, values, axis=0)
            else:
                self._grid = grid
                self._values = values
            #end
            cnt += 1
        #end

        if cnt == 0:  # No files loaded
            raise NameError("File(s) '{:s}' not found or empty.".format(
                self.fileName))
        #end
        # Squeeze the time coordinate ...
        if len(self._grid.shape) > 1:
            self._grid = np.squeeze(self._grid)
        #end
        # ... and make it a list following the Postgkyl grid conventions
        self._grid = [self._grid]

        # glob() doesn't guarantee the right order
        sortIdx = np.argsort(self._grid[0])
        self._grid[0] = self._grid[0][sortIdx]
        self._values = self._values[sortIdx, ...]
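The closing argsort restores chronological order because glob() returns files in arbitrary order; a minimal illustration of the reordering step:

import numpy as np
grid = np.array([0.2, 0.0, 0.1])           # time stamps as concatenated from files
values = np.array([[2.0], [0.0], [1.0]])   # one row of data per time stamp
sortIdx = np.argsort(grid)
print(grid[sortIdx])         # [0.  0.1 0.2]
print(values[sortIdx, ...])  # rows reordered to match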
Example #29
    def load(self, filename):
        print "Get a dataframe for ", filename

        f = ad.file(filename)
        f.read()
        return pd.DataFrame(f)
Example #30
import adios as ad
import getopt, sys
import os

method = "BP"
init = "verbose=3;"

if len(sys.argv) > 1:
    method = sys.argv[1]

if len(sys.argv) > 2:
    init = sys.argv[2]

ad.read_init(method, parameters=init)

f = ad.file("temp.bp", method, is_stream=True, timeout_sec = 10.0)
f.printself()

i = 0
while True:
    print(">>> step:", i)
    v = f.var['temperature']
    v.printself()

    val = v.read(nsteps=1)
    print(val)

    if (f.advance() < 0):
        break
    i += 1
Example #31
def Parser(configFile):
    method = "BP"
    init = "verbose=3;"

    # read parameters from configuration file
    config = configparser.ConfigParser()
    config.read(configFile)
    in_bp_file = config['Parser']['InputBPFile']  # input bp file path
    prov_db_path = config['Parser'][
        'ProvDBPath']  # provenance output database path
    queue_size = int(config['Parser']['QueueSize'])  # provenance data size
    int_func_num = int(
        config['Parser']['InterestFuncNum'])  # interested function size

    # initialize adios streaming mode
    ad.read_init(method, parameters=init)
    fin = ad.file(in_bp_file, method, is_stream=True, timeout_sec=10.0)
    fout = open(prov_db_path, "wb")

    # read attributes
    db = dq(maxlen=queue_size)
    name = np.array([
        'prog_names', 'comm_ranks', 'threads', 'event_types', 'func_names',
        'counters', 'counter_value', 'event_types_comm', 'tag', 'partner',
        'num_bytes', 'timestamp'
    ]).reshape(1, 12)
    attr = fin.attr
    nattrs = fin.nattrs
    attr_name = list(fin.attr)
    attr_value = np.empty(nattrs, dtype=object)
    num_func = 0
    func_name = []
    for i in range(0, len(attr_name)):
        attr_value[i] = attr[attr_name[i]].value
        # count function number and names
        if attr_name[i].startswith('timer'):
            num_func = num_func + 1
            func_name.append(attr_value[i])
        if attr_name[i].startswith('event_type'):
            print(attr_value[i])
    attr_name = np.array(attr_name)
    func_name = np.array(func_name)

    i = 0
    total_timestep = 0
    anomaly_indices = []
    while True:
        print(">>> step:", i)

        vname = "event_timestamps"
        if vname in fin.vars:
            var = fin.var[vname]
            num_steps = var.nsteps
            event = var.read(nsteps=num_steps)
            data_event = np.zeros((event.shape[0], 12), dtype=object) + np.nan
            data_event[:, 0:5] = event[:, 0:5]
            data_event[:, 11] = event[:, 5]
            data_step = data_event
            # count most common functions
            int_func = ct(data_event[:, 4]).most_common(
                int_func_num
            )  # e.g., [(16, 14002), (15, 14000), (13, 6000),...]

        vname = "counter_values"
        if vname in fin.vars:
            var = fin.var[vname]
            num_steps = var.nsteps
            counter = var.read(nsteps=num_steps)
            data_counter = np.zeros(
                (counter.shape[0], 12), dtype=object) + np.nan
            data_counter[:, 0:3] = counter[:, 0:3]
            data_counter[:, 5:7] = counter[:, 3:5]
            data_counter[:, 11] = counter[:, 5]
            data_step = np.concatenate((data_step, data_counter), axis=0)

        vname = "comm_timestamps"
        if vname in fin.vars:
            var = fin.var[vname]
            num_steps = var.nsteps
            comm = var.read(nsteps=num_steps)
            data_comm = np.zeros((comm.shape[0], 12), dtype=object) + np.nan
            data_comm[:, 0:4] = comm[:, 0:4]
            data_comm[:, 8:11] = comm[:, 4:7]
            data_comm[:, 11] = comm[:, 7]
            data_step = np.concatenate((data_step, data_comm), axis=0)

        # sort data in this step by timestamp
        data_step = data_step[data_step[:, 11].argsort()]

        if i == 0:
            data_global = data_step
        else:
            data_global = np.concatenate((data_global, data_step), axis=0)

        # launch anomaly detection
        anomaly_flag = False

        # dynamic interest list
        if len(int_func) < 3:
            print("Most interested function:\n", func_name[int_func[0][0]])
        else:
            print("Most three interested functions:\n",
                  func_name[int_func[0][0]], "\n", func_name[int_func[1][0]],
                  "\n", func_name[int_func[2][0]])

        # matching data
        global_index = (np.arange(data_step.shape[0]) +
                        total_timestep).reshape(data_step.shape[0], 1)
        data_step = np.append(data_step, global_index, axis=1)
        func_data = data_step[data_step[:, 4] == 21]  # 21 is adios_close, TODO
        entry_data = func_data[func_data[:, 3] ==
                               0]  # 0 is entry in the current data, TODO
        exit_data = func_data[func_data[:, 3] == 1]  # TODO

        # generating streaming data in terms of one function
        datastream = []
        for j in range(0, entry_data.shape[0]):
            for k in range(0, exit_data.shape[0]):
                if np.array_equal(entry_data[j, 0:3], exit_data[k, 0:3]):
                    entry_time = entry_data[j, 11]
                    exec_time = exit_data[k, 11] - entry_data[j, 11]
                    datastream += [[entry_time, exec_time]]
                    break
        datastream = np.array(datastream)

        # anomaly detection
        if (datastream.shape[0]):
            scaler = MinMaxScaler()
            scaler.fit(datastream)
            datastream = scaler.transform(datastream)
            # Should call MILOF API, but here for simplicity, call LOF directly
            clf = LocalOutlierFactor(algorithm="kd_tree", metric='euclidean')
            anomalies = entry_data[clf.fit_predict(datastream) == -1]
            if anomalies.shape[0]:
                anomaly_indices.extend(anomalies[:, -1].tolist())
                anomaly_flag = True

        # add or dump queue
        if anomaly_flag:
            # dump queue to file
            db.appendleft(attr_value)
            db.appendleft(attr_name)
            db.appendleft(nattrs)
            print(">>> Identified anomalies and dump data to binary.")
            print(">>> Serialization ...")
            pickle.dump(db, fout)
            # db[0]: the number of attributes
            # db[1]: the names of attributes
            # db[2]: the values of attributes
            # from db[3]: the trace data
        else:
            # add data to queue
            db.extend(data_step)

        print("Size of current timestep =", data_step.shape[0])
        total_timestep += data_step.shape[0]
        print("Size of total timestep = ", total_timestep)

        print(">>> Advance to next step ... ")
        if (fin.advance() < 0):
            break

        i += 1

    fin.close()
    fout.close()

    print(">>> Complete passing data.")
    print(">>> Test of deserialization.")
    print(">>> Load data ...")
    fin = open(prov_db_path, "rb")
    db2 = pickle.load(fin)
    print(">>> Passed test of deserialization.")

    print("\n**** Print info ****")
    print(">>> Number of attributes =", db2[0])
    print(">>> First 20 Names of attributes =", db2[1][0:20])
    print(">>> First 20 Values of attributes =", db2[2][0:20])
    print(">>> First 20 trace data =",
          np.array(list(itertools.islice(db2, 3, 20))))
    print(">>> Indices of anomalies in terms of entry:", anomaly_indices)
    fin.close()

    import json
    file_path = "data.json"
    with open(file_path, 'w') as outfile:
        json.dump(data_global.tolist(), outfile)

    file_path = "anomaly.json"
    with open(file_path, 'w') as outfile:
        json.dump(anomaly_indices, outfile)
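The detection step above reduces to min-max scaling the (entry_time, exec_time) pairs and flagging Local Outlier Factor outliers; a self-contained sketch of that core with scikit-learn (the synthetic data is an assumption for illustration):

import numpy as np
from sklearn.preprocessing import MinMaxScaler
from sklearn.neighbors import LocalOutlierFactor

# 100 routine (entry_time, exec_time) pairs plus one call with an extreme duration.
datastream = np.array([[float(t), 1.0] for t in range(100)] + [[5.0, 50.0]])
datastream = MinMaxScaler().fit_transform(datastream)

clf = LocalOutlierFactor(algorithm="kd_tree", metric="euclidean")
labels = clf.fit_predict(datastream)  # -1 marks an outlier
print(np.where(labels == -1)[0])      # expected to flag index 100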
Example #32
def getTime(dataFile):
    # Extract the time from the file.
    hF = ad.file(dataFile)
    timeOut = hF['time'].read()
    hF.close()
    return timeOut
Example #33
size = 2
t = np.array(list(range(NX * size)), dtype=np.float64)
tt = t.reshape((size, NX))

print("\n>>> Writing ...\n")
ad.init_noxml()
ad.allocate_buffer(ad.BUFFER_ALLOC_WHEN.NOW, 10)

fw = ad.writer(fname)
fw.declare_group('group', method='POSIX1')

fw['NX'] = NX
fw['size'] = size
fw['temperature'] = tt
fw.attrs[
    '/temperature/description'] = "Global array written from 'size' processes"
fw.close()

## Reading
print("\n>>> Reading ...\n")

f = ad.file(fname)
for key, val in f.vars.items():
    print(key, '=', val.read())

for key, val in f.attrs.items():
    print(key, '=', val.value)

## Testing
print("\n>>> Done.\n")
Example #34
    def getInputFile(self):
        fh = adios.file(self.fileName)
        inputFile = adios.attr(fh, 'inputfile').value.decode('UTF-8')
        fh.close()
        return inputFile
Example #35
import adios as ad
import numpy as np
from matplotlib.tri import Triangulation, LinearTriInterpolator
from IPython.parallel import Client

rc = Client()
dview = rc[:]
with dview.sync_imports():  # these are required by findblobsXGC
    import matplotlib.pyplot as plt
    import numpy as np
    from findblobsXGC import findblobsXGC

#get data from f3d
fileDir = '/ccs/home/rchurchi/scratch/ti252_ITER_new_profile/'
#mesh
fm = ad.file(fileDir + 'xgc.mesh.bp')
RZ = fm['/coordinates/values'][...]
tri = fm['nd_connect_list'][...]
psi = fm['psi'][...]
psi_x = 11.10093394162000
psin = psi / psi_x
eq_x_z = -3.442893939000000
fm.close()
spaceinds = (psin > 0.95) & (psin < 1.05) & ((RZ[:, 1] >= eq_x_z) |
                                             (psin >= 1))

tmp = spaceinds[tri]  #rzspaceinds T/F array, same size as R
goodTri = np.all(tmp,
                 axis=1)  #only use triangles who have all vertices in rzInds
tri = tri[goodTri, :]
#remap indices in triangulation
Example #36
    def _loadFrame(self, axes=(None, None, None, None, None, None), comp=None):
        self.fileDir = path.dirname(path.realpath(self.fileName))
        extension = self.fileName.split('.')[-1]
        if extension == 'h5':
            fh = tables.open_file(self.fileName, 'r')
            if not '/StructGridField' in fh:
                fh.close()
                self._loadSequence()
                return
            #end

            # Get the attributes
            lower = fh.root.StructGrid._v_attrs.vsLowerBounds
            upper = fh.root.StructGrid._v_attrs.vsUpperBounds
            cells = fh.root.StructGrid._v_attrs.vsNumCells
            # Load data ...
            self._values = fh.root.StructGridField.read()
            # ... and the time-stamp
            if '/timeData' in fh:
                self.meta['time'] = fh.root.timeData._v_attrs.vsTime
            #end
            fh.close()
        elif extension == 'bp':
            fh = adios.file(self.fileName)
            if not self._varName in fh.vars:
                # Not a Gkyl "frame" data; trying to load as a sequence
                fh.close()
                self._loadSequence()
                return
            #end
            # Get the attributes
            self.attrsList = {}
            for k in fh.attrs.keys():
                self.attrsList[k] = 0
            #end
            # Postgkyl conventions require the attributes to be
            # ndarrays even for 1D data
            lower = np.atleast_1d(adios.attr(fh, 'lowerBounds').value)
            upper = np.atleast_1d(adios.attr(fh, 'upperBounds').value)
            cells = np.atleast_1d(adios.attr(fh, 'numCells').value)
            if 'changeset' in fh.attrs.keys():
                self.meta['changeset'] = adios.attr(
                    fh, 'changeset').value.decode('UTF-8')
            #end
            if 'builddate' in fh.attrs.keys():
                self.meta['builddate'] = adios.attr(
                    fh, 'builddate').value.decode('UTF-8')
            #end
            if 'polyOrder' in fh.attrs.keys():
                self.meta['polyOrder'] = adios.attr(fh, 'polyOrder').value
                self.meta['isModal'] = True
            #end
            if 'basisType' in fh.attrs.keys():
                self.meta['basisType'] = adios.attr(
                    fh, 'basisType').value.decode('UTF-8')
                self.meta['isModal'] = True
            #end
            if 'charge' in fh.attrs.keys():
                self.meta['charge'] = adios.attr(fh, 'charge').value
            #end
            if 'mass' in fh.attrs.keys():
                self.meta['mass'] = adios.attr(fh, 'mass').value
            #end
            if 'time' in fh.vars:
                self.meta['time'] = adios.var(fh, 'time').read()
            #end
            if 'frame' in fh.vars:
                self.meta['frame'] = adios.var(fh, 'frame').read()
            #end

            # Check for mapped grid ...
            if "type" in fh.attrs.keys() and self._compGrid is False:
                self._gridType = adios.attr(fh, "type").value.decode('UTF-8')
            #end
            # .. load nodal grid if provided ...
            if self._gridType == "uniform":
                pass  # nothing to do for uniform grids
            elif self._gridType == "mapped":
                if "grid" in fh.attrs.keys():
                    gridNm = self.fileDir + '/' + adios.attr(
                        fh, "grid").value.decode('UTF-8')
                else:
                    gridNm = self.fileDir + "/grid"
                #end
                with adios.file(gridNm) as gridFh:
                    gridVar = adios.var(gridFh, self._varName)
                    offset, count = self._createOffsetCountBp(
                        gridVar, axes, None)
                    tmp = gridVar.read(offset=offset, count=count)
                    grid = [
                        tmp[..., d].transpose()
                        for d in range(tmp.shape[-1])
                    ]
                    self._grid = grid
                #end
            elif self._gridType == "nonuniform":
                raise TypeError("'nonuniform' is not presently supported")
            else:
                raise TypeError("Unsupported grid type info in field!")
            #end

            # Load data
            var = adios.var(fh, self._varName)
            offset, count = self._createOffsetCountBp(var, axes, comp)
            self._values = var.read(offset=offset, count=count)

            # Adjust boundaries for 'offset' and 'count'
            numDims = len(cells)
            dz = (upper - lower) / cells
            if offset:
                if self._gridType == "uniform":
                    lower = lower + offset[:numDims] * dz
                    cells = cells - offset[:numDims]
                elif self._gridType == "mapped":
                    idx = np.full(numDims, 0)
                    for d in range(numDims):
                        lower[d] = self._grid[d][tuple(idx)]
                        cells[d] = cells[d] - offset[d]
                    #end
                #end
            #end
            if count:
                if self._gridType == "uniform":
                    upper = lower + count[:numDims] * dz
                    cells = count[:numDims]
                elif self._gridType == "mapped":
                    idx = np.full(numDims, 0)
                    for d in range(numDims):
                        #.Reverse indexing of idx because of the transpose() used in composing self._grid.
                        idx[-d - 1] = count[d] - 1
                        upper[d] = self._grid[d][tuple(idx)]
                        cells[d] = count[d]
                    #end
                #end
            #end
            fh.close()
        elif extension == 'gkyl':
            dti8 = np.dtype("i8")
            dtf = np.dtype("f8")
            doffset = 8

            offset = 0
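
            # Raw .gkyl layout, as decoded step by step below:
            #   i8 realType | i8 numDims | i8 cells[numDims] |
            #   real lower[numDims] | real upper[numDims] |
            #   i8 elemSz | i8 asize | field data (real)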

            # read real-type
            realType = np.fromfile(self.fileName, dtype=dti8, count=1)[0]
            if realType == 1:
                dtf = np.dtype("f4")
                doffset = 4
            #end

            offset += 8

            # read grid dimensions
            numDims = np.fromfile(self.fileName,
                                  dtype=dti8,
                                  count=1,
                                  offset=offset)[0]
            offset += 8

            # read grid shape
            cells = np.fromfile(self.fileName,
                                dtype=dti8,
                                count=numDims,
                                offset=offset)
            offset += numDims * 8

            # read lower/upper
            lower = np.fromfile(self.fileName,
                                dtype=dtf,
                                count=numDims,
                                offset=offset)
            offset += numDims * doffset

            upper = np.fromfile(self.fileName,
                                dtype=dtf,
                                count=numDims,
                                offset=offset)
            offset += numDims * doffset

            # read element size (the division by doffset is because elemSz
            # includes sizeof(real_type) = doffset)
            elemSzRaw = int(
                np.fromfile(self.fileName, dtype=dti8, count=1,
                            offset=offset)[0])
            elemSz = elemSzRaw // doffset
            offset += 8

            # read array size
            asize = np.fromfile(self.fileName,
                                dtype=dti8,
                                count=1,
                                offset=offset)[0]
            offset += 8

            adata = np.fromfile(self.fileName, dtype=dtf, offset=offset)
            gshape = np.ones(numDims + 1, dtype=np.dtype("i8"))
            for d in range(numDims):
                gshape[d] = cells[d]
            #end
            numComp = elemSz
            gshape[-1] = int(numComp)
            self._values = adata.reshape(gshape)
        else:
            raise NameError(
                "File extension '{:s}' is not supported".format(extension))
        #end

        numDims = len(cells)
        dz = (upper - lower) / cells
        # Adjusts bounds in case ghost layer is included in data
        for d in range(numDims):
            if cells[d] != self._values.shape[d]:
                if self._gridType == "mapped":
                    raise ValueError(
                        "Data appears to include ghost cells which is not compatible with mapped grid. Use computational grid 'compgrid' instead."
                    )
                #end
                ngl = int(np.floor((cells[d] - self._values.shape[d]) * 0.5))
                ngu = int(np.ceil((cells[d] - self._values.shape[d]) * 0.5))
                cells[d] = self._values.shape[d]
                lower[d] = lower[d] - ngl * dz[d]
                upper[d] = upper[d] + ngu * dz[d]
            #end
        #end

        # Construct grids if not loaded already
        if self._grid is None:
            grid = [
                np.linspace(lower[d], upper[d], cells[d] + 1)
                for d in range(numDims)
            ]
            self._grid = grid
Ejemplo n.º 37
0
# read parameters from configuration file
config = configparser.ConfigParser()
config.read(configFile)
in_bp_file = config['Parser']['InputBPFile']  # input bp file path
prov_db_path = config['Parser']['ProvDBPath']  # provenance output database path
queue_size = int(config['Parser']['QueueSize'])  # provenance queue size
int_func_num = int(config['Parser']['InterestFuncNum'])  # number of functions of interest
vis_url = config['Parser']['VisURL']
fixed_func = config['Parser']['IntFunc']

# initialize adios streaming mode
ad.read_init(method, parameters=init)
fin = ad.file(in_bp_file, method, is_stream=True, timeout_sec=10.0)
fout = open(prov_db_path, "wb")
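
# In streaming mode the reader is advanced step by step; a hedged sketch of
# the usual loop (adios 1.x Python bindings; exact return-value semantics
# may vary by version):
#
#   while True:
#       ...process fin.vars / fin.attrs for the current step...
#       if fin.advance() != 0:  # no new step arrived before the timeout
#           break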

# read attributes
db = dq(maxlen=queue_size)
name = np.array([
    'prog_names', 'comm_ranks', 'threads', 'event_types', 'func_names',
    'counters', 'counter_value', 'event_types_comm', 'tag', 'partner',
    'num_bytes', 'timestamp'
]).reshape(1, 12)
attr = fin.attr
nattrs = fin.nattrs
attr_name = list(fin.attr)
attr_value = np.empty(nattrs, dtype=object)
num_func = 0
func_name = []
Ejemplo n.º 38
0
NX = 10
size = 2
t = np.array(range(NX*size), dtype=np.float64)
tt = t.reshape((size, NX))

print "\n>>> Writing ...\n"
ad.init_noxml()
ad.allocate_buffer (ad.BUFFER_ALLOC_WHEN.NOW, 10);

fw = ad.writer(fname)
fw.declare_group('group', method='POSIX1')

fw['NX'] = NX
fw['size'] = size
fw['temperature'] = tt
fw.attrs['/temperature/description'] = "Global array written from 'size' processes"
fw.close()

## Reading
print "\n>>> Reading ...\n"

f = ad.file(fname)
for key, val in f.vars.iteritems():
    print key, '=', val.read()

for key, val in f.attrs.iteritems():
    print key, '=', val.value

## Testing
print "\n>>> Done.\n"
Ejemplo n.º 39
0
from __future__ import division
import numpy as np
import adios as ad
import matplotlib.pyplot as plt
import sys

name_hat = "/home/luo/Dropbox/SC2018/TOB/figures/"

fmesh = ad.file('xgc.mesh.bp')
rz = fmesh['/coordinates/values'][:]
conn = fmesh['/cell_set[0]/node_connect_list'][:]
R = rz[:, 0]
Z = rz[:, 1]

plt.rc('xtick', labelsize=24)  # fontsize of the tick labels
plt.rc('ytick', labelsize=24)
axis_font = {'fontname': 'Times New Roman', 'size': '30'}

dpot = np.fromfile("SZ.reconstruction")
dpot = dpot.reshape(-1, 512)
dpot1 = dpot[:, 0]

plt.figure(figsize=(8, 6))
plt.gcf().subplots_adjust(left=0.2, bottom=0.20)
plt.xlabel('R\n(b) SZ', **axis_font)
plt.ylabel('Z', **axis_font)
print(len(dpot1))
plt.tricontourf(R,
                Z,
                conn,
                dpot1,
Ejemplo n.º 40
0
size = 2
groupsize = 4 + 4 + 8 * size * NX
t = np.array(range(NX * size), dtype=np.float64)
tt = t.reshape((size, NX))
ad.set_group_size(fd, groupsize)
ad.write_int(fd, "NX", NX)
ad.write_int(fd, "size", size)
ad.write(fd, "temperature", tt)
ad.close(fd)

ad.finalize()

## Reading
print "\n>>> Reading ...\n"

f = ad.file("adios_test.bp")
f.printself()

v = f.var['temperature']
v.printself()

val = v.read()
print(val)
assert ((tt == val).all())
f.close()

## Testing
print "\n>>> Test utility functions ...\n"

print "bpls:\n", ad.bpls('adios_test.bp')
print "readvar:\n", ad.readvar("adios_test.bp", "temperature")
Ejemplo n.º 41
0
def main_plotting(fileroot, max_output):

    fileName = fileroot
    iFrame = 0  #.Initial frame (0 is the t=0 frame).
    fFrame = max_output  #.Final frame.

    polyOrder = 2
    basisType = 'ms'
    m_ion = 25
    vTe0 = 0.02
    alpha = 0.00

    #.Component of the quantity we wish to extract from data file.
    #.For field files this specifies the field component (e.g. Ex,
    #.Ey, Ez, Bx, By, or Bz) while for Mi1 it specifies the vector
    #.component of the momentum density.
    compZero = 0

    #..................... NO MORE USER INPUTS BELOW (maybe) ....................#

    nFrames = fFrame - iFrame + 1  #.Number of frames plotted.

    #.Extract grid details from one of the data files for each species
    fName_elc = fileName + '_elc_' + str(iFrame) + '.bp'
    fName_ion = fileName + '_ion_' + str(iFrame) + '.bp'

    # getGrid data
    x_elc, _, nx, lx, _ = pgu.getGrid(fName_elc,
                                      polyOrder,
                                      basisType,
                                      location='center')
    x_ion, _, _, _, _ = pgu.getGrid(fName_ion,
                                    polyOrder,
                                    basisType,
                                    location='center')
    #x_e, gridDim, nx, lx, dx = pgu.getGrid(fName,polyOrder,basisType,location='center')
    #    x_e = np.array(x_e)  #RLW: Not needed

    #Store needed data from getGrid
    nz = nx[0]
    ny = nx[1]
    lz = lx[0]  #physical box length in z
    ly = lx[1]
    #information about the X grid (same for both species)
    points_z = np.linspace(-lz / 2, lz / 2, nz)
    points_y = np.linspace(-ly / 2, ly / 2, ny)
    #information about the V grid for the electrons
    vz_elc_min = x_elc[2][0]
    vz_elc_max = x_elc[2][-1]
    vy_elc_min = x_elc[3][0]
    vy_elc_max = x_elc[3][-1]
    #information about the V grid for the ions
    vz_ion_min = x_ion[2][0]
    vz_ion_max = x_ion[2][-1]
    vy_ion_min = x_ion[3][0]
    vy_ion_max = x_ion[3][-1]

    # getInterpData data
    elcd = np.squeeze(
        pgu.getInterpData(fName_elc, polyOrder, basisType, comp=compZero))
    iond = np.squeeze(
        pgu.getInterpData(fName_ion, polyOrder, basisType, comp=compZero))

    #obtaining information about the grid in velocity space for electrons
    z0_elc = elcd.shape[0] // 2  #rlw: z index of the center of the box (48 here)
    y0_elc = elcd.shape[1] // 2
    vzsize_elc = int(elcd.shape[2])
    vysize_elc = int(elcd.shape[3])
    velocitiesz_elc = np.linspace(vz_elc_min, vz_elc_max, vzsize_elc)
    velocitiesy_elc = np.linspace(vy_elc_min, vy_elc_max, vysize_elc)
    #obtaining information about the grid in velocity space for ions
    z0_ion = iond.shape[0] // 2
    y0_ion = iond.shape[1] // 2
    vzsize_ion = int(iond.shape[2])
    vysize_ion = int(iond.shape[3])
    velocitiesz_ion = np.linspace(vz_ion_min, vz_ion_max, vzsize_ion)
    velocitiesy_ion = np.linspace(vy_ion_min, vy_ion_max, vysize_ion)

    #Setting up the grids in V space for plotting
    Vz_elc, Vy_elc = np.meshgrid(velocitiesz_elc,
                                 velocitiesy_elc,
                                 indexing='ij')
    Vz_ion, Vy_ion = np.meshgrid(velocitiesz_ion,
                                 velocitiesy_ion,
                                 indexing='ij')
    #Setting up the grids in X space for plotting
    grid_z, grid_y = np.meshgrid(points_z, points_y, indexing='ij')

    #initialize all arrays containing time-dependent quantities...that are going to be filled in during loop over frames
    times = np.zeros(nFrames)
    eField_boxavg_z = np.zeros(nFrames)
    Rei = np.zeros(nFrames)
    J_boxavg_z = np.zeros(nFrames)
    eField_fluct_squared = np.zeros(nFrames)
    eField_fluct_squared_byT = np.zeros(nFrames)
    E_over_J_rolling = np.zeros(nFrames)
    R_over_J_rolling = np.zeros(nFrames)
    energy_e_tot = np.zeros(nFrames)
    energy_b_tot = np.zeros(nFrames)
    E_boxavg_over_J_boxavg = np.zeros(nFrames)
    Tion_boxavg = np.zeros(nFrames)
    Telc_boxavg = np.zeros(nFrames)
    nu_Sagdeev = np.zeros(nFrames)

    #    Tion_boxavg_over_elc = np.zeros(nFrames)

    #    arrays the used to be initialized that did not need to be (though it may be slower)
    #    currentDen = np.zeros((nz,ny))
    #    Den        = np.zeros((nz,ny))
    #    ek_t = np.zeros((nFrames,int(nz/2+1),int(ny/2+1)))

    #this is the electron distribution function at a specific location in space as a function of time
    #    elcd_x0t = np.zeros((nFrames,vzsize_e,vysize_e))  #Never used!  Probably meant to be elcd_cut
    #    iond_x0t = np.zeros((nFrames,vzsize_i,vysize_i))
    for nFr in range(iFrame, fFrame + 1):
        fName_elc = fileName + '_elc_' + str(nFr) + '.bp'
        fNameM0_elc = fileName + '_elc_M0_' + str(nFr) + '.bp'
        fNameM1_elc = fileName + '_elc_M1i_' + str(nFr) + '.bp'  #.Complete file name.
        fName_vT_elc = fileName + '_elc_vthSq_' + str(nFr) + '.bp'
        fName_u_elc = fileName + '_elc_u_' + str(nFr) + '.bp'

        #        fNameM2_elc = fileName+'_elc_intM2Thermal_'+str(nFr)+'.bp'
        #       Contents of the other moment files (_vthSqCross, _uCross, _u,
        #       _intM2Flow, _intL2):
        #       intM2Flow = \int dx n*(u^2)
        #       intL2 = \int dx \int dv f^2
        #       vthSq = v_t^2
        #       u = the mean flow
        #       The *Cross files are specific to cross-species collisions and are
        #       likely not needed here; their formulas are in
        #       https://gkeyll.readthedocs.io/en/latest/dev/collisionmodels.html
        #       (2nd and 3rd equations below Dougherty collisions, for LBO collisions).

        fName_ion = fileName + '_ion_' + str(nFr) + '.bp'
        fNameM0_ion = fileName + '_ion_M0_' + str(nFr) + '.bp'
        fNameM1_ion = fileName + '_ion_M1i_' + str(nFr) + '.bp'  #.Complete file name.
        fName_vT_ion = fileName + '_ion_vthSq_' + str(nFr) + '.bp'
        fName_u_ion = fileName + '_ion_u_' + str(nFr) + '.bp'

        fName_field = fileName + '_field_' + str(nFr) + '.bp'

        elcd = np.squeeze(pgu.getInterpData(fName_elc, polyOrder, basisType))
        elcM0 = np.squeeze(
            pgu.getInterpData(fNameM0_elc, polyOrder, basisType, comp=0)
        )  # JJ: are we sure that we have to specify the keyword comp?
        elcM1_z = np.squeeze(
            pgu.getInterpData(fNameM1_elc, polyOrder, basisType, comp=0))
        elcM1_y = np.squeeze(
            pgu.getInterpData(fNameM1_elc, polyOrder, basisType, comp=1))
        elc_u_z = np.squeeze(
            pgu.getInterpData(fName_u_elc, polyOrder, basisType, comp=0))
        elc_u_y = np.squeeze(
            pgu.getInterpData(fName_u_elc, polyOrder, basisType, comp=1))
        elcM1_raw = np.squeeze(pgu.getRawData(fNameM1_elc))
        elc_vTsq = np.squeeze(
            pgu.getInterpData(fName_vT_elc, polyOrder, basisType))
        elc_vT = np.sqrt(elc_vTsq)
        elcT = elc_vTsq / vTe0**2  # array of temperatures, one per spatial location
        elcT_boxavg = np.average(elcT)
        #        elcM2 = np.squeeze(pgu.getRawData(fNameM2_elc))

        iond = np.squeeze(pgu.getInterpData(fName_ion, polyOrder, basisType))
        ionM0 = np.squeeze(
            pgu.getInterpData(fNameM0_ion, polyOrder, basisType, comp=0)
        )  # JJ: are we sure that we have to specify the keyword comp
        ionM1_z = np.squeeze(
            pgu.getInterpData(fNameM1_ion, polyOrder, basisType, comp=0))
        ionM1_y = np.squeeze(
            pgu.getInterpData(fNameM1_ion, polyOrder, basisType, comp=1))
        ion_u_z = np.squeeze(
            pgu.getInterpData(fName_u_ion, polyOrder, basisType, comp=0))
        ion_u_y = np.squeeze(
            pgu.getInterpData(fName_u_ion, polyOrder, basisType, comp=1))
        ionM1_raw = np.squeeze(pgu.getRawData(fNameM1_ion))
        ion_vTsq = np.squeeze(
            pgu.getInterpData(fName_vT_ion, polyOrder, basisType))
        ionT = m_ion * ion_vTsq / vTe0**2  # array of temperatures, one per spatial location
        ionT_boxavg = np.average(ionT)

        eField_z = np.squeeze(
            pgu.getInterpData(fName_field, polyOrder, basisType, comp=0)
        )  # JJ: can this be turned into 1 call without comp specified?
        eField_y = np.squeeze(
            pgu.getInterpData(fName_field, polyOrder, basisType, comp=1))
        #        e_raw = np.squeeze(pgu.getRawData(fName_field))
        #fName     = fileName + '_ion_M1i_'+str(nFr)+'.bp'    #.Complete file name.
        #fName_den = fileName + '_ion_M0_'+str(nFr)+'.bp'    #.Complete file name.

        #ionM1 = np.squeeze(pgu.getInterpData(fName,polyOrder,basisType,comp=compZero))
        # ionM0 = np.squeeze(pgu.getInterpData(fName_den,polyOrder,basisType,comp=compZero))

        #ionM1_raw = np.squeeze(pgu.getRawData(fName))

        #fName = fileName+'_elc_'+str(nFr)+'.bp'

        # fName = fileName+'_ion_'+str(nFr)+'.bp'
        # iond = np.squeeze(pgu.getInterpData(fName,polyOrder,basisType))

        #compute box-averaged distribution functions (average over both spatial dimensions)
        elcd_box_avg = np.average(elcd, axis=(0, 1))
        iond_box_avg = np.average(iond, axis=(0, 1))

        elcd_cut = elcd[z0_elc, y0_elc, :, :]
        iond_cut = iond[z0_ion, y0_ion, :, :]

        # eName = fileName+'_field_'+str(nFr)+'.bp'

        #temperature plotting
        #Ti_fname = fileName + '_ion_' + 'intM2Thermal_' + str(nFr) + '.bp'
        #ionM2 = np.squeeze(pgu.getRawData(Ti_fname))
        #        ionM2 = np.squeeze(pgu.getRawData(Ti_fname,polyOrder,basisType,comp=compZero))
        #Jincreasing_simplest_randomkicksfunJ_Tratio100_standard2D_RLW_LMM_edit3_diag_test_ion_intM2Thermal

        #loading electric field

        #split into spatial average and fluctuating parts
        boxavg_eField_z = np.average(eField_z)
        boxavg_eField_y = np.average(eField_y)
        eField_fluct_z = eField_z - boxavg_eField_z
        eField_fluct_y = eField_y - boxavg_eField_y

        boxavg_uElc_z = np.average(elc_u_z)
        boxavg_uElc_y = np.average(elc_u_y)

        #e_raw will contain Ex, Ey, Ez

        #        e_z_raw_cell_average = eField_fluct_z#0.5*e_raw[:,:,0]  #rlw: why multiply by 0.5?
        #        e_y_raw_cell_average = eField_fluct_y#0.5*e_raw[:,:,8]
        #        nz_avg          = e_z_raw_cell_average.shape[0]
        #        ny_avg          = e_z_raw_cell_average.shape[1]

        #compute energy in electric and magnetic fields field
        Tion_boxavg[nFr - iFrame] = ionT_boxavg
        Telc_boxavg[nFr - iFrame] = elcT_boxavg
        eField_fluct_squared[nFr - iFrame] = (
            np.sum(eField_fluct_z**2 + eField_fluct_y**2) / (nz * ny)) / vTe0**2
        eField_fluct_squared_byT[nFr - iFrame] = (
            np.sum(eField_fluct_z**2 + eField_fluct_y**2) /
            (nz * ny)) / (vTe0 * elcT_boxavg)**2
        #this would be the cell-averaged values (?)
        #        energy_e_tot[nFr-iFrame] = 0.5*(np.sum(e_raw[:,:,0]**2)+ np.sum(e_raw[:,:,8]**2) + np.sum(e_raw[:,:,16]**2))
        #        energy_b_tot[nFr-iFrame] = 0.5*(np.sum(e_raw[:,:,24]**2)+ np.sum(e_raw[:,:,32]**2) + np.sum(e_raw[:,:,40]**2))

        #.Compute the kz=0 component of the electric field
        eField_boxavg_z[nFr - iFrame] = boxavg_eField_z
        #LMM (should this maybe not be normalized?)

        #.Compute current density.
        Jz = ionM1_z - elcM1_z
        Jy = ionM1_y - elcM1_y
        Den = ionM0  #Mass density - ought to subtract the electrons (?) LMM
        J_fluct_z = Jz - np.sum(Jz) / (nz * ny)
        J_fluct_y = Jy - np.sum(Jy) / (nz * ny)
        #        currentDen_raw_z = 0.5*(ionM1_raw[:,:,0] - elcM1_raw[:,:,0])
        #        currentDen_raw_y = 0.5*(ionM1_raw[:,:,8] - elcM1_raw[:,:,8])  #Is 8 the right component??

        #updating J_boxavg_z
        J_boxavg_z[nFr - iFrame] = np.sum(Jz) / (nz * ny)

        #.Extract the time from file.
        hF = ad.file(fName_elc)
        times[nFr - iFrame] = hF['time'].read()
        hF.close()
        time = float('%.3g' % times[nFr - iFrame])  # round to 3 sig. figs. for plot titles (fix me please)

        fignum = str(nFr).zfill(4)

        #Distribution function plots [Dplots]
        fig, axs = plt.subplots(2,
                                3,
                                figsize=(40, 20),
                                facecolor='w',
                                edgecolor='k')
        fig.subplots_adjust(hspace=.5, wspace=.1)
        axs = axs.ravel()

        pos0 = axs[0].pcolormesh(Vz_elc, Vy_elc, elcd_cut)
        axs[0].set_xlabel(r'$v_z/c$', fontsize=30)
        axs[0].set_ylabel(r'$v_y/c$', fontsize=30, labelpad=-1)
        axs[0].set_title(r'$F_e(z_0,y_0,v_z,v_y),$' +
                         rf'$J_z$ = {np.average(Jz)}/vTe0' + r' [$c_{s0}$]',
                         fontsize=26)
        axs[0].tick_params(labelsize=26)
        cbar = fig.colorbar(pos0, ax=axs[0])
        cbar.ax.tick_params(labelsize=22)

        pos1 = axs[1].pcolormesh(Vz_ion, Vy_ion, iond_cut)
        axs[1].set_xlabel(r'$v_z/c$', fontsize=30)
        axs[1].set_ylabel(r'$v_y/c$', fontsize=30, labelpad=-1)
        axs[1].set_title(r'$F_i(z_0,y_0,v_z,v_y),$' + rf't = {time}' +
                         r' [$\omega_{pe}^{-1}$]',
                         fontsize=26)
        axs[1].tick_params(labelsize=26)
        cbar = fig.colorbar(pos1, ax=axs[1])
        cbar.ax.tick_params(labelsize=22)

        pos2 = axs[2].pcolormesh(grid_y, grid_z, Jz)
        axs[2].set_xlabel(r'$y \ [d_e]$', fontsize=30)
        axs[2].set_ylabel(r'$z \ [d_e]$', fontsize=30, labelpad=-1)
        axs[2].set_title(r'$J(z,y)$', fontsize=30)
        axs[2].tick_params(labelsize=26)
        cbar = fig.colorbar(pos2, ax=axs[2])
        cbar.ax.tick_params(labelsize=22)

        pos3 = axs[3].pcolormesh(Vz_elc, Vy_elc, elcd_box_avg)
        axs[3].scatter(boxavg_uElc_z, boxavg_uElc_y, s=60)
        axs[3].scatter(
            np.squeeze(Vz_elc[np.where(elcd_box_avg == np.max(elcd_box_avg))]),
            np.squeeze(Vy_elc[np.where(elcd_box_avg == np.max(elcd_box_avg))]),
            s=40,
            marker='x',
            alpha=1)
        axs[3].set_xlabel(r'$v_z/c$', fontsize=30)
        axs[3].set_ylabel(r'$v_y/c$', fontsize=30, labelpad=-1)
        axs[3].set_title(r'$<F_e(v_z,v_y)>_{z,y},$' + rf't = {time}' +
                         r' [$\omega_{pe}^{-1}$]',
                         fontsize=26)
        axs[3].tick_params(labelsize=26)
        cbar = fig.colorbar(pos3, ax=axs[3])
        cbar.ax.tick_params(labelsize=22)

        pos4 = axs[4].pcolormesh(Vz_ion, Vy_ion, iond_box_avg)
        axs[4].set_xlabel(r'$v_z/c$', fontsize=30)
        axs[4].set_ylabel(r'$v_y/c$', fontsize=30, labelpad=-1)
        axs[4].set_title(r'$<F_i(v_z,v_y)>_{z,y},$' + rf't = {time}' +
                         r' [$\omega_{pe}^{-1}$]',
                         fontsize=26)
        axs[4].tick_params(labelsize=26)
        cbar = fig.colorbar(pos4, ax=axs[4])
        cbar.ax.tick_params(labelsize=22)

        pos5 = axs[5].pcolormesh(grid_y, grid_z, Den)
        axs[5].set_xlabel(r'$y \ [d_e]$', fontsize=30)
        axs[5].set_ylabel(r'$z \ [d_e]$', fontsize=30, labelpad=-1)
        axs[5].set_title('$\\rho (z,y)$', fontsize=30)
        axs[5].tick_params(labelsize=26)
        cbar = fig.colorbar(pos5, ax=axs[5])
        cbar.ax.tick_params(labelsize=22)

        fig.tight_layout()
        plt.savefig(outDir + fileName + rf'_diagnostics_{fignum}.png',
                    bbox_inches='tight')
        plt.close()

        #computing the fourier transform of the electric field
        if fourier_transform:
            #            e_z_raw_cell_average_k = np.zeros((nz_avg, ny_avg),dtype=complex)
            #            J_z_k_raw              = np.zeros((nz_avg, ny_avg),dtype=complex)
            eField_fluct_z_k = np.fft.fftn(eField_fluct_z / vTe0)
            eField_fluct_y_k = np.fft.fftn(eField_fluct_y / vTe0)
            #print('fft_freq', np.fft.fftfreq(128) )
            J_fluct_z_k = np.fft.fftn(J_fluct_z)
            J_fluct_y_k = np.fft.fftn(J_fluct_y)

            #e_z_raw_cell_average_k_y_int and J_z_k_raw_y_int are integrated over y
            #            e_z_raw_cell_average_k_y_int = np.zeros(nz_avg)
            #            J_z_k_raw_y_int                 = np.zeros(nz_avg)

            #            e_z_raw_cell_average_k[0,0] = 0 #test: try to remove box-averaged component
            #            e_y_raw_cell_average_k[0,0] = 0
            #            J_z_k_raw[0,0] = 0
            #            J_y_k_raw[0,0] = 0
            JdotE_k = np.abs(
                np.transpose(
                    np.fft.fftshift(J_fluct_z_k * eField_fluct_z_k +
                                    J_fluct_y_k * eField_fluct_y_k)))
            eField_fluct_square_K = np.abs(
                np.transpose(
                    np.fft.fftshift(eField_fluct_z_k**2 +
                                    eField_fluct_y_k**2)))
            #integrating over y direction (RLW: can we do this in a vectorized way?)
            #for j in range(ny_avg):
            #    for i in range(nz_avg):
            #            for j in range(ny):
            #                for i in range(nz):
            #                    e_z_raw_cell_average_k_y_int[i] += np.abs(e_z_raw_cell_average_k[i,j])
            #                    J_z_k_raw_y_int[i]              += np.abs(J_z_k_raw[i,j])

            #ek_t[nFr,:] = ek
            fignum = str(nFr).zfill(4)

            #z_plot_2d_sp   = np.linspace(-int(nz_avg/2), int(nz_avg/2-1), nz_avg)/lz
            #y_plot_2d_sp   = np.linspace(-int(ny_avg/2), int(ny_avg/2-1), ny_avg)/ly

            kz_plot_2d_sp = 2.0 * np.pi * vTe0 * np.linspace(
                -int(nz / 2), int(nz / 2 - 1), nz) / lz
            ky_plot_2d_sp = 2.0 * np.pi * vTe0 * np.linspace(
                -int(ny / 2), int(ny / 2 - 1), ny) / ly
            K_z, K_y = np.meshgrid(kz_plot_2d_sp, ky_plot_2d_sp, indexing='xy')

            #Plotting FFT of electric field and J

            fig, axs = plt.subplots(2,
                                    2,
                                    figsize=(20, 20),
                                    facecolor='w',
                                    edgecolor='k')
            fig.subplots_adjust(hspace=.5, wspace=.1)
            axs = axs.ravel()
            pos0 = axs[0].pcolormesh(Vz_elc, Vy_elc, elcd_box_avg)
            axs[0].scatter(boxavg_uElc_z, boxavg_uElc_y, s=60)
            axs[0].scatter(np.squeeze(
                Vz_elc[np.where(elcd_box_avg == np.max(elcd_box_avg))]),
                           np.squeeze(Vy_elc[np.where(
                               elcd_box_avg == np.max(elcd_box_avg))]),
                           s=40,
                           marker='x',
                           alpha=1)
            axs[0].set_xlabel(r'$v_z/c$', fontsize=30)
            axs[0].set_ylabel(r'$v_y/c$', fontsize=30, labelpad=-1)
            axs[0].set_title(r'$<F_e(v_z,v_y)>_{z,y},$' + rf't = {time}' +
                             r' [$\omega_{pe}^{-1}$]',
                             fontsize=26)
            axs[0].tick_params(labelsize=26)
            cbar = fig.colorbar(pos0, ax=axs[0])
            cbar.ax.tick_params(labelsize=22)

            pos1 = axs[1].pcolormesh(Vz_ion, Vy_ion, iond_box_avg)
            axs[1].set_xlabel(r'$v_z/c$', fontsize=30)
            axs[1].set_ylabel(r'$v_y/c$', fontsize=30, labelpad=-1)
            axs[1].set_title(r'$<F_i(v_z,v_y)>_{z,y},$' + rf't = {time}' +
                             r' [$\omega_{pe}^{-1}$]',
                             fontsize=26)
            axs[1].tick_params(labelsize=26)
            cbar = fig.colorbar(pos1, ax=axs[1])
            cbar.ax.tick_params(labelsize=22)
            #            pos0 = axs[0].plot(e_z_raw_cell_average_k_y_int)
            #            axs[0].set_xlabel(r'$k_z \lambda_{De0}$', fontsize=30)
            #            axs[0].set_ylabel(r'$E(k_z)$', fontsize=30, labelpad=-1)
            #            axs[0].set_title(rf't = {time}'+ r'[$\omega_{pe}^{-1}$]', fontsize=30)
            #            axs[0].tick_params(labelsize = 26)
            #            axs[0].set_yscale('symlog') #plot on log scale and take absolute value of number

            #            pos1 = axs[1].plot(J_z_k_raw_y_int)
            #            axs[1].set_xlabel(r'$k_z d_e$', fontsize=30)
            #            axs[1].set_ylabel(r'$J(k_z)$', fontsize=30, labelpad=-1)
            #            axs[1].set_title(rf't = {time}'+ r'[$\omega_{pe}^{-1}$]', fontsize=30)
            #            axs[1].tick_params(labelsize = 26)
            #            axs[1].set_yscale('symlog')

            pos2 = axs[2].contourf(K_z, K_y, eField_fluct_square_K)
            axs[2].set_xlabel(r'$k_z \lambda_{De0}$', fontsize=30)
            axs[2].set_ylabel(r'$k_y \lambda_{De0}$', fontsize=30, labelpad=-1)
            axs[2].set_title(rf'$|\delta E^2|_k$, t = {time}' +
                             r'[$\omega_{pe}^{-1}$]',
                             fontsize=30)
            axs[2].tick_params(labelsize=26)
            cbar = fig.colorbar(pos2, ax=axs[2])
            cbar.ax.tick_params(labelsize=22)

            pos3 = axs[3].contourf(K_z, K_y, JdotE_k)
            axs[3].set_xlabel(r'$k_z \lambda_{De0}$', fontsize=30)
            axs[3].set_ylabel(r'$k_y \lambda_{De0}$', fontsize=30, labelpad=-1)
            axs[3].set_title(rf'$(\delta J \cdot \delta E)_k$, t = {time}' +
                             r'[$\omega_{pe}^{-1}$]',
                             fontsize=30)
            axs[3].tick_params(labelsize=26)
            cbar = fig.colorbar(pos3, ax=axs[3])
            cbar.ax.tick_params(labelsize=22)
            #            pos3 = axs[3].contourf(K_z, K_y, JdotE_k )
            #            axs[3].set_xlabel(r'$k_z d_e$', fontsize=30)
            #            axs[3].set_ylabel(r'$k_y d_e$', fontsize=30, labelpad=-1)
            #            axs[3].set_title(rf'$(\delta J \cdot \delta E)_k$, t = {time}'+ r'[$\omega_{pe}^{-1}$]', fontsize=30)
            #            axs[3].tick_params(labelsize = 26)
            #            cbar = fig.colorbar(pos3, ax=axs[3])
            #            cbar.ax.tick_params(labelsize=22)

            fig.tight_layout()
            plt.savefig(outDir + fileName + rf'_fft_{fignum}.png',
                        bbox_inches='tight')
            plt.close()

            #PLOTTING (to be done at each time step)


    #### we have now left the loop over frames

    Navg = 3
    Rei = eField_boxavg_z + alpha * vTe0
    #    E_over_J_rolling = np.zeros(nFrames)
    for n in range(Navg, nFrames):
        E_over_J_rolling[n] = np.sum(eField_boxavg_z[n - Navg:n]) / np.sum(
            J_boxavg_z[n - Navg:n])
    for n in range(Navg):
        E_over_J_rolling[n] = E_over_J_rolling[Navg]  # backfill the first Navg values

    for n in range(Navg, nFrames):
        R_over_J_rolling[n] = np.sum(Rei[n - Navg:n]) / np.sum(
            J_boxavg_z[n - Navg:n])
    for n in range(Navg):
        R_over_J_rolling[n] = R_over_J_rolling[Navg]  # backfill the first Navg values
    nu_eff_rolling = -(R_over_J_rolling)
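
    # The two backfilled rolling ratios above can also be computed without
    # explicit loops; a sketch (equivalent for nFrames > Navg):
    #
    #   num = np.convolve(eField_boxavg_z, np.ones(Navg), mode='valid')
    #   den = np.convolve(J_boxavg_z, np.ones(Navg), mode='valid')
    #   E_over_J_rolling[Navg:] = num[:-1] / den[:-1]
    #   E_over_J_rolling[:Navg] = E_over_J_rolling[Navg]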

    nu_Sagdeev = 0.025 * np.absolute(
        J_boxavg_z / np.sqrt(Telc_boxavg)) * Telc_boxavg / Tion_boxavg

    #Energy plots
    fig, axs = plt.subplots(2,
                            3,
                            figsize=(30, 20),
                            facecolor='w',
                            edgecolor='k')
    fig.subplots_adjust(hspace=.5, wspace=.1)
    axs = axs.ravel()
    np.save('timesFile', times[0:fFrame - iFrame + 1])
    np.save('eZboxAvgFile', eField_boxavg_z[0:fFrame - iFrame + 1])
    np.save('jZboxAvgFile', J_boxavg_z[0:fFrame - iFrame + 1])
    np.save('eSquaredFile', eField_fluct_squared[0:fFrame - iFrame + 1])
    np.save('Tion_boxavgFile', Tion_boxavg[0:fFrame - iFrame + 1])
    np.save('Telc_boxavgFile', Telc_boxavg[0:fFrame - iFrame + 1])

    axs[0].plot(
        times[0:fFrame - iFrame + 1],
        eField_fluct_squared,
        label=
        r'$ \left(\epsilon_0 \langle|\delta {E}|^2\rangle_{x,y}/2\right)/ T_{e0} $'
    )
    axs[0].plot(
        times[0:fFrame - iFrame + 1],
        eField_fluct_squared_byT,
        label=
        r'$ \left(\epsilon_0 \langle|\delta {E}|^2\rangle_{x,y}/2\right)/ T_{e} $'
    )
    axs[0].set_xlabel(r'$t \ [\omega_{pe}^{-1}]$', fontsize=30)
    axs[0].set_ylabel(
        r'$ \left(\epsilon_0 \langle|\delta {E}|^2\rangle_{x,y}/2\right)/ T_{e0} $',
        fontsize=30)
    axs[0].tick_params(labelsize=26)
    axs[0].legend(fontsize=18)

    axs[1].plot(times[0:fFrame - iFrame + 1], eField_boxavg_z)
    axs[1].set_xlabel(r'$t \ [\omega_{pe}^{-1}]$', fontsize=30)
    axs[1].set_ylabel(r'$\langle E_z \rangle$', fontsize=30)
    axs[1].tick_params(labelsize=26)
    #    axs[1].set_ylim(top = 0.00)

    axs[2].plot(times[0:fFrame - iFrame + 1], E_over_J_rolling)
    axs[2].set_xlabel(r'$t \ [\omega_{pe}^{-1}]$', fontsize=30)
    axs[2].set_ylabel(
        r'$\langle E_z\rangle /\langle\, J_z\rangle \ [\nu_{\mathrm{eff}}/ \omega_{pe}]$',
        fontsize=30)
    axs[2].tick_params(labelsize=26)
    #    axs[2].set_ylim(0.0, 2*np.amax(E_over_J_rolling))

    axs[3].plot(times[0:fFrame - iFrame + 1], Telc_boxavg)
    axs[3].set_xlabel(r'$t \ [\omega_{pe}^{-1}]$', fontsize=30)
    axs[3].set_ylabel(r'$T_e /T_{e0}$', fontsize=30)
    axs[3].tick_params(labelsize=26)
    axs[3].set_ylim(0.0, 2 * np.amax(Telc_boxavg))

    axs[4].plot(times[0:fFrame - iFrame + 1],
                Tion_boxavg,
                label=r'$T_i /T_{e0}$')
    axs[4].plot(times[0:fFrame - iFrame + 1],
                Tion_boxavg / Telc_boxavg,
                label=r'$T_i /T_{e}$')
    axs[4].set_xlabel(r'$t \ [\omega_{pe}^{-1}]$', fontsize=30)
    axs[4].set_ylabel(r'$T_i /T_{e0}$', fontsize=30)
    axs[4].tick_params(labelsize=26)
    axs[4].set_ylim(0.0, 0.1)
    axs[4].legend(fontsize=18)
    #    axs[4].set_ylim(0.0, 2*np.amax(Tion_boxavg) )

    axs[5].plot(times[0:fFrame - iFrame + 1], nu_eff_rolling, label='nu_eff')
    axs[5].plot(times[0:fFrame - iFrame + 1], nu_Sagdeev, label='nu_Sagdeev')
    axs[5].set_xlabel(r'$t \ [\omega_{pe}^{-1}]$', fontsize=30)
    axs[5].set_ylabel(
        r'$\langle R_z\rangle /\langle\, J_z\rangle \ [\nu_{\mathrm{eff}}/ \omega_{pe}]$',
        fontsize=30)
    axs[5].tick_params(labelsize=26)
    axs[5].legend(fontsize=18)

    fig.tight_layout()
    plt.savefig(outDir + fileName + '_energy_current.png',
                bbox_inches='tight')
    plt.close()
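
# A hypothetical invocation (main_plotting also relies on module-level names
# such as pgu, outDir and fourier_transform being defined elsewhere):
#
#   main_plotting('bumpontail2D', max_output=100)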
Ejemplo n.º 42
0
def getTimeAv(dataDir, simName, varName, iFrame, fFrame, p, b, saveAv, tAvDir):
    #.Check or create post data directory.
    checkMkdir(tAvDir)
    #.Check if time average file already exists.
    tAvFile = tAvDir + simName + '_' + varName + '_TimeAv' + str(
        iFrame) + '-' + str(fFrame) + '.bp'
    if not os.path.isfile(tAvFile):
        #.Compute time average and store it in new file.
        fileName = dataDir + simName + '_' + varName + '_%d.bp'
        x, gridDim, nx, lx, dx = getGrid(fileName % iFrame,
                                         p,
                                         b,
                                         location='center')

        q0AvT = np.zeros(nx)
        for nFr in range(iFrame, fFrame + 1):
            #.Read 3D data into q0.
            q0AvT = np.add(q0AvT,
                           np.squeeze(getInterpData(fileName % nFr, p, b)))

        q0AvT = np.divide(q0AvT, float(fFrame - iFrame + 1))

        if saveAv:
            #.Save time average to a file for reuse.
            print(" ")
            print("Saving time average in " + tAvFile + " ...")
            #.Function to write DG coefficients to Gkeyll-style ADIOS file.
            sNumCells = ""
            sOffsets = ""
            for i in range(np.size(nx)):
                sNumCells += "{:d},".format(int(nx[i]))
                sOffsets += "0,"
            #.ADIOS init.
            ad.init_noxml()
            ad.set_max_buffer_size(1000)
            groupId = ad.declare_group("CartFieldInterp", "")
            ad.select_method(groupId, "POSIX1", "", "")
            #.Define variables and attributes.
            ad.define_attribute_byvalue(groupId, "numCells", "", nx)
            lo = np.zeros(np.size(nx), dtype='double')
            up = np.zeros(np.size(nx), dtype='double')
            for i in range(np.size(nx)):
                lo[i], up[i] = x[i][0], x[i][-1]
            ad.define_attribute_byvalue(groupId, "lowerBounds", "", lo)
            ad.define_attribute_byvalue(groupId, "upperBounds", "", up)
            ad.define_var(groupId, "CartGridFieldInterpTimeAv", "",
                          ad.DATATYPE.double, sNumCells, sNumCells, sOffsets)
            fh = ad.open("CartFieldInterp", tAvFile, 'w')
            ad.write(fh, "CartGridFieldInterpTimeAv", q0AvT)
            ad.close(fh)
            ad.finalize()
            #.Deal with weird file output where a '.bp.0' file is created.
            if len(tAvFile.split('/')) > 1:
                nm = tAvFile.split('/')[-1]
            else:
                nm = tAvFile
            shutil.move(tAvFile + '.dir/' + nm + '.0', tAvFile)
            shutil.rmtree(tAvFile + '.dir')
    else:
        #.Read time average from existent file.
        print(" ")
        print("Reading time average in " + tAvFile + " ...")
        hF = ad.file(tAvFile)
        q0AvT = hF['CartGridFieldInterpTimeAv'].read()
        hF.close()

    return q0AvT
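
# Usage sketch (hypothetical paths and names; p and b must match the
# polynomial order and basis the data were produced with):
#
#   phiAv = getTimeAv('./data/', 'simName', 'phi', iFrame=0, fFrame=100,
#                     p=2, b='ms', saveAv=True, tAvDir='./post/')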
Ejemplo n.º 43
0
fstart = 900
fend = fstart + 300  # 3510
dt = 1
DIR = "Run06/"
outDir = "blobDataRun06"

thresholdDensity = 3.1e18
#========== Blob Data Directory Setup =============
if os.path.exists(outDir):
    os.system('rm -rf ' + outDir)
os.system('mkdir ' + outDir)
############################################
data_num = np.arange(start=fstart, stop=fend, step=dt, dtype=int)
f = ad.file(DIR + 'asdex_phi_%d' % data_num[0] + '.bp')

blob_size_file = open(outDir+"/blob_size.txt", "w")

Nx = f['numCells'][0]
Ny = f['numCells'][1]
Nz = f['numCells'][2]

Xmin = f['lowerBounds'][0]
Ymin = f['lowerBounds'][1]
Zmin = f['lowerBounds'][2]

Xmax = f['upperBounds'][0]
Ymax = f['upperBounds'][1]
Zmax = f['upperBounds'][2]
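
# With the cell counts and bounds read above, uniform cell-center coordinates
# follow directly; a sketch (an assumption about how the script goes on to
# use them, not part of the original):
dx = (Xmax - Xmin) / Nx
dy = (Ymax - Ymin) / Ny
x = np.linspace(Xmin + 0.5 * dx, Xmax - 0.5 * dx, int(Nx))
y = np.linspace(Ymin + 0.5 * dy, Ymax - 0.5 * dy, int(Ny))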