def test_dtype_with_object(self):
    "Test using an explicit dtype with an object"
    # NOTE(review): this assertion fails unconditionally, so the body below
    # never runs — presumably a deliberate known-failure marker; confirm.
    assert_equal(True, False)
    # NOTE(review): `date` and `time` are imported but the body uses bare
    # `strptime` and `datetime` instead — presumably provided at module
    # level; verify, otherwise this test would raise NameError if enabled.
    from datetime import date
    import time
    data = """ 1; 2001-01-01
2; 2002-01-31
"""
    # NOTE(review): `np.object` was removed in numpy >= 1.24; would need to
    # become plain `object` for the (currently unreachable) code to run.
    ndtype = [('idx', int), ('code', np.object)]
    # Column 1 is parsed into a date-like object by the converter.
    func = lambda s: strptime(s.strip(), "%Y-%m-%d")
    converters = {1: func}
    test = textadapter.genfromtxt(StringIO(data), delimiter=";", dtype=ndtype,
                                  converters=converters)
    control = np.array([(1, datetime(2001, 1, 1)), (2, datetime(2002, 1, 31))],
                       dtype=ndtype)
    assert_equal(test, control)
    #
    # Nested dtypes holding objects should not raise NotImplementedError.
    ndtype = [('nest', [('idx', int), ('code', np.object)])]
    try:
        test = textadapter.genfromtxt(StringIO(data), delimiter=";",
                                      dtype=ndtype, converters=converters)
    except NotImplementedError:
        errmsg = "Nested dtype involving objects should be supported."
        raise AssertionError(errmsg)
def test_user_filling_values(self):
    "Test with missing and filling values"
    # The original began with a dead `ctrl` assignment that was overwritten
    # before any use; it has been removed.
    data = "N/A, 2, 3\n4, ,???"
    # Per-column missing markers and fill values, addressed both by index
    # and by field name.
    kwargs = dict(delimiter=",",
                  dtype=int,
                  names="a,b,c",
                  missing_values={0: "N/A", 'b': " ", 2: "???"},
                  filling_values={0: 0, 'b': 0, 2: -999})
    # All three columns read: each missing entry replaced by its fill value.
    test = textadapter.genfromtxt(StringIO(data), **kwargs)
    ctrl = np.array([(0, 2, 3), (4, 0, -999)],
                    dtype=[(_, int) for _ in "abc"])
    assert_equal(test, ctrl)
    # Selecting first and last columns keeps the per-column fill behavior.
    test = textadapter.genfromtxt(StringIO(data), usecols=(0, -1), **kwargs)
    ctrl = np.array([(0, 3), (4, -999)], dtype=[(_, int) for _ in "ac"])
    assert_equal(test, ctrl)
def test_commented_header(self):
    "Check that names can be retrieved even if the line is commented out."
    # Header line begins with the comment character; the '#' should be
    # stripped automatically and the remaining tokens used as field names.
    stream = StringIO("""
#gender age weight
M 21 72.100000
F 35 58.330000
M 33 21.99
""")
    result = textadapter.genfromtxt(stream, names=True, dtype=None)
    # JNB: changed test because textadapter defaults to object string
    # instead of fixed length string, and unsigned long int
    # instead of int.
    expected = np.array(
        [('M', 21, 72.1), ('F', 35, 58.33), ('M', 33, 21.99)],
        dtype=[('gender', 'O'), ('age', 'u8'), ('weight', 'f8')])
    #dtype=[('gender', '|S1'), ('age', int), ('weight', float)])
    assert_equal(result, expected)
    # Same data with a space after '#': the lone marker token is dropped.
    stream = StringIO("""
# gender age weight
M 21 72.100000
F 35 58.330000
M 33 21.99
""")
    result = textadapter.genfromtxt(stream, names=True, dtype=None)
    assert_equal(result, expected)
def test_replace_space(self):
    "Test the 'replace_space' option"
    txt = "A.A, B (B), C:C\n1, 2, 3.14"
    # Default behavior: spaces become '_' and non-alphanumeric characters
    # are deleted from the field names.
    result = textadapter.genfromtxt(StringIO(txt), delimiter=",",
                                    names=True, dtype=None)
    expected = np.array((1, 2, 3.14),
                        dtype=[("AA", int), ("B_B", int), ("CC", float)])
    assert_equal(result, expected)
    # Keep names verbatim: no replacement, no deletion.
    result = textadapter.genfromtxt(StringIO(txt), delimiter=",",
                                    names=True, dtype=None,
                                    replace_space='', deletechars='')
    expected = np.array((1, 2, 3.14),
                        dtype=[("A.A", int), ("B (B)", int), ("C:C", float)])
    assert_equal(result, expected)
    # Deletion disabled only: spaces are still replaced by '_'.
    result = textadapter.genfromtxt(StringIO(txt), delimiter=",",
                                    names=True, dtype=None, deletechars='')
    expected = np.array((1, 2, 3.14),
                        dtype=[("A.A", int), ("B_(B)", int), ("C:C", float)])
    assert_equal(result, expected)
def test_skip_footer_with_invalid(self):
    # Manual __enter__/__exit__ pair (old WarningManager API) silences
    # warnings for the duration of the test.
    warn_ctx = WarningManager()
    warn_ctx.__enter__()
    try:
        basestr = '1 1\n2 2\n3 3\n4 4\n5 \n6 \n7 \n'
        warnings.filterwarnings("ignore")
        # skip_footer=1 leaves malformed (short) rows behind -> must raise.
        assert_raises(ValueError, textadapter.genfromtxt,
                      StringIO(basestr), skip_footer=1)
        # With invalid_raise=False the malformed rows are dropped silently.
        a = textadapter.genfromtxt(StringIO(basestr), skip_footer=1,
                                   invalid_raise=False)
        assert_equal(a, np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]]))
        # A footer large enough to swallow every malformed row.
        a = textadapter.genfromtxt(StringIO(basestr), skip_footer=3)
        assert_equal(a, np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]]))
        # Malformed rows interleaved with valid ones.
        basestr = '1 1\n2 \n3 3\n4 4\n5 \n6 6\n7 7\n'
        a = textadapter.genfromtxt(StringIO(basestr), skip_footer=1,
                                   invalid_raise=False)
        assert_equal(a, np.array([[1., 1.], [3., 3.], [4., 4.], [6., 6.]]))
        a = textadapter.genfromtxt(StringIO(basestr), skip_footer=3,
                                   invalid_raise=False)
        assert_equal(a, np.array([[1., 1.], [3., 3.], [4., 4.]]))
    finally:
        warn_ctx.__exit__()
def test_usecols_with_named_columns(self):
    "Test usecols with named columns"
    expected = np.array([(1, 3), (4, 6)],
                        dtype=[('a', float), ('c', float)])
    data = "1 2 3\n4 5 6"
    kwargs = dict(names="a, b, c")
    # Columns selected by index (negative indices allowed)...
    result = textadapter.genfromtxt(StringIO(data), usecols=(0, -1), **kwargs)
    assert_equal(result, expected)
    # ...and selected by name.
    result = textadapter.genfromtxt(StringIO(data), usecols=('a', 'c'),
                                    **kwargs)
    assert_equal(result, expected)
def test_gft_using_generator(self):
    # genfromtxt must accept a generator of text lines, not just a file.
    def numbers():
        for value in range(10):
            yield "%d" % value

    result = textadapter.genfromtxt(numbers())
    assert_array_equal(result, np.arange(10))
def test_usecols_as_css(self):
    "Test giving usecols with a comma-separated string"
    data = "1 2 3\n4 5 6"
    # usecols given as a single comma-separated string of field names.
    result = textadapter.genfromtxt(StringIO(data),
                                    names="a, b, c", usecols="a, c")
    expected = np.array([(1, 3), (4, 6)],
                        dtype=[(name, float) for name in "ac"])
    assert_equal(result, expected)
def test_tricky_converter_bug1666(self):
    "Test some corner case"
    # NOTE(review): this assertion fails unconditionally, so the rest of
    # the test never executes — presumably a deliberate known-failure
    # marker for textadapter; confirm before removing.
    assert_equal(True, False)
    s = StringIO('q1,2\nq3,4')
    # Converter strips the leading 'q' from the first column.
    cnv = lambda s: float(s[1:])
    test = textadapter.genfromtxt(s, delimiter=',', converters={0: cnv})
    control = np.array([[1., 2.], [3., 4.]])
    assert_equal(test, control)
def test_with_masked_column_uniform(self):
    "Test masked column"
    data = StringIO('1 2 3\n4 5 6\n')
    result = textadapter.genfromtxt(data, dtype=None,
                                    missing_values='2,5', usemask=True)
    # The middle column matches a missing value in every row.
    expected = ma.array([[1, 2, 3], [4, 5, 6]],
                        mask=[[0, 1, 0], [0, 1, 0]])
    assert_equal(result, expected)
def test_names_auto_completion(self):
    "Make sure that names are properly completed"
    data = "1 2 3\n 4 5 6"
    # Only one name supplied; remaining fields get default names f1, f2.
    result = textadapter.genfromtxt(StringIO(data),
                                    dtype=(int, float, int), names="a")
    expected = np.array([(1, 2, 3), (4, 5, 6)],
                        dtype=[('a', int), ('f1', float), ('f2', int)])
    assert_equal(result, expected)
def test_skip_footer(self):
    # Five comment lines, one header, then 51 data rows whose final row
    # is malformed — skip_footer must discard it along with 9 good rows.
    lines = ["# %i" % i for i in range(1, 6)]
    lines.append("A, B, C")
    lines.extend(["%i,%3.1f,%03s" % (i, i, i) for i in range(51)])
    lines[-1] = "99,99"
    kwargs = dict(delimiter=",", names=True, skip_header=5, skip_footer=10)
    result = textadapter.genfromtxt(StringIO("\n".join(lines)), **kwargs)
    expected = np.array([("%f" % i, "%f" % i, "%f" % i) for i in range(41)],
                        dtype=[(name, float) for name in "ABC"])
    assert_equal(result, expected)
def test_with_masked_column_various(self):
    "Test masked column"
    data = StringIO('True 2 3\nFalse 5 6\n')
    result = textadapter.genfromtxt(data, dtype=None,
                                    missing_values='2,5', usemask=True)
    # Heterogeneous dtypes: the masked middle column is inferred per-field.
    expected = ma.array([(1, 2, 3), (0, 5, 6)],
                        mask=[(0, 1, 0), (0, 1, 0)],
                        dtype=[('f0', bool), ('f1', bool), ('f2', int)])
    assert_equal(result, expected)
def test_userconverters_with_explicit_dtype(self):
    "Test user_converters w/ explicit (standard) dtype"
    data = StringIO('skip,skip,2001-01-01,1.0,skip')
    # The converter on column 2 overrides the global float dtype.
    result = textadapter.genfromtxt(data, delimiter=",", names=None,
                                    dtype=float, usecols=(2, 3),
                                    converters={2: bytes})
    expected = np.array([('2001-01-01', 1.)],
                        dtype=[('', '|S10'), ('', float)])
    assert_equal(result, expected)
def test_missing_with_tabs(self):
    "Test w/ a delimiter tab"
    txt = "1\t2\t3\n\t2\t\n1\t\t3"
    result = textadapter.genfromtxt(StringIO(txt), delimiter="\t",
                                    usemask=True)
    # Empty tab-separated fields become NaN in the data and 1 in the mask.
    expected_data = np.array([(1, 2, 3), (np.nan, 2, np.nan),
                              (1, np.nan, 3)])
    expected_mask = np.array([(0, 0, 0), (1, 0, 1), (0, 1, 0)], dtype=bool)
    assert_equal(result.data, expected_data)
    assert_equal(result.mask, expected_mask)
def test_empty_file(self):
    "Test that an empty file raises the proper warning."
    # Old-style WarningManager usage: enter manually, restore in finally.
    warn_ctx = WarningManager()
    warn_ctx.__enter__()
    try:
        warnings.filterwarnings("ignore",
                                message="genfromtxt: Empty input file:")
        # Empty input must yield an empty array, not an exception.
        result = textadapter.genfromtxt(StringIO())
        assert_equal(result, np.array([]))
    finally:
        warn_ctx.__exit__()
def test_names_with_usecols_bug1636(self):
    "Make sure we pick up the right names w/ usecols"
    data = "A,B,C,D,E\n0,1,2,3,4\n0,1,2,3,4\n0,1,2,3,4"
    expected_names = ("A", "C", "E")
    # usecols given as indices, one dtype per selected column.
    result = textadapter.genfromtxt(StringIO(data), dtype=(int, int, int),
                                    delimiter=",", usecols=(0, 2, 4),
                                    names=True)
    assert_equal(result.dtype.names, expected_names)
    # usecols given as column names.
    result = textadapter.genfromtxt(StringIO(data), dtype=(int, int, int),
                                    delimiter=",", usecols=("A", "C", "E"),
                                    names=True)
    assert_equal(result.dtype.names, expected_names)
    # usecols as names combined with a single scalar dtype.
    result = textadapter.genfromtxt(StringIO(data), dtype=int,
                                    delimiter=",", usecols=("A", "C", "E"),
                                    names=True)
    assert_equal(result.dtype.names, expected_names)
def test_gft_using_filename(self):
    # Test that we can load data from a filename as well as a file object
    wanted = np.arange(6).reshape((2, 3))
    if sys.version_info[0] >= 3:
        # python 3k is known to fail for '\r'
        separators = ('\n', '\r\n')
    else:
        separators = ('\n', '\r\n', '\r')
    for sep in separators:
        contents = '0 1 2' + sep + '3 4 5'
        # We can't use NamedTemporaryFile on windows, because we cannot
        # reopen the file.
        fd, path = mkstemp()
        try:
            os.write(fd, asbytes(contents))
            assert_array_equal(textadapter.genfromtxt(path), wanted)
        finally:
            os.close(fd)
            os.unlink(path)
def readMesh(fileLoc, fileName, LOFEM=False):
    '''
    Takes in the file location and file name and it then generates a
    dictionary structure from those files for the mesh.
    Input: fileLoc = a string of the location of the file on your computer
           fileName = a string of the name of the file assuming they are all
                equal for .mesh, .kocks, and .grain
           LOFEM = if True, also read the .cmesh/.cgrain coordinate-mesh files
    Output: mesh = a dictionary that contains the following fields in it:
        name = file location
        eqv = any equivalence nodes currently this is an empty nest
        grains = what grain each element corresponds to
        con = connectivity of the mesh for each element
        crd = coordinates of each node
        surfaceNodes = surface nodes of the mesh
        kocks = kocks angles for each grain
        phases = phase number of each element
    '''
    surfaceNodes = []
    con = []
    crd = []
    eqv = []

    name = fileLoc
    # Companion files share the base name, differing only in extension.
    meshLoc = fileLoc + fileName + '.mesh'
    grainLoc = fileLoc + fileName + '.grain'
    kockLoc = fileLoc + fileName + '.kocks'
    grains = []
    phases = []
    kocks = []
    mesh = {}
    mesh['name'] = name
    mesh['eqv'] = []
    with open(meshLoc) as f:
        # data = f.readlines()
        # Records in the .mesh file are distinguished purely by their
        # field count: 4 = node coordinates, 7 = surface-node record,
        # 11 = element connectivity.
        for line in f:
            words = line.split()
            # print(words)
            lenWords = len(words)
            if not words:
                continue
            if lenWords == 4:
                nums = wordParser(words)
                crd.append(nums[1:4])  # drop the leading node id
            if lenWords == 7:
                nums = wordParser(words)
                surfaceNodes.append(nums[0:7])
            if lenWords == 11:
                nums = wordParser(words)
                con.append(nums[1:11])  # drop the leading element id
    # Column 0 of the .grain file: grain id per element; column 1: phase.
    grains = np.genfromtxt(grainLoc, usecols=(0), skip_header=1,
                           skip_footer=0)
    ugrains = np.unique(grains)
    phases = np.genfromtxt(grainLoc, usecols=(1), skip_header=1,
                           skip_footer=0)
    kocks = np.genfromtxt(kockLoc, usecols=(0, 1, 2), skip_header=2,
                          skip_footer=1)
    # Retry without dropping the last line when the row count does not
    # match the number of unique grains (trailing-line format varies).
    if not kocks.shape[0] == ugrains.shape[0]:
        kocks = np.genfromtxt(kockLoc, usecols=(0, 1, 2), skip_header=2,
                              skip_footer=0)
    # Arrays are stored transposed and Fortran-ordered for downstream
    # Fortran/FEpX consumers.
    mesh['con'] = np.require(
        np.asarray(con, order='F', dtype=np.int32).transpose(),
        requirements=['F'])
    mesh['crd'] = np.require(np.asarray(crd, order='F').transpose(),
                             requirements=['F'])
    mesh['surfaceNodes'] = np.require(
        np.asarray(surfaceNodes, order='F', dtype=np.int32).transpose(),
        requirements=['F'])
    mesh['grains'] = np.asfortranarray(grains.transpose(), dtype=np.int32)
    mesh['kocks'] = util.mat2d_row_order(np.asfortranarray(kocks.transpose()))
    mesh['phases'] = np.asfortranarray(phases.transpose(), dtype=np.int8)
    if (LOFEM):
        # Coordinate-mesh companions used by the LOFEM formulation.
        crd_meshLoc = fileLoc + fileName + '.cmesh'
        crd_grainLoc = fileLoc + fileName + '.cgrain'
        cgrains = ta.genfromtxt(crd_grainLoc, usecols=(0))
        cphases = ta.genfromtxt(crd_grainLoc, usecols=(1))
        ccon = ta.genfromtxt(crd_meshLoc, skip_header=1)
        # Connectivity shifted to 0-based indexing here.
        mesh['crd_con'] = np.asfortranarray(ccon.transpose(),
                                            dtype=np.int32) - 1
        mesh['crd_grains'] = np.asfortranarray(cgrains.transpose(),
                                               dtype=np.int32)
        mesh['crd_phases'] = np.asfortranarray(cphases.transpose(),
                                               dtype=np.int8)
    return mesh
def readGrainData(fileLoc, grainNum, frames=None, grData=None):
    '''
    Reads in the grain data that you are interested in. It can read the
    specific rod, gammadot, and gamma files.
    Input: fileLoc = a string of the file location
           grainNum = an integer of the grain number
           frames = what frames you are interested in, default value is
                all of them
           grData = what data files you want to look at, default value is:
                ang, gamma, gdot
    Output: data = a dictionary that contains an ndarray of all the values
                read in the above file.
          rod_0: a float array of the original orientation at each nodal
                point of the grain.
          rod: a float array of the orientation at each nodal point of the
                grain through each frame.
          gamma: a float array of the integrated gammadot at each nodal
                point of the grain through each frame.
          gdot: a float array of the gammadot at each nodal point of the
                grain through each frame.
    '''
    flDflt = False
    frDflt = False
    data = {}
    if grData is None:
        grData = ['ang', 'gamma', 'gdot']
        flDflt = True
    if frames is None:
        # Default frame range: count the '%'-comment records in the first
        # requested file (findComments is project-defined — assumed to
        # return the number of frames; confirm).
        strgrnum = np.char.mod('%4.4d', np.atleast_1d(grainNum))[0]
        if grData[0] == 'ang':
            fend = '.rod'
        else:
            fend = '.data'
        file = fileLoc + 'gr_' + grData[0] + strgrnum + fend
        nFrames = findComments(file)
        if grData[0] == 'ang':
            # Orientation files carry an extra initial (frame-0) record.
            nFrames = nFrames - 1
        frames = np.arange(1, nFrames + 1)
        frDflt = True
    else:
        nFrames = np.size(frames)
        frames = np.asarray(frames) + 1
    for fName in grData:
        print(fName)
        tFrames = []
        if fName == 'ang':
            # 'ang' data includes the initial orientation as frame 0.
            tnf = nFrames + 1
            tFrames = frames.copy()
            fend = 'rod'
            if (not frDflt):
                tFrames = np.concatenate(([1], tFrames))
        else:
            tnf = nFrames
            tFrames = frames.copy()
            fend = 'data'
        tmp = []
        strgrnum = np.char.mod('%4.4d', np.atleast_1d(grainNum))[0]
        fLoc = fileLoc + 'gr_' + fName + strgrnum + '.' + fend
        if frDflt:
            tmp = ta.genfromtxt(fLoc, comments='%')
        else:
            tmp = selectFrameTxt(fLoc, tFrames, comments='%')
        # Normalize to 2-D shape; a single row is treated as a column.
        vec = np.atleast_2d(tmp).shape
        if vec[0] == 1:
            vec = (vec[1], vec[0])
        # Reshape flat frame-stacked rows into (ncomp, npts, nframes).
        temp = np.reshape(np.ravel(tmp),
                          (tnf, np.int32(vec[0] / tnf), vec[1])).T
        if fName == 'ang':
            # Split off the initial orientation from the evolved frames.
            data['angs_0'] = np.atleast_3d(temp[:, :, 0])
            data['angs'] = np.atleast_3d(temp[:, :, 1::1])
        else:
            data[fName] = np.atleast_3d(temp)
    return data
def readLOFEMData(fileLoc, nProc, nstps=None, nelems=None, ncrds=None,
                  nqpts=15, frames=None, lofemData=None, restart=False):
    '''
    Reads in the data files that you are interested in across all the
    processors and only for the frames that you are interested in as well
    Input: fileLoc = a string of the file location
           nProc = an integer of the number of processors used in the
                simulation
           frames = what frames you are interested in, default value is
                all of them
           lofemData = what data files you want to look at, default value
                is: .strain, .stress, .crss, .agamma
    Output: data = a dictionary that contains a list/ndarray of all read in
            data files. If other files other than default are wanted than
            the keys for those values will be the file location. The
            default files have the following key values:
          coord_0: a float array of original coordinates
          hard_0: a float array of original crss_0/g_0 for each element
          angs_0: a float array of original kocks angles for each element
          vel_0: a float array of original velocity at each node
          coord: a float array of deformed coordinates
          hard: a float array of crss/g for each element
          angs: a float array of evolved kocks angles for each element
          stress: a float array of the crystal stress for each element
          strain: a float array of the sample strain for each element
          pldefrate: a float of the plastic deformation rate for each element
          plstrain: a float of the plastic strain for each element
          vel: a float array of the velocity at each node
    '''
    # NOTE(review): block indentation was reconstructed from a
    # whitespace-mangled source — verify branch nesting against history.
    flDflt = False
    frDflt = False
    data = {}
    proc = np.arange(nProc)
    if lofemData is None:
        lofemData = ['strain', 'stress', 'crss', 'agamma', 'ang']
        flDflt = True
    if frames is None:
        if nstps is None:
            # Infer the frame count from the first requested file.
            fName = lofemData[0]
            if fName == 'ang':
                strgrnum = np.char.mod('%4.4d', np.atleast_1d(0))[0]
                file = fileLoc + 'gr_' + fName + strgrnum + '.rod'
            else:
                file = fileLoc + 'lofem.' + fName + '.0'
            nFrames = findComments(file)
            # Nodal/orientation-style files carry an initial record that is
            # not a deformation frame (unless restarting).
            if fName == 'ang' or fName == 'adx' or fName == 'advel' or fName == 'crss' or fName == 'rod':
                if restart:
                    frames = np.arange(1, nFrames + 1)
                else:
                    frames = np.arange(1, nFrames)
                    nFrames = nFrames - 1
            else:
                frames = np.arange(1, nFrames + 1)
            frDflt = True
        # NOTE(review): when frames is None but nstps is given, frames is
        # never assigned before np.size(frames) below — confirm intended.
    else:
        if nstps is None:
            fName = lofemData[0]
            if fName == 'ang':
                strgrnum = np.char.mod('%4.4d', np.atleast_1d(0))[0]
                file = fileLoc + 'gr_' + fName + strgrnum + '.rod'
            else:
                file = fileLoc + 'lofem.' + fName + '.0'
            nstps = findComments(file)
            if fName == 'ang' or fName == 'adx' or fName == 'advel' or fName == 'crss' or fName == 'rod':
                if not restart:
                    nstps = nstps - 1
        # frames = np.arange(1, nstps + 1)
        # Per-processor element/node counts for skip_header bookkeeping.
        proc_elems, proc_crds = mpi_partioner(nProc, ncrds, nelems)
        nFrames = np.size(frames)
        frames = np.asarray(frames) + 1
    for fName in lofemData:
        print(fName)
        tmp = []
        tproc = []
        temp = []
        tFrames = []
        if fName == 'ang' or fName == 'adx' or fName == 'advel' or fName == 'crss' or fName == 'rod':
            # These carry an initial (frame-0) record unless restarting.
            tnf = nFrames + 1
            if restart:
                tnf = nFrames
            tFrames = frames.copy()
            if (not frDflt):
                tFrames = np.concatenate(([1], tFrames))
        else:
            tnf = nFrames
            tFrames = frames.copy()
        npf = 0
        for p in proc:
            # print(p)
            tmp = []
            tmp1 = []
            if fName == 'ang':
                strgrnum = np.char.mod('%4.4d', np.atleast_1d(p))[0]
                fLoc = fileLoc + 'gr_' + fName + strgrnum + '.rod'
            else:
                fLoc = fileLoc + 'lofem.' + fName + '.' + str(p)
            if frDflt:
                tmp = ta.genfromtxt(fLoc, comments='%')
            else:
                # Compute the rows to skip before/after the wanted frames;
                # nodal files scale by node count, element files by element
                # count (optionally times quadrature points).
                if fName == 'ang' or fName == 'adx' or fName == 'advel' or fName == 'rod':
                    skipst = proc_crds[p] * (tFrames[0] - 1)
                    skipft = proc_crds[p] * (nstps - (tFrames[0]))
                elif fName == 'agamma_q' or fName == 'gamma_q' or fName == 'gammadot_q':
                    skipst = proc_elems[p] * (tFrames[0] - 1) * nqpts
                    skipft = proc_elems[p] * tFrames[0] * nqpts
                else:
                    skipst = proc_elems[p] * (tFrames[0] - 1)
                    skipft = proc_elems[p] * (nstps - (tFrames[0]))
                nvals = skipft - skipst
                # NOTE(review): duplicated assignment kept verbatim.
                nvals = skipft - skipst
                tmp = np.genfromtxt(fLoc, comments='%', skip_header=skipst,
                                    max_rows=nvals)
                # tmp = selectFrameTxt(fLoc, tFrames, comments='%')
            # Normalize to 2-D; a single row is treated as a column vector.
            vec = np.atleast_2d(tmp).shape
            if vec[0] == 1:
                vec = (vec[1], vec[0])
            npf += vec[0] / tnf
            # Reshape this processor's flat rows to (ncomp, nlocal, tnf).
            tmp1 = np.reshape(np.ravel(tmp),
                              (tnf, np.int32(vec[0] / tnf), vec[1])).T
            tproc.append(tmp1)
        # Concatenate per-processor chunks along the entity axis.
        temp = np.asarray(np.concatenate(tproc, axis=1))
        # temp = tproc.reshape(vec[1], npf, tnf, order='F').copy()
        # Multiple setup for the default data names have to be changed to keep comp saved
        # First two if and if-else statements are for those that have default values
        if fName == 'adx' or fName == 'advel' or fName == 'crss' or fName == 'ang':
            if fName == 'adx':
                tName = 'coord'
            elif fName == 'advel':
                tName = 'vel'
            elif fName == 'ang':
                tName = 'angs'
            else:
                tName = 'crss'
            if restart:
                data[tName] = np.atleast_3d(temp)
            else:
                # Split the initial record from the evolved frames.
                data[tName + '_0'] = np.atleast_3d(temp[:, :, 0])
                data[tName] = np.atleast_3d(temp[:, :, 1::1])
        elif fName == 'dpeff':
            tName = 'pldefrate'
            data[tName] = np.atleast_3d(temp)
        elif fName == 'eqplstrain':
            tName = 'plstrain'
            data[tName] = np.atleast_3d(temp)
        elif fName == 'agamma_q' or fName == 'gamma_q' or fName == 'gammadot_q':
            # Quadrature-point data: unfold to (nqpts, nslip, nelems, nFrames).
            nslip = temp.shape[0]
            nqpts = 15
            nelems = np.int32(temp.shape[1] / nqpts)
            temp1d = np.ravel(temp)
            temp4d = temp1d.reshape(nslip, nelems, nqpts, nFrames)
            data[fName] = np.swapaxes(np.swapaxes(temp4d, 0, 2), 1, 2)
        else:
            data[fName] = np.atleast_3d(temp)
    return data
#%% r2d = 180 / np.pi print('About to start processing data') kor = 'rod' ldata = fepxDM.readLOFEMData(fileLoc, nproc, lofemData=['strain', 'stress', 'crss']) #print('Finished Reading LOFEM data') print('Starting to read DISC data') data = fepxDM.readData(fileLoc, nproc, fepxData=['adx', 'strain', 'stress', 'crss'], restart=False) #, 'ang']) print('Finished Reading DISC data') #%% misori = ta.genfromtxt(fileLoc + fBname + 'diff.emisori', comments='%') dmisori = ta.genfromtxt(fileLoc + fBname + '_DISC.cmisori', comments='%') gr_cmisori = ta.genfromtxt(fileLoc + fBname + '.cmisori', comments='%') #alpha = ta.genfromtxt(fileLoc+fBname+'.alpha', comments='%') dmisori = dmisori.reshape((nsteps, nels)).T * r2d misori = misori.reshape((nsteps, nels)).T * r2d #gr_cmisori = gr_misori.reshape((nsteps, nels)).T*r2d #alpha = alpha.reshape((nsteps, nels)).T print('Finished Reading in Misori, Gr Misori, and Alpha') #%% gconn = np.asarray([], dtype='float64') gconn = np.atleast_2d(gconn) gupts = np.asarray([], dtype=np.int32)
def test_integer_delimiter(self):
    "Test using an integer for delimiter"
    data = " 1 2 3\n 4 5 67\n890123 4"
    # An integer delimiter means fixed-width fields of that many chars.
    result = textadapter.genfromtxt(StringIO(data), delimiter=3, dtype=int)
    expected = np.array([[1, 2, 3], [4, 5, 67], [890, 123, 4]])
    assert_equal(result, expected)
def readData(fileLoc, nProc, frames=None, fepxData=None, restart=False):
    '''
    Reads in the data files that you are interested in across all the
    processors and only for the frames that you are interested in as well
    Input: fileLoc = a string of the file location
           nProc = an integer of the number of processors used in the
                simulation
           frames = what frames you are interested in, default value is
                all of them
           fepxData = what data files you want to look at, default value
                is: .ang, .strain, .stress, .adx, .advel, .dpeff,
                .eqplstrain, .crss
    Output: data = a dictionary that contains a list/ndarray of all read in
            data files. If other files other than default are wanted than
            the keys for those values will be the file location. The
            default files have the following key values:
          coord_0: a float array of original coordinates
          hard_0: a float array of original crss_0/g_0 for each element
          angs_0: a float array of original kocks angles for each element
          vel_0: a float array of original velocity at each node
          coord: a float array of deformed coordinates
          hard: a float array of crss/g for each element
          angs: a float array of evolved kocks angles for each element
          stress: a float array of the crystal stress for each element
          strain: a float array of the sample strain for each element
          pldefrate: a float of the plastic deformation rate for each element
          plstrain: a float of the plastic strain for each element
          vel: a float array of the velocity at each node
    '''
    # NOTE(review): block indentation was reconstructed from a
    # whitespace-mangled source — verify branch nesting against history.
    flDflt = False
    frDflt = False
    data = {}
    proc = np.arange(nProc)
    if fepxData is None:
        fepxData = [
            'ang', 'strain', 'stress', 'adx', 'advel', 'dpeff',
            'eqplstrain', 'crss'
        ]
        flDflt = True
    if frames is None:
        # Infer the frame count from the first requested post.* file.
        fName = fepxData[0]
        file = fileLoc + 'post.' + fName + '.0'
        nFrames = findComments(file)
        # Nodal/orientation-style files carry an initial record that is
        # not a deformation frame (unless restarting).
        if fName == 'ang' or fName == 'adx' or fName == 'advel' or fName == 'crss' or fName == 'rod':
            if restart:
                frames = np.arange(1, nFrames + 1)
            else:
                frames = np.arange(1, nFrames)
                nFrames = nFrames - 1
        else:
            frames = np.arange(1, nFrames + 1)
        frDflt = True
    else:
        nFrames = np.size(frames)
        frames = np.asarray(frames) + 1
    for fName in fepxData:
        print(fName)
        tmp = []
        tproc = []
        temp = []
        tFrames = []
        if fName == 'ang' or fName == 'adx' or fName == 'advel' or fName == 'crss' or fName == 'rod':
            # These carry an initial (frame-0) record unless restarting.
            tnf = nFrames + 1
            if restart:
                tnf = nFrames
            tFrames = frames.copy()
            if (not frDflt):
                tFrames = np.concatenate(([1], tFrames))
        else:
            tnf = nFrames
            tFrames = frames.copy()
        npf = 0
        for p in proc:
            # print(p)
            tmp = []
            tmp1 = []
            fLoc = fileLoc + 'post.' + fName + '.' + str(p)
            if frDflt:
                tmp = ta.genfromtxt(fLoc, comments='%')
            else:
                tmp = selectFrameTxt(fLoc, tFrames, comments='%')
            # Normalize to 2-D; a single row is treated as a column vector.
            vec = np.atleast_2d(tmp).shape
            if vec[0] == 1:
                vec = (vec[1], vec[0])
            npf += vec[0] / tnf
            # Reshape this processor's flat rows to (ncomp, nlocal, tnf).
            tmp1 = np.reshape(np.ravel(tmp),
                              (tnf, np.int32(vec[0] / tnf), vec[1])).T
            tproc.append(tmp1)
        # Concatenate per-processor chunks along the entity axis.
        temp = np.asarray(np.concatenate(tproc, axis=1))
        # temp = tproc.reshape(vec[1], npf, tnf, order='F').copy()
        # Multiple setup for the default data names have to be changed to keep comp saved
        # First two if and if-else statements are for those that have default values
        if fName == 'ang':
            # Rows 1:4 hold the orientation components.
            if restart:
                data['angs'] = np.atleast_3d(temp[1:4, :, :])
            else:
                data['angs_0'] = np.atleast_3d(temp[1:4, :, 0])
                data['angs'] = np.atleast_3d(temp[1:4, :, 1::1])
        elif fName == 'adx' or fName == 'advel' or fName == 'crss' or fName == 'rod':
            if fName == 'adx':
                tName = 'coord'
            elif fName == 'advel':
                tName = 'vel'
            elif fName == 'rod':
                tName = 'rod'
            else:
                tName = 'crss'
            if restart:
                data[tName] = np.atleast_3d(temp)
            else:
                # Split the initial record from the evolved frames.
                data[tName + '_0'] = np.atleast_3d(temp[:, :, 0])
                data[tName] = np.atleast_3d(temp[:, :, 1::1])
        elif fName == 'dpeff':
            tName = 'pldefrate'
            data[tName] = np.atleast_3d(temp)
        elif fName == 'eqplstrain':
            tName = 'plstrain'
            data[tName] = np.atleast_3d(temp)
        elif fName == 'stress_q':
            # Quadrature-point stress: unfold to
            # (nqpts, nvec, nelems, nFrames).
            nvec = temp.shape[0]
            nqpts = 15
            nelems = np.int32(temp.shape[1] / nqpts)
            temp1d = np.ravel(temp)
            temp4d = temp1d.reshape(nvec, nelems, nqpts, nFrames)
            data[fName] = np.swapaxes(np.swapaxes(temp4d, 0, 2), 1, 2)
        else:
            data[fName] = np.atleast_3d(temp)
    return data
def test_usecols_with_integer(self):
    "Test usecols with an integer"
    # A scalar usecols selects just that single column.
    result = textadapter.genfromtxt(StringIO("1 2 3\n4 5 6"), usecols=0)
    assert_equal(result, np.array([1., 4.]))