Example #1
 def load_meanflow(self, path, FileList=None, OutFile=None):
     exists = os.path.isfile(path + 'MeanFlow/MeanFlow.h5')
     if exists:
         df = pd.read_hdf(path + 'MeanFlow/MeanFlow.h5')
         df = df.drop_duplicates(keep='last')
         grouped = df.groupby(['x', 'y', 'z'])
         df = grouped.mean().reset_index()
     else:
         equ = ['{|gradp|}=sqrt(ddx({<p>})**2+ddy({<p>})**2+ddz({<p>})**2)']
         # nfiles = np.size(os.listdir(path + 'TP_stat/'))
         print('try to calculate data')
         with timer('load mean flow from tecplot data'):
             if FileList is None:
                 df = p2p.ReadAllINCAResults(path + 'TP_stat/',
                                             path + 'MeanFlow/',
                                             SpanAve=True,
                                             Equ=equ,
                                             OutFile='MeanFlow')
             else:
                 df = p2p.ReadAllINCAResults(path + 'TP_stat/',
                                             path + 'MeanFlow/',
                                             FileName=FileList,
                                             SpanAve=True,
                                             Equ=equ,
                                             OutFile='MeanFlow')
         print('done with saving data')
     self._data_field = df
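# A minimal self-contained sketch of the drop_duplicates/groupby step used above:
# repeated (x, y, z) points coming from several statistics files are collapsed
# into a single averaged record (pandas only, synthetic data for illustration).
import pandas as pd

raw = pd.DataFrame({'x': [0.0, 1.0, 0.0, 1.0],
                    'y': [0.0, 0.0, 0.0, 0.0],
                    'z': [0.0, 0.0, 0.0, 0.0],
                    'u': [1.0, 2.0, 3.0, 4.0]})
mean = raw.drop_duplicates(keep='last').groupby(['x', 'y', 'z']).mean().reset_index()
print(mean)  # one row per (x, y, z) with u averaged over the repeated records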
Example #2
 def LoadMeanFlow(self, path, nfiles=44):
     exists = os.path.isfile(path + 'MeanFlow/MeanFlow.h5')
     if exists:
         self._DataTab = pd.read_hdf(path + 'MeanFlow/MeanFlow.h5')
     else:
         df = p2p.ReadAllINCAResults(nfiles, path + 'TP_stat/',
                                     path + 'MeanFlow/',
                                     SpanAve=True,
                                     OutFile='MeanFlow')
         self._DataTab = df
Example #3
 def spanwise_average(cls, path, nfiles):
     dirs = glob(path + 'TP_stat/' + '*.plt')
     equ = ['{|gradp|}=sqrt(ddx({<p>})**2+ddy({<p>})**2+ddz({<p>})**2)']
     for i in np.arange(np.size(dirs)):
         df = p2p.ReadAllINCAResults(nfiles,
                                     path + 'TP_stat/',
                                     path + 'MeanFlow/',
                                     FileName=dirs[i],
                                     SpanAve=True,
                                     Equ=equ,
                                     OutFile='MeanFlow' + str(i))
     return df
Example #4
    def load_data(self, path, FileList=None, NameList=None):
        # nfiles = np.size(os.listdir(path))
        if FileList is None:
            infile = glob(path + '*.plt')
        else:
            infile = FileList

        if NameList is None:
            # ext_name = os.path.splitext(infile)
            df = p2p.ReadAllINCAResults(path, FileName=infile)
        elif NameList == 'h5':
            df = pd.read_hdf(infile)
        else:
            df = p2p.ReadINCAResults(path, VarList=NameList, FileName=infile)
        df = df.drop_duplicates(keep='last')
        grouped = df.groupby(['x', 'y', 'z'])
        df = grouped.mean().reset_index()
        self._data_field = df
Example #5
    def merge_meanflow(self, path):
        dirs = sorted(os.listdir(path + 'TP_stat/'))
        nfiles = np.size(dirs)
        equ = ['{|gradp|}=sqrt(ddx({<p>})**2+ddy({<p>})**2+ddz({<p>})**2)']
        for i in np.arange(nfiles):
            FileList = os.path.join(path + 'TP_stat/', dirs[i])
            with timer(FileList):
                df = p2p.ReadAllINCAResults(path + 'TP_stat/',
                                            path + 'MeanFlow/',
                                            FileName=FileList,
                                            SpanAve=True,
                                            Equ=equ,
                                            OutFile=dirs[i])

                if i == 0:
                    flow = df
                else:
                    flow = pd.concat([flow, df], ignore_index=True)

        flow = flow.sort_values(by=['x', 'y', 'z'])
        flow = flow.drop_duplicates(keep='last')
        flow.to_hdf(path + 'MeanFlow/' + 'MeanFlow.h5', 'w', format='fixed')
        self._data_field = flow
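# The per-file accumulation above can also collect the frames in a list and
# concatenate once, which avoids growing the DataFrame inside the loop; a minimal
# self-contained sketch with synthetic chunks standing in for the p2p output
# (the to_hdf call is commented out because it needs the optional 'tables' package).
import pandas as pd

chunks = [pd.DataFrame({'x': [0.0, 1.0], 'y': [0.0, 0.0], 'z': [0.0, 0.0],
                        'u': [float(i), float(i) * 2.0]}) for i in range(3)]
flow_demo = pd.concat(chunks, ignore_index=True)
flow_demo = flow_demo.sort_values(by=['x', 'y', 'z'])
flow_demo = flow_demo.drop_duplicates(keep='last')
# flow_demo.to_hdf('MeanFlow.h5', 'w', format='fixed')
print(flow_demo.shape)  # (6, 4): three chunks of two points each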
Example #6
    def load_3data(self, path, FileList=None, NameList=None):
        # nfiles = np.size(os.listdir(path))
        if FileList is None:
            infile = glob(path + '*.plt')
        else:
            infile = FileList

        if NameList is None:
            df = p2p.ReadAllINCAResults(path, FileName=infile, SpanAve=None)
        elif NameList == 'h5':
            if np.size(FileList) == 1:
                df = pd.read_hdf(infile)
            else:
                num = np.size(FileList)
                df = pd.concat([pd.read_hdf(FileList[i]) for i in range(num)])
                df = df.reset_index(drop=True)
        else:
            df = p2p.ReadINCAResults(path,
                                     VarList=NameList,
                                     FileName=infile,
                                     SpanAve=False)

        # df = df.drop_duplicates(keep='last')
        self._data_field = df
Example #7
    plt.close()

# %% save plots for video
pathA = path + 'animation/'
fn1 = '[0.02]DMD'
fstr = '0p02'
if not os.path.exists(pathA + fstr):
    os.mkdir(pathA + fstr)
path_id = pathA + fstr + '/'
num = np.arange(16)
for ii in range(np.size(num)):
    fn2 = str(num[ii])
    fn3 = fn2.zfill(3)
    phase = num[ii] * np.pi
    fn = [pathA + fn1 + fn3 + 'A.plt', pathA + fn1 + fn3 + 'B.plt']
    df = p2p.ReadAllINCAResults(FoldPath=pathA, SpanAve=None, FileName=fn)
    df1 = df.loc[df['z'] == 0]
    dmd_plt(df1, path_id, fn3)

# %% Convert plots to animation
import imageio
from natsort import natsorted, ns
dirs = os.listdir(path_id)
dirs = natsorted(dirs, key=lambda y: y.lower())
with imageio.get_writer(path_id+fstr+'DMDAnima.mp4', mode='I', fps=2) as writer:
    for filename in dirs:
        image = imageio.imread(path_id + filename)
        writer.append_data(image)
    
# %%
# path3 = "/media/weibo/Data3/BFS_M1.7L_0505/3D_DMD/plt/"
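# A self-contained sketch of the frames-to-animation step above, kept separate so
# it can run on its own: render a few synthetic PNG frames with matplotlib, sort
# them naturally, and stitch them into a GIF with imageio (writing an .mp4 as in
# the loop above additionally needs imageio's ffmpeg plugin). The directory and
# frame contents below are made up for illustration.
import os
import imageio
import matplotlib.pyplot as plt
from natsort import natsorted

sketch_dir = './frames_sketch/'
os.makedirs(sketch_dir, exist_ok=True)
for k in range(4):
    fig_s, ax_s = plt.subplots()
    ax_s.plot([0.0, 1.0], [0.0, float(k)])
    fig_s.savefig(sketch_dir + 'frame' + str(k).zfill(3) + '.png')
    plt.close(fig_s)
png_files = natsorted([f for f in os.listdir(sketch_dir) if f.endswith('.png')],
                      key=lambda y: y.lower())
images = [imageio.imread(sketch_dir + f) for f in png_files]
imageio.mimsave(sketch_dir + 'sketch.gif', images)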
Example #8
flow.load_3data(pathB)
flow.spanwise_average()
flow.PlanarData.to_hdf(pathB + "BaseFlow" + ".h5", 'w', format='fixed')
base = flow.PlanarData
# %%
path = "/media/weibo/Data2/BFS_M1.7SFD1/"
pathB = path + "BaseFlow/"
base = pd.read_hdf(path + 'BaseFlow.h5')
varlist = ['x', 'y', 'z', 'u', 'v', 'w', 'rho', 'p', 'Mach', 'T']
x_loc = np.arange(-40.0, 0.0 + 1.0, 1.0)
for i in range(np.size(x_loc)):
    df = base.loc[base['x'] == x_loc[i], varlist]
    df.to_csv(pathB + 'InputProfile_' + str(x_loc[i]) + '.dat',
              index=False, float_format='%1.8e', sep=' ')
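# Exact float comparison as in base['x'] == x_loc[i] can silently miss grid points
# whose coordinates are not exactly representable; np.isclose gives a tolerant
# selection instead. A small self-contained sketch of the same extraction pattern:
import numpy as np
import pandas as pd

demo = pd.DataFrame({'x': [-1.0000000001, 0.0, 1.0], 'u': [0.1, 0.2, 0.3]})
for xv in [-1.0, 0.0, 1.0]:
    profile = demo.loc[np.isclose(demo['x'], xv, atol=1e-6), ['x', 'u']]
    print(xv, len(profile))  # each station matches one row despite round-off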
# %% Plot schematic of the computational domain
df = p2p.ReadAllINCAResults(240, pathI, FileName='ZSliceSolTime1000.plt')
x, y = np.meshgrid(np.unique(df.x), np.unique(df.y))
corner = (x < 0.0) & (y < 0.0)
rho_grad = griddata((df.x, df.y), df['|grad(rho)|'], (x, y))
print("rho_grad max = ", np.max(df['|grad(rho)|']))
print("rho_grad min = ", np.min(df['|grad(rho)|']))
rho_grad[corner] = np.nan

fig, ax = plt.subplots(figsize=(10, 4))
matplotlib.rc("font", size=textsize)
rg = np.linspace(0.1, 1.8, 18)
cbar = ax.contourf(x, y, rho_grad, cmap='gray_r', levels=rg, extend='max')
cbar.cmap.set_over('#000000')
ax.set_xlim(-10.0, 30.0)
ax.set_ylim(-3.0, 10.0)
ax.set_xlabel(r"$x/\delta_0$", fontdict=font)
Example #9
rg1 = np.linspace(0.2, 1.4, 13)
rg2 = np.linspace(0.2, 1.4, 4)
cbar = ax.contourf(x, y, rho, cmap="rainbow", levels=rg1,
                   extend='both')  # rainbow_r
plt.colorbar(cbar, orientation="horizontal", extendrect=False, ticks=rg2)
ax.set_xlim(-80.0, 60.0)
ax.set_ylim(0.0, 40.0)
ax.set_xlabel(r"$x/\delta_0$", fontsize=12)
ax.set_ylabel(r"$y/\delta_0$", fontsize=12)
plt.gca().set_aspect("equal", adjustable="box")
plt.savefig(path + "initial.svg", bbox_inches="tight")
plt.show()

# %%
path = '/media/weibo/Data2/FFS_M1.7L2/'
df1 = p2p.ReadAllINCAResults(path, FileName=path + 'initial1.plt')
grouped = df1.groupby(['x', 'y'])
df0 = grouped.mean().reset_index()
volume = [(-70, 40.0), (0.0, 6.0)]
dxyz = [0.015625, 0.0078125, 0.125]
#volume = [(-70, 40.0), (6.0, 33.0)]
#dxyz = [0.0625, 0.0625, 0.125]
xval = np.arange(volume[0][0], volume[0][1] + dxyz[0], dxyz[0])
yval = np.arange(volume[1][0], volume[1][1] + dxyz[1], dxyz[1])
x, y = np.meshgrid(xval, yval)
z = np.zeros(np.shape(x))

name = ['x', 'y', 'z', 'u', 'v', 'w', 'rho', 'p', 'T']
u = griddata((df0.x, df0.y), df0['u'], (x, y), fill_value=0, method='cubic')
v = griddata((df0.x, df0.y), df0['v'], (x, y), fill_value=0, method='cubic')
w = griddata((df0.x, df0.y), df0['w'], (x, y), fill_value=0, method='cubic')
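# The interpolated planes can then be flattened column-wise into a frame with the
# columns listed in `name`; a hypothetical continuation (not the original script),
# assuming rho, p and T exist in df0 and are interpolated the same way as u, v, w:
rho_i = griddata((df0.x, df0.y), df0['rho'], (x, y), fill_value=0, method='cubic')
p_i = griddata((df0.x, df0.y), df0['p'], (x, y), fill_value=0, method='cubic')
T_i = griddata((df0.x, df0.y), df0['T'], (x, y), fill_value=0, method='cubic')
arrays = [x, y, z, u, v, w, rho_i, p_i, T_i]
plane = pd.DataFrame(np.column_stack([a.ravel() for a in arrays]), columns=name)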
Example #10
pathT = path + 'TimeAve/'
dirs = glob(pathTP + '*plt')
equ = ['{|gradp|}=sqrt(ddx({<p>})**2+ddy({<p>})**2+ddz({<p>})**2)']
varlist, equ = va.mean_var(opt='gradient')

num = np.size(dirs)
a1 = int(num / 4)
a2 = a1 * 2
a3 = a1 * 3
ind = [[0, a1], [a1, a2], [a2, a3], [a3, num]]
for i in range(4):
    FileList = dirs[ind[i][0]:ind[i][1]]
    df = p2p.ReadAllINCAResults(pathTP,
                                pathM,
                                FileName=FileList,
                                Equ=equ,
                                OutFile='MeanFlow_' + str(i))
    print('finished part ' + str(i))
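# The quarter-index bookkeeping above can also be done with np.array_split, which
# splits a list into near-equal chunks and handles remainders automatically; a
# small self-contained check of the chunking (file names made up for illustration):
demo_files = ['file_' + str(k) for k in range(10)]
for j, chunk in enumerate(np.array_split(demo_files, 4)):
    print(j, list(chunk))  # chunk sizes 3, 3, 2, 2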

dir1 = glob(pathM + 'MeanFlow_*')
df0 = pd.read_hdf(dir1[0])
grouped = df0.groupby(['x', 'y'])
mean0 = grouped.mean().reset_index()
del df0

df1 = pd.read_hdf(dir1[1])
grouped = df1.groupby(['x', 'y'])
mean1 = grouped.mean().reset_index()
del df1

df2 = pd.read_hdf(dir1[2])
Example #11
modeflow = modes[:, num].reshape(-1, 1) * amplitudes[num] * np.exp(phase)
fluc = modeflow.reshape((m, o), order='F')
newflow = fluc.real
xarr = xval.values.reshape(-1, 1)  # row to column
yarr = yval.values.reshape(-1, 1)
zarr = zval.values.reshape(-1, 1)
names = ['x', 'y', 'z', var0, var1, var2, var3, var4,
         'u`', 'v`', 'w`', 'p`', 'T`']
data = np.hstack((xarr, yarr, zarr, base, newflow))
df = pd.DataFrame(data, columns=names)
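# A self-contained sketch of the single-mode reconstruction above: a complex DMD
# mode is scaled by its amplitude and a phase factor, and the real part is taken
# as the physical fluctuation. The sizes and the purely imaginary exponent
# 1j * theta are assumptions for illustration, not values from the original script.
import numpy as np

m_pts, n_vars = 6, 2                       # grid points and variables per point
rng = np.random.default_rng(0)
phi = rng.standard_normal(m_pts * n_vars) + 1j * rng.standard_normal(m_pts * n_vars)
amplitude = 0.5
theta = 0.25 * np.pi
contrib = phi.reshape(-1, 1) * amplitude * np.exp(1j * theta)
fluc_demo = contrib.reshape((m_pts, n_vars), order='F')
print(fluc_demo.real.shape)                # (6, 2) real-valued fluctuation field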

# %% load data
freq1 = 0.022  # freq[num]
path1 = path3D + '0p022/'
files = glob(path1 + '*DMD007?.plt')
df = p2p.ReadAllINCAResults(path1, FileName=files)
# %% in X-Y plane, preprocess
var = 'u'
avg = False
fa = 0.0   # for mean value
amp = 1.0  # for fluctuations
sliceflow = df.loc[df['z'] == 0]
if var == 'u':
    varval = sliceflow[var] * fa + sliceflow['u`'] * amp
    grouped = df.groupby(['x', 'y'])
    df2 = grouped.mean().reset_index()
    # varval = df2['u`']

if var == 'p':
    varval = sliceflow[var] * fa + sliceflow['p`'] * amp
    grouped = df.groupby(['x', 'y'])