def load_test_data(idx, frameIdx):
    densities_test = []
    velocities_test = []
    header_density = 'dummy'
    if os.path.exists("%s/testSimple_%04d" % (BASE_PATH, idx)):
        for i in range(-8, 9):
            filename = "%s/testSimple_%04d/density_%04d.uni"
            uniPath = filename % (BASE_PATH, idx, frameIdx + i)  # 100 files per sim
            header_density, content = uniio.readUni(uniPath)  # returns [Z,Y,X,C] np array
            h = header_density['dimX']
            w = header_density['dimY']
            arr = content[:, ::-1, :, :]  # reverse order of Y axis
            arr = np.reshape(arr, [w, h, 1])  # discard Z
            densities_test.append(arr)
        densities_test = np.reshape(densities_test,
                                    (len(densities_test), res, res, 1))
        for i in range(-8, 9):
            filename = "%s/testSimple_%04d/vel_%04d.uni"
            uniPath = filename % (BASE_PATH, idx, frameIdx + i)  # 100 files per sim
            header, content = uniio.readUni(uniPath)  # returns [Z,Y,X,C] np array
            h = header['dimX']
            w = header['dimY']
            arr = content[:, ::-1, :, :]  # reverse order of Y axis
            arr = np.reshape(arr, [w, h, 3])  # discard Z
            velocities_test.append(arr)
        velocities_test = np.reshape(velocities_test,
                                     (len(velocities_test), res, res, 3))
    return densities_test, velocities_test, header_density
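A minimal driver for the loader above, a sketch assuming BASE_PATH and res are configured as in the snippet and that a testSimple_1000 directory with frames around frameIdx exists (the concrete values are hypothetical):

import os
import numpy as np
import uniio

BASE_PATH = '../data'  # assumed data root
res = 64               # assumed simulation resolution

densities_test, velocities_test, header = load_test_data(idx=1000, frameIdx=50)
# the loops cover frames frameIdx-8 .. frameIdx+8, i.e. 17 frames
print(densities_test.shape)   # (17, res, res, 1)
print(velocities_test.shape)  # (17, res, res, 3)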
Code example #2
def step_1(x, t, dt):

	# from numpy array to uni file
	vel_fn = os.path.join(sim_dir, 'vel_%04d.uni')
	density_fn = os.path.join(sim_dir, 'density_%04d.uni')

	xr = x.reshape(1, 64, 64, 3)
	np_density = xr[:, :, :, [0]]
	np_vel = np.zeros((1, 64, 64, 3)) # manta takes 3d velocities
	np_vel[:, :, :, :2] = xr[:, :, :, 1:]

	uniio.writeUni(density_fn % (t / dt), generate_header(np_density), np_density)
	uniio.writeUni(vel_fn % (t / dt), generate_header(np_vel), np_vel)

	# loading written data to state
	density.load(density_fn % (t / dt))
	vel.load(vel_fn % (t / dt))

	# applying model
	euler_step()

	# writing to uni
	density.save(density_fn % ((t + dt) / dt))
	vel.save(vel_fn % ((t + dt) / dt))

	# loading back as numpy array
	_, np_density = uniio.readUni(density_fn % ((t + dt) / dt))
	_, np_vel = uniio.readUni(vel_fn % ((t + dt) / dt))

	xn = np.zeros((xr.shape[0], 64, 64, 3))
	xn[:, :, :, [0]] = np_density
	xn[:, :, :, 1:] = np_vel[:, :, :, :2] # manta takes 3d velocities
	xn = xn.reshape(1, 3 * 64 * 64)

	return xn
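A hedged sketch of how step_1 might be driven; sim_dir, the manta grids density/vel, euler_step and generate_header come from the surrounding scene file, and x0 comes from load_initial_conditions in code example #4 below:

dt = 1.0                       # assumed frame timestep
x = load_initial_conditions()  # packed state: density + 2D velocity, see code example #4
for k in range(10):
    x = step_1(x, t=k * dt, dt=dt)  # round-trips frame k through uni files

Code example #3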
def get_data_next_batch(data_addrs, ibatch, batch_size, export=False):
    data_streamline_occ = []
    data_streamline_vel = []
    data_grid_lvst = []
    data_grid_vel = []
    for i in range(ibatch * batch_size, ibatch * batch_size + batch_size):
        (data_sketch_addr, data_lvst_addr, data_vel_addr) = data_addrs[i]
        if os.path.isfile(data_sketch_addr) and os.path.isfile(
                data_lvst_addr) and os.path.isfile(data_vel_addr):
            grid_sketch_occ = read_streamline_bin(data_sketch_addr)
            header_lvst, grid_lvst = uniio.readUni(data_lvst_addr)
            header_vel, grid_vel = uniio.readUni(data_vel_addr)

            # clean the velocity field, using the levelset as a mask (must match the streamline program!)
            fluid_mask = (grid_lvst <= 1.0)
            grid_vel = grid_vel * fluid_mask

            # extract streamline velocity
            grid_sketch_vel = grid_sketch_occ * grid_vel

            # binarize the levelset grid; it should not include zero (e.g. a cropped levelset grid is 0 everywhere)
            grid_lvst = np.array(grid_lvst < 0.0, dtype='float32')

            data_streamline_occ.append(grid_sketch_occ)
            data_streamline_vel.append(grid_sketch_vel)
            data_grid_lvst.append(grid_lvst)
            data_grid_vel.append(grid_vel)

            # export sketch and levelset files to check that the two grids are aligned correctly;
            # the visualization routine is in the cpp file!
            if (export):
                uniio.writeUni(
                    '../VisFluid/data/sketch_occ_{:04}.uni'.format(i),
                    header_lvst, grid_sketch_occ)
                uniio.writeUni(
                    '../VisFluid/data/sketch_vel_{:04}.uni'.format(i),
                    header_vel, grid_sketch_vel)
                uniio.writeUni('../VisFluid/data/levelset_{:04}.uni'.format(i),
                               header_lvst, grid_lvst)
                uniio.writeUni('../VisFluid/data/vel_{:04}.uni'.format(i),
                               header_vel, grid_vel)

        else:
            print('{}, {} or {} does not exist!'.format(
                data_sketch_addr, data_lvst_addr, data_vel_addr))

    data_streamline_occ = np.asarray(data_streamline_occ, dtype='float32')
    data_streamline_vel = np.asarray(data_streamline_vel, dtype='float32')
    data_grid_lvst = np.asarray(data_grid_lvst, dtype='float32')
    data_grid_vel = np.asarray(data_grid_vel, dtype='float32')

    # concatenate occupancy field and velocity field
    data_streamline = np.concatenate(
        (data_streamline_occ, data_streamline_vel), axis=-1)
    data_grid = np.concatenate((data_grid_lvst, data_grid_vel), axis=-1)
    return data_streamline, data_grid
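A minimal batching loop over this function; data_addrs is the list of (sketch, levelset, velocity) file triples, and the last-axis layout follows the concatenation above (1 occupancy/levelset channel + 3 velocity channels):

batch_size = 4  # assumed
for ibatch in range(len(data_addrs) // batch_size):
    data_streamline, data_grid = get_data_next_batch(data_addrs, ibatch, batch_size)
    # data_streamline: (batch, Z, Y, X, 4), data_grid: (batch, Z, Y, X, 4)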
Code example #4
def load_initial_conditions():
	root0 = '/home/debezenac/projects/DAPPER/mods/Euler/uni/simSimple_1000'
	vel0_fn = os.path.join(root0, 'vel_%04d.uni')
	density0_fn = os.path.join(root0, 'density_%04d.uni')
	_, np_density0 = uniio.readUni(density0_fn % 0)
	_, np_vel0 = uniio.readUni(vel0_fn % 0)
	x0 = np.zeros((1, 64, 64, 3))
	x0[:, :, :, [0]] = np_density0
	x0[:, :, :, 1:] = np_vel0[:, :, :, :2] # manta takes 3d velocities
	x0 = x0.reshape(3 * 64 * 64)
	return x0
Code example #5
def get_data_next_batch(data_x_addrs,
                        data_y_addrs,
                        ibatch,
                        batch_size,
                        export=False):
    data_x = []
    data_y = []
    for i in range(ibatch * batch_size, ibatch * batch_size + batch_size):
        if os.path.isfile(data_x_addrs[i]) and os.path.isfile(data_y_addrs[i]):
            # grid_x 			= read_streamline_txt(data_x_addrs[i])
            grid_x = read_streamline_bin(data_x_addrs[i])
            uni_header, grid_y = uniio.readUni(data_y_addrs[i])

            # binarize the levelset grid; it should not include zero (e.g. a cropped levelset grid is 0 everywhere)
            grid_y = np.array(grid_y < 0.0, dtype='float32')

            data_x.append(grid_x)
            data_y.append(grid_y)
            # export sketch and levelset files to check that the two grids are aligned correctly;
            # the visualization routine is in the cpp file!
            if (export):
                uniio.writeUni('../VisFluid/data/sketch_{:04}.uni'.format(i),
                               uni_header, grid_x)
                uniio.writeUni('../VisFluid/data/levelset_{:04}.uni'.format(i),
                               uni_header, grid_y)
        else:
            print('{} or {} does not exist!'.format(data_x_addrs[i],
                                                    data_y_addrs[i]))

    data_x = np.asarray(data_x, dtype='float32')
    data_y = np.asarray(data_y, dtype='float32')
    return data_x, data_y
Code example #6
def get_next_batch_uni(data_addrs, ibatch, batch_size):
    data = []
    for i in range(ibatch * batch_size, ibatch * batch_size + batch_size):
        if os.path.isfile(data_addrs[i]):
            grid_header, grid_content = uniio.readUni(data_addrs[i])
            data.append(grid_content)
    data = np.asarray(data, dtype='float32')
    return data
Code example #7
File: tilecreator.py Project: tum-pbs/mantaflow
def uniToArray(uniPath, is_vel=False):
	head, content = uniio.readUni(uniPath)

	imageHeight = head['dimX']
	imageWidth  = head['dimY']
	#print(format(uniPath) + " " + format(head)) # debug info

	if not is_vel:
		fixedArray = np.reshape(content, [imageWidth, imageHeight])
		fixedArray = fixedArray[::-1] # make a copy of the array in reverse order
	else:
		fixedArray = np.reshape(content, [imageWidth, imageHeight, 3])
		fixedArray = fixedArray[::-1]

	return fixedArray
Code example #8
File: tilecreator.py Project: mayerjRRR/mantaflowEx1
def arrayToUni(input, savePath, motherUniPath, imageHeight, imageWidth, is_vel=False):
	head, _ = uniio.readUni(motherUniPath)
	head['dimX'] = imageWidth
	head['dimY'] = imageHeight

	if not is_vel:
		fixedArray = np.zeros((imageHeight, imageWidth), dtype='f')
		for x in range(0, imageHeight):
			for y in range(0, imageWidth):
				fixedArray[x][y] = input[(imageHeight - 1) - x][y]
	else:
		fixedArray = np.zeros((imageHeight, imageWidth, 3), dtype='f')
		for x in range(0, imageHeight):
			for y in range(0, imageWidth):
				fixedArray[x][y] = input[(imageHeight - 1) - x][y]

	uniio.writeUni(savePath, head, fixedArray)
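A round-trip sketch for the two helpers above, assuming a square scalar grid; the file names are placeholders. uniToArray flips the Y axis on read and arrayToUni flips it again on write, so chaining them reproduces the original grid:

src = 'density_0000.uni'  # placeholder path
arr = uniToArray(src)     # Y axis flipped for image-style indexing
arrayToUni(arr, 'density_copy.uni', src, arr.shape[0], arr.shape[1])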
Code example #9
File: ioext.py Project: Thanduriel/NeuralTurbulence
def loadData(path):
    files = []
    for entry in glob.glob(path, recursive=True):
        files.append(entry)
    # glob order is arbitrary, so sort for a deterministic frame order
    files.sort()

    data = []
    for f in files:
        header, content = uniio.readUni(f)
        h = header['dimX']
        w = header['dimY']
        channels = header['bytesPerElement'] // 4  # 4-byte floats per channel
        arr = content[:, ::-1, :, :]  # reverse order of Y axis
        arr = np.reshape(arr, [w, h, channels])
        data.append(arr)
    return data
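A hedged usage sketch; the glob pattern is a placeholder, and np.stack assumes all frames share one resolution:

frames = loadData('sim_0000/density_*.uni')  # hypothetical pattern
data = np.stack(frames)                      # (numFrames, W, H, C)
print(data.shape)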
Code example #10
	def loadSingleDatum(self, fn, lstr, idxOffset=0):
		""" Determine file type and load
		"""
		if idxOffset!=0:
			fn = self.mogrifyFilenameIndex(fn,idxOffset)
		if self.print_info>1:
			print("Loading: "+fn+", "+lstr)
		# detect file type
		if fn.endswith( ".npz" ):
			ar = np.load(fn)[ lstr ]
		elif fn.endswith( ".uni" ):
			_, ar = uniio.readUni(fn) # load-string lstr not needed for uni files
			#ar = ar[::-1] # make a copy of the array in reverse order
		else:
			raise FluidDataLoaderError("FluidDataLoader error: got filename %s, but only .uni or .npz supported at the moment " % (fn))

		return ar
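A usage sketch for the dispatch above; the loader instance and file names are assumptions:

ar_npz = loader.loadSingleDatum('frame_0005.npz', 'density')  # lstr selects the npz array key
ar_uni = loader.loadSingleDatum('frame_0005.uni', 'density')  # lstr is ignored for uni files

Code example #11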
gs = vec3(res, res, res)
s = Solver(name='main', gridSize=gs, dim=dim)

phi = s.create(LevelsetGrid)
mesh = s.create(Mesh)
flags = s.create(FlagGrid)
flags.initDomain(boundaryWidth=0)

# if GUI:
# 	gui = Gui()
# 	gui.show(dim==3)
# 	gui.setCamPos(0., 0., -2)
# 	gui.setCamRot(20,-60,0)

# uni head definition
header_example, content_example = uniio.readUni(
    os.path.join('./data_extend', 'flipLevelSet_example.uni'))


# load unextended version of addresses
def load_data_addrs_comp(filetype):
    addrs = []
    addrs_sketch = []
    addrs_lvst = []
    addrs_vel = []
    filename = []
    if filetype == 'train':
        filename = 'train_addrs.txt'
    else:
        filename = 'test_addrs.txt'
    with open(os.path.join('./data/result', filename), 'r') as f:
        for line in f:
Code example #12
basePath = '../data/'

trainingEpochs = 2500
batchSize = 10
inSize = 64 * 64 * 1  # warning - hard coded to scalar values 64^2

# load data
densities = []

# start reading simSimple 1000 ff.
for sim in range(1000, 2000):
    if os.path.exists("%s/simSimple_%04d" % (basePath, sim)):
        for i in range(0, 100):
            filename = "%s/simSimple_%04d/density_%04d.uni"
            uniPath = filename % (basePath, sim, i)  # 100 files per sim
            header, content = uniio.readUni(
                uniPath)  # returns [Z,Y,X,C] np array
            h = header['dimX']
            w = header['dimY']
            arr = content[:, ::-1, :, :]  # reverse order of Y axis
            arr = np.reshape(arr, [w, h, 1])  # discard Z
            densities.append(arr)

loadNum = len(densities)
if loadNum < 200:
    print(
        "Error - use at least two full sims, generate data by running 'manta ./manta_genSimSimple.py' a few times..."
    )
    exit(1)

densities = np.reshape(densities, (len(densities), 64, 64, 1))
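A natural next step after loading is splitting off the last frames for validation; valiSize is an assumed parameter, not from the snippet:

valiSize = max(200, loadNum // 10)        # assumed: hold out at least two sims
valiData = densities[loadNum - valiSize:]
densities = densities[:loadNum - valiSize]
print('Training frames: {}, validation frames: {}'.format(
    densities.shape[0], valiData.shape[0]))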
Code example #13
# root_2 = '/home/debezenac/projects/DAPPER/mods/Euler/uni/from_init/no_pressure_reloadsimSimple_1000'

root_1 = '/home/debezenac/projects/DAPPER/mods/Euler/uni/from_init/with_pressuresimSimple_1000'
root_2 = '/tmp/tmpdz9t3bpr/'  #'/home/debezenac/projects/DAPPER/mods/Euler/uni/test2'

# root = 'uni/no_pressure_step_at_init/simSimple_1000'
den_fn_1 = os.path.join(root_1, 'density_%04d.uni')
vel_fn_1 = os.path.join(root_1, 'vel_%04d.uni')

den_fn_2 = os.path.join(root_2, 'density_%04d.uni')
vel_fn_2 = os.path.join(root_2, 'vel_%04d.uni')

densities = []
velocities = []
for i in range(0, 500, interval):
    header1, content1 = uniio.readUni(den_fn_1 %
                                      (i + 1))  # returns [Z,Y,X,C] np array
    header2, content2 = uniio.readUni(vel_fn_1 %
                                      (i + 1))  # returns [Z,Y,X,C] np array
    h1 = header1['dimX']
    w1 = header1['dimY']
    h2 = header2['dimX']
    w2 = header2['dimY']
    arr = content1[:, ::-1, :, :]  # reverse order of Y axis
    arr = np.reshape(arr, [w1, h1, 1])  # discard Z
    densities.append(arr)
    arr = content2[:, ::-1, :, :]  # reverse order of Y axis
    arr = np.reshape(arr, [w2, h2, 3])  # discard Z
    velocities.append(arr)

    plt.subplot(2, 2, 1)
    plt.title('{}, velocities {}'.format(root_1, i))
Code example #14
velOld = s.create(MACGrid)
velParts = s.create(MACGrid)
mapWeights = s.create(MACGrid)

pp = s.create(BasicParticleSystem)
pVel = pp.create(PdataVec3)
mesh = s.create(Mesh)
mesh_fluid = s.create(Mesh)
mesh_fluid_vel = s.create(Mesh)
mesh_obs = s.create(Mesh)

pindex = s.create(ParticleIndexSystem)
gpi = s.create(IntGrid)

# uni head definition
header_example, content_example = uniio.readUni(
    os.path.join('../tensorflow/data_extend', 'flipLevelSet_example.uni'))
header_vel_example, content_vel_example = uniio.readUni(
    os.path.join('../tensorflow/data_extend', 'flipVel_example.uni'))


# load unextended version of addresses
def load_data_addrs_comp(filetype):
    addrs = []
    addrs_sketch = []
    addrs_lvst = []
    addrs_vel = []
    filename = []
    if filetype == 'train':
        filename = 'train_addrs.txt'
    else:
        filename = 'test_addrs.txt'
Code example #15
File: test.py Project: emited/DAPPER
import os
from manta import *
import sys
sys.path.append("/home/debezenac/packages/manta/tensorflow/tools")
import uniio

root_2 = '/home/debezenac/projects/DAPPER/mods/Euler/uni/test2'
vel_fn_2 = os.path.join(root_2, 'vel_%04d.uni')
den_fn_2 = os.path.join(root_2, 'density_%04d.uni')

i = 0
header1, content1 = uniio.readUni(den_fn_2 %
                                  (i + 1))  # returns [Z,Y,X,C] np array
header2, content2 = uniio.readUni(vel_fn_2 %
                                  (i + 1))  # returns [Z,Y,X,C] np array

import numpy as np
print('content', content1.shape, content2.shape)
print('n', np.sum(content2[:, :, :, 0] == 0))
print('n', np.sum(content2[:, :, :, 1] == 0))
print('n', np.sum(content2[:, :, :, 2] == 0))
print(64**2)

from collections import OrderedDict
import time


def generate_header(content):
    '''be careful, the order in the header counts!'''

    header = OrderedDict([
Code example #16
    if generateUni:
        # set low density to zero to save storage space...
        cond_out = dim_output < 0.0005
        dim_output[cond_out] = 0
        uniio.writeUni(
            packedSimPath + '/sim_%04d/source_%04d.uni' %
            (fromSim, imageindex + frame_min), head, dim_output)
        print('stored .uni file')
    return


print('*****OUTPUT ONLY*****')
#print("{} tiles, {} tiles per image".format(100, 1))
#print("Generating images (batch size: {}, batches: {})".format(1, 100))

if load_model_test_3 == -1 and load_model_test_2 == -1 and load_model_test_1 == -1:
    print("At least one network has to be loaded.")
    exit(1)

head_0, _ = uniio.readUni(packedSimPath + "sim_%04d/density_low_%04d.uni" %
                          (fromSim, 0))
for layerno in range(frame_min, frame_max):
    print(layerno)
    generate3DUniForNewNetwork(imageindex=layerno - frame_min,
                               outPath=test_path,
                               head=head_0)

print('Test finished, %d pngs written to %s.' %
      (frame_max - frame_min, test_path))
Code example #17
def generate3DUniForNewNetwork(imageindex=0,
                               outPath='../',
                               inputPer=3.0,
                               head=None):
    start = time.time()
    dim_output = []
    intermed_res1 = []

    batch_xs_tile = x_3d[imageindex]

    if not load_model_test_1 == -1:
        # z y x -> 2d conv on y - x (or different combination of axis, depending on transposeAxis)
        # and switch velocity channels depending on orientation
        if transposeAxis == 1:
            batch_xs_in = np.reshape(
                scipy.ndimage.zoom(batch_xs_tile, [1, upRes, 1, 1],
                                   order=1,
                                   mode='constant',
                                   cval=0.0),
                [-1, simSizeHigh, simSizeLow, n_inputChannels])
            batch_xs_in = np.reshape(batch_xs_in.transpose(
                1, 0, 2, 3), (-1, simSizeLow, simSizeLow, n_inputChannels))
            temp_vel = np.copy(batch_xs_in[:, :, :, 3:4])
            batch_xs_in[:, :, :, 3:4] = np.copy(batch_xs_in[:, :, :, 2:3])
            batch_xs_in[:, :, :, 2:3] = np.copy(temp_vel)
        elif transposeAxis == 2:
            batch_xs_in = np.reshape(
                scipy.ndimage.zoom(batch_xs_tile, [1, 1, upRes, 1],
                                   order=1,
                                   mode='constant',
                                   cval=0.0),
                [-1, simSizeLow, simSizeHigh, n_inputChannels])
            batch_xs_in = np.reshape(batch_xs_in.transpose(
                2, 1, 0, 3), (-1, simSizeLow, simSizeLow, n_inputChannels))
            temp_vel = np.copy(batch_xs_in[:, :, :, 3:4])
            batch_xs_in[:, :, :, 3:4] = np.copy(batch_xs_in[:, :, :, 1:2])
            batch_xs_in[:, :, :, 1:2] = np.copy(temp_vel)
        elif transposeAxis == 3:
            batch_xs_in = np.reshape(
                scipy.ndimage.zoom(batch_xs_tile, [1, 1, upRes, 1],
                                   order=1,
                                   mode='constant',
                                   cval=0.0),
                [-1, simSizeLow, simSizeHigh, n_inputChannels])
            batch_xs_in = np.reshape(batch_xs_in.transpose(
                2, 0, 1, 3), (-1, simSizeLow, simSizeLow, n_inputChannels))
            temp_vel = np.copy(batch_xs_in[:, :, :, 3:4])
            temp_vel2 = np.copy(batch_xs_in[:, :, :, 2:3])
            batch_xs_in[:, :, :, 3:4] = np.copy(batch_xs_in[:, :, :, 1:2])
            batch_xs_in[:, :, :, 2:3] = np.copy(temp_vel)
            batch_xs_in[:, :, :, 1:2] = np.copy(temp_vel2)
        else:
            batch_xs_in = np.reshape(
                scipy.ndimage.zoom(batch_xs_tile, [upRes, 1, 1, 1],
                                   order=1,
                                   mode='constant',
                                   cval=0.0),
                [-1, simSizeLow, simSizeLow, n_inputChannels])

        if add_adj_idcs1:
            batch_xs_in = np.concatenate(
                (batch_xs_in, np.zeros_like(batch_xs_in[:, :, :, 0:1])),
                axis=3)
            batch_xs_in = np.concatenate(
                (batch_xs_in, np.zeros_like(batch_xs_in[:, :, :, 0:1])),
                axis=3)

            for i in range(batch_xs_in.shape[0]):
                if i == 0:
                    batch_xs_in[i:i + 1, :, :,
                                n_inputChannels:n_inputChannels +
                                1] = np.zeros_like(batch_xs_in[i:i + 1, :, :,
                                                               0:1])
                    batch_xs_in[i:i + 1, :, :,
                                n_inputChannels + 1:n_inputChannels +
                                2] = batch_xs_in[i + 1:i + 2, :, :, 0:1]
                elif i == batch_xs_in.shape[0] - 1:
                    batch_xs_in[i:i + 1, :, :,
                                n_inputChannels:n_inputChannels +
                                1] = batch_xs_in[i - 1:i, :, :, 0:1]
                    batch_xs_in[i:i + 1, :, :,
                                n_inputChannels + 1:n_inputChannels +
                                2] = np.zeros_like(batch_xs_in[i - 1:i, :, :,
                                                               0:1])
                else:
                    batch_xs_in[i:i + 1, :, :,
                                n_inputChannels:n_inputChannels +
                                1] = batch_xs_in[i - 1:i, :, :, 0:1]
                    batch_xs_in[i:i + 1, :, :,
                                n_inputChannels + 1:n_inputChannels +
                                2] = batch_xs_in[i + 1:i + 2, :, :, 0:1]

        # start generating output of first network
        batch_sz_out = 8
        run_metadata = tf.RunMetadata()

        start = time.time()
        for j in range(0, batch_xs_in.shape[0] // batch_sz_out):
            #	x in shape (z,y,x,c)
            # 	-> 512 x 512 x 512
            results = sess.run(sampler,
                               feed_dict={
                                   x:
                                   batch_xs_in[j * batch_sz_out:(j + 1) *
                                               batch_sz_out].reshape(
                                                   -1, n_input),
                                   percentage:
                                   inputPer,
                                   train:
                                   False
                               })
            intermed_res1.extend(results)

            # exact timing of network performance...
            if 0:
                fetched_timeline = timeline.Timeline(run_metadata.step_stats)
                chrome_trace = fetched_timeline.generate_chrome_trace_format()
                with open('timeline_8x_%04d.json' % (j), 'w') as f:
                    f.write(chrome_trace)
        end = time.time()

        print("time for first network: {0:.6f}".format(end - start))

        dim_output = np.copy(
            np.array(intermed_res1).reshape(simSizeHigh, simSizeHigh,
                                            simSizeHigh)).transpose(2, 1, 0)

        save_img_3d(
            outPath + 'source_1st_{:04d}.png'.format(imageindex + frame_min),
            dim_output / 80)

    if not load_model_test_2 == -1:
        if transposeAxis == 3:
            batch_xs_in = np.reshape(
                scipy.ndimage.zoom(batch_xs_tile, [1, upRes, 1, 1],
                                   order=1,
                                   mode='constant',
                                   cval=0.0),
                [-1, simSizeHigh, simSizeLow, n_inputChannels])
            batch_xs_in = np.reshape(batch_xs_in.transpose(
                1, 0, 2, 3), (-1, simSizeLow, simSizeLow, n_inputChannels))
            temp_vel = np.copy(batch_xs_in[:, :, :, 3:4])
            batch_xs_in[:, :, :, 3:4] = np.copy(batch_xs_in[:, :, :, 2:3])
            batch_xs_in[:, :, :, 2:3] = np.copy(temp_vel)
        elif transposeAxis == 0:
            batch_xs_in = np.reshape(
                scipy.ndimage.zoom(batch_xs_tile, [1, 1, upRes, 1],
                                   order=1,
                                   mode='constant',
                                   cval=0.0),
                [-1, simSizeLow, simSizeHigh, n_inputChannels])
            batch_xs_in = np.reshape(batch_xs_in.transpose(
                2, 1, 0, 3), (-1, simSizeLow, simSizeLow, n_inputChannels))
            temp_vel = np.copy(batch_xs_in[:, :, :, 3:4])
            batch_xs_in[:, :, :, 3:4] = np.copy(batch_xs_in[:, :, :, 1:2])
            batch_xs_in[:, :, :, 1:2] = np.copy(temp_vel)
        elif transposeAxis == 1:
            batch_xs_in = np.reshape(
                scipy.ndimage.zoom(batch_xs_tile, [1, 1, upRes, 1],
                                   order=1,
                                   mode='constant',
                                   cval=0.0),
                [-1, simSizeLow, simSizeHigh, n_inputChannels])
            batch_xs_in = np.reshape(batch_xs_in.transpose(
                2, 0, 1, 3), (-1, simSizeLow, simSizeLow, n_inputChannels))
            temp_vel = np.copy(batch_xs_in[:, :, :, 3:4])
            temp_vel2 = np.copy(batch_xs_in[:, :, :, 2:3])
            batch_xs_in[:, :, :, 3:4] = np.copy(batch_xs_in[:, :, :, 1:2])
            batch_xs_in[:, :, :, 2:3] = np.copy(temp_vel)
            batch_xs_in[:, :, :, 1:2] = np.copy(temp_vel2)
        else:
            batch_xs_in = np.reshape(
                scipy.ndimage.zoom(batch_xs_tile, [upRes, 1, 1, 1],
                                   order=1,
                                   mode='constant',
                                   cval=0.0),
                [-1, simSizeLow, simSizeLow, n_inputChannels])

        if add_adj_idcs2:
            batch_xs_in = np.concatenate(
                (batch_xs_in, np.zeros_like(batch_xs_in[:, :, :, 0:1])),
                axis=3)

            for i in range(batch_xs_in.shape[0]):
                if i == 0:
                    batch_xs_in[i:i + 1, :, :,
                                n_inputChannels:n_inputChannels +
                                1] = np.zeros_like(batch_xs_in[i:i + 1, :, :,
                                                               0:1])
                    batch_xs_in[i:i + 1, :, :,
                                n_inputChannels + 1:n_inputChannels +
                                2] = batch_xs_in[i + 1:i + 2, :, :, 0:1]
                elif i == batch_xs_in.shape[0] - 1:
                    batch_xs_in[i:i + 1, :, :,
                                n_inputChannels:n_inputChannels +
                                1] = batch_xs_in[i - 1:i, :, :, 0:1]
                    batch_xs_in[i:i + 1, :, :,
                                n_inputChannels + 1:n_inputChannels +
                                2] = np.zeros_like(batch_xs_in[i - 1:i, :, :,
                                                               0:1])
                else:
                    batch_xs_in[i:i + 1, :, :,
                                n_inputChannels:n_inputChannels +
                                1] = batch_xs_in[i - 1:i, :, :, 0:1]
                    batch_xs_in[i:i + 1, :, :,
                                n_inputChannels + 1:n_inputChannels +
                                2] = batch_xs_in[i + 1:i + 2, :, :, 0:1]

        intermed_res1 = []
        batch_sz_out = 2

        start = time.time()
        for j in range(0, batch_xs_in.shape[0] // batch_sz_out):
            #	x in shape (z,y,x,c)
            # 	-> 64 x 256 x 256
            results = sess.run(
                sampler_2,
                feed_dict={
                    x:
                    batch_xs_in[j * batch_sz_out:(j + 1) *
                                batch_sz_out].reshape(-1, n_input),
                    y:
                    dim_output[j * batch_sz_out:(j + 1) *
                               batch_sz_out].reshape(-1, n_output),
                    percentage:
                    inputPer,
                    train:
                    False
                })
            intermed_res1.extend(results)

            # exact timing of network performance...
            if 0:
                fetched_timeline = timeline.Timeline(run_metadata.step_stats)
                chrome_trace = fetched_timeline.generate_chrome_trace_format()
                with open('timeline_8x_%04d.json' % (j), 'w') as f:
                    f.write(chrome_trace)
        end = time.time()

        print("time for second network: {0:.6f}".format(end - start))

        dim_output = np.array(intermed_res1).reshape(simSizeHigh, simSizeHigh,
                                                     simSizeHigh).transpose(
                                                         1, 2, 0)

        save_img_3d(
            outPath + 'source_2nd_{:04d}.png'.format(imageindex + frame_min),
            dim_output / 80)

    if not load_model_test_3 == -1:
        if transposeAxis == 0:
            batch_xs_in = np.reshape(
                scipy.ndimage.zoom(batch_xs_tile, [1, upRes, 1, 1],
                                   order=1,
                                   mode='constant',
                                   cval=0.0),
                [-1, simSizeHigh, simSizeLow, n_inputChannels])
            batch_xs_in = np.reshape(batch_xs_in.transpose(
                1, 0, 2, 3), (-1, simSizeLow, simSizeLow, n_inputChannels))
            temp_vel = np.copy(batch_xs_in[:, :, :, 3:4])
            batch_xs_in[:, :, :, 3:4] = np.copy(batch_xs_in[:, :, :, 2:3])
            batch_xs_in[:, :, :, 2:3] = np.copy(temp_vel)
        elif transposeAxis == 3:
            batch_xs_in = np.reshape(
                scipy.ndimage.zoom(batch_xs_tile, [1, 1, upRes, 1],
                                   order=1,
                                   mode='constant',
                                   cval=0.0),
                [-1, simSizeLow, simSizeHigh, n_inputChannels])
            batch_xs_in = np.reshape(batch_xs_in.transpose(
                0, 2, 1, 3), (-1, simSizeLow, simSizeLow, n_inputChannels))
            temp_vel = np.copy(batch_xs_in[:, :, :, 2:3])
            batch_xs_in[:, :, :, 2:3] = np.copy(batch_xs_in[:, :, :, 1:2])
            batch_xs_in[:, :, :, 1:2] = np.copy(temp_vel)
        elif transposeAxis == 2:
            batch_xs_in = np.reshape(
                scipy.ndimage.zoom(batch_xs_tile, [1, 1, upRes, 1],
                                   order=1,
                                   mode='constant',
                                   cval=0.0),
                [-1, simSizeLow, simSizeHigh, n_inputChannels])
            batch_xs_in = np.reshape(batch_xs_in.transpose(
                1, 2, 0, 3), (-1, simSizeLow, simSizeLow, n_inputChannels))
            temp_vel = np.copy(batch_xs_in[:, :, :, 3:4])
            temp_vel2 = np.copy(batch_xs_in[:, :, :, 1:2])
            batch_xs_in[:, :, :, 3:4] = np.copy(batch_xs_in[:, :, :, 2:3])
            batch_xs_in[:, :, :, 2:3] = np.copy(batch_xs_in[:, :, :, 1:2])
            batch_xs_in[:, :, :, 1:2] = np.copy(temp_vel)
        else:
            batch_xs_in = np.reshape(
                scipy.ndimage.zoom(batch_xs_tile, [upRes, 1, 1, 1],
                                   order=1,
                                   mode='constant',
                                   cval=0.0),
                [-1, simSizeLow, simSizeLow, n_inputChannels])

        if add_adj_idcs3:
            batch_xs_in = np.concatenate(
                (batch_xs_in, np.zeros_like(batch_xs_in[:, :, :, 0:1])),
                axis=3)

            for i in range(batch_xs_in.shape[0]):
                if i == 0:
                    batch_xs_in[i:i + 1, :, :,
                                n_inputChannels:n_inputChannels +
                                1] = np.zeros_like(batch_xs_in[i:i + 1, :, :,
                                                               0:1])
                    batch_xs_in[i:i + 1, :, :,
                                n_inputChannels + 1:n_inputChannels +
                                2] = batch_xs_in[i + 1:i + 2, :, :, 0:1]
                elif i == batch_xs_in.shape[0] - 1:
                    batch_xs_in[i:i + 1, :, :,
                                n_inputChannels:n_inputChannels +
                                1] = batch_xs_in[i - 1:i, :, :, 0:1]
                    batch_xs_in[i:i + 1, :, :,
                                n_inputChannels + 1:n_inputChannels +
                                2] = np.zeros_like(batch_xs_in[i - 1:i, :, :,
                                                               0:1])
                else:
                    batch_xs_in[i:i + 1, :, :,
                                n_inputChannels:n_inputChannels +
                                1] = batch_xs_in[i - 1:i, :, :, 0:1]
                    batch_xs_in[i:i + 1, :, :,
                                n_inputChannels + 1:n_inputChannels +
                                2] = batch_xs_in[i + 1:i + 2, :, :, 0:1]

        intermed_res1 = []
        batch_sz_out = 2

        start = time.time()
        for j in range(0, batch_xs_in.shape[0] // batch_sz_out):
            #	x in shape (z,y,x,c)
            # 	-> 64 x 256 x 256
            results = sess.run(
                sampler_3,
                feed_dict={
                    x:
                    batch_xs_in[j * batch_sz_out:(j + 1) *
                                batch_sz_out].reshape(-1, n_input),
                    y:
                    dim_output[j * batch_sz_out:(j + 1) *
                               batch_sz_out].reshape(-1, n_output),
                    percentage:
                    inputPer,
                    train:
                    False
                })
            intermed_res1.extend(results)

            # exact timing of network performance...
            if 0:
                fetched_timeline = timeline.Timeline(run_metadata.step_stats)
                chrome_trace = fetched_timeline.generate_chrome_trace_format()
                with open('timeline_8x_%04d.json' % (j), 'w') as f:
                    f.write(chrome_trace)
        end = time.time()

        print("time for third network: {0:.6f}".format(end - start))

        dim_output = np.array(intermed_res1).reshape(simSizeHigh, simSizeHigh,
                                                     simSizeHigh)

        save_img_3d(
            outPath + 'source_3rd_{:04d}.png'.format(imageindex + frame_min),
            dim_output / 80)

    if not load_model_no_2 == -1:
        dim_output = dim_output.transpose(2, 0, 1)
    if not load_model_no_1 == -1:
        dim_output = dim_output.transpose(2, 1, 0)

    # output for images of slices (along every dimension)
    if 1:
        for i in range(simSizeHigh // 2 - 1, simSizeHigh // 2 + 1):
            if np.average(dim_output[i]) > 0.0001:
                save_img(outPath + 'slice_xy_{:04d}_{:04d}.png'.format(
                    i, (imageindex + frame_min)),
                         dim_output[i])  #.transpose(2,1,0)
                save_img(
                    outPath + 'slice_yz_{:04d}_{:04d}.png'.format(
                        i, (imageindex + frame_min)),
                    dim_output.transpose(2, 1, 0)[i])
                save_img(
                    outPath + 'slice_xz_{:04d}_{:04d}.png'.format(
                        i, (imageindex + frame_min)),
                    dim_output.transpose(1, 0, 2)[i])
    if (imageindex + frame_min) == 110:
        for i in range(0, tileSizeHigh):
            if np.average(dim_output[i]) > 0.0001:
                save_img(outPath + 'slice_xy_{:04d}_{:04d}.png'.format(
                    (imageindex + frame_min), i),
                         dim_output[i])  #.transpose(2,1,0)
                save_img(
                    outPath + 'slice_yz_{:04d}_{:04d}.png'.format(
                        (imageindex + frame_min), i),
                    dim_output.transpose(2, 1, 0)[i])
                save_img(
                    outPath + 'slice_xz_{:04d}_{:04d}.png'.format(
                        (imageindex + frame_min), i),
                    dim_output.transpose(1, 0, 2)[i])

    if head is None:
        head, _ = uniio.readUni(packedSimPath +
                                "sim_%04d/density_low_%04d.uni" % (fromSim, 0))
    head['dimX'] = simSizeHigh
    head['dimY'] = simSizeHigh
    head['dimZ'] = simSizeHigh

    if generateUni:
        # set low density to zero to save storage space...
        cond_out = dim_output < 0.0005
        dim_output[cond_out] = 0
        uniio.writeUni(
            packedSimPath + '/sim_%04d/source_%04d.uni' %
            (fromSim, imageindex + frame_min), head, dim_output)
        print('stored .uni file')
    return
Code example #18
import collections
import glob

#-- mantaflow fluid solver
res = 128
dim = 3
gs = vec3(res, res, res)
s = Solver(name='main', gridSize=gs, dim=dim)

phi = s.create(LevelsetGrid)
mesh = s.create(Mesh)
flags = s.create(FlagGrid)
flags.initDomain(boundaryWidth=0)

# uni head definition
header_example, content_example = uniio.readUni(
    os.path.join('../tensorflow/shared', 'flipLevelSet_example.uni'))


# ../VisMat/data\4096\sbfs_flip_water_pipes_10113\flipSketchGrid_0030.bin
# ../manta/sbfs_scenes/data\sbfs_flip_water_pipes_10113\flipLevelSet_0030.uni
# ../manta/sbfs_scenes/data\sbfs_flip_water_pipes_10113\flipVel_0030.uni
def load_data_addrs(args):
    addrs = []
    train_addrs = []
    test_addrs = []

    with open(os.path.join(args.shared_dir, 'train_addrs.txt'), 'r') as f:
        for line in f:
            (addr_x, addr_y, addr_z) = line.split()
            train_addrs.append((addr_x, addr_y, addr_z))
    with open(os.path.join(args.shared_dir, 'test_addrs.txt'), 'r') as f:
        for line in f:
            (addr_x, addr_y, addr_z) = line.split()
            test_addrs.append((addr_x, addr_y, addr_z))
Code example #19
def run(dim, SynCurlFlag, anticipatP, dirPath, SEED, data_dirs, den_model,
        curl_model, PATCH_SIZE, LOGs):
    steps = 160
    #====== random seeds =======================================
    np.random.seed(SEED)
    tf.set_random_seed(SEED)

    sess = tf.InteractiveSession()
    # cnn settings
    data_gnames = ['density', 'curl']  # can be 'velocity-density' as one entry
    des_weight = [0.8660254, 0.5]  # norm(des_weight) should be 1
    des_models = [den_model, curl_model]
    matchEmax = 0.8
    holdEmax = 1.2

    BASEP_SIZE = PATCH_SIZE
    DES_LENGTH = 128 * (3 - 1)

    datasetnum = len(data_dirs)
    cnn_num = len(des_models)
    cnnInstList = []  # build two cnn graphs, one for density, one for curl of vel
    for cnnI in range(cnn_num):
        cnnInstList.append(\
         MyCnnGraph( sess, 3, 1, 0, PATCH_SIZE, BASEP_SIZE,\
          True, 1e-3, 1e-5, [5,5,5,3], [1,1,1,1], [4,8,16,32], [1,0,1,0], \
          [DES_LENGTH], [DES_LENGTH*2, 1], (cnnI)*2, 0.0, 0.7 ))
        cnnInstList[cnnI].loadModel(des_models[cnnI])  # load trained model

    lamda = 0.01
    fadT = 30
    aftF = fadT * 2  # enough to fade in and fade out
    preF = 0
    if (anticipatP):  # fully visible when applying
        preF = fadT
        aftF = fadT
    patchRep = pr.PatchRepo(data_dirs,
                            ['file_den/libdata', 'file_curl/libdata'],
                            PATCH_SIZE, preF, aftF, des_weight)

    # solver params
    res = 50
    gs = vec3(res, res * 1.5, res)
    s = Solver(name='main', gridSize=gs, dim=3)
    s.timestep = 0.25
    timings = Timings()

    # prepare grids
    flags = s.create(FlagGrid)
    vel = s.create(MACGrid)
    density = s.create(RealGrid)
    pressure = s.create(RealGrid)

    # patch grids
    xlgs = vec3(res * 4, res * 6, res * 4)
    xl = Solver(name='xl', gridSize=xlgs, dim=3)
    hiDen = xl.create(RealGrid)
    patchDen = xl.create(RealGrid)
    weiG = xl.create(RealGrid)
    vec3weiG = xl.create(MACGrid)
    patchDen1 = xl.create(RealGrid)

    bWidth = 1
    flags.initDomain(boundaryWidth=bWidth)
    flags.fillGrid()

    setOpenBound(flags, bWidth, 'Y', FlagOutflow | FlagEmpty)

    if (GUI):
        gui = Gui()
        gui.show(True)
        #gui.pause()

    upz = vec3(0, 0.05, 0)
    cpos = vec3(0.5, 0.1, 0.5)
    source = s.create(Cylinder,
                      center=gs * cpos,
                      radius=res * 0.15,
                      z=gs * upz)
    noise = s.create(NoiseField, fixedSeed=265, loadFromFile=True)
    noise.posScale = vec3(20)
    noise.clamp = True
    noise.clampNeg = 0
    noise.clampPos = 2
    noise.valScale = 1
    noise.valOffset = 0.075
    noise.timeAnim = 0.3

    baseR = 12.0
    pp = s.create(PatchSynSystem,
                  subdiv=2,
                  baseRes=baseR,
                  jN=6,
                  anticipate=anticipatP)
    pp.saveLocalPerCellAcceleration(accSZ=vec3(BASEP_SIZE, BASEP_SIZE, 1))
    # acceleration for packing numpy arrays

    den_shape = [-1, BASEP_SIZE, BASEP_SIZE, BASEP_SIZE, 1]
    vel_shape = [-1, BASEP_SIZE, BASEP_SIZE, BASEP_SIZE, 3]
    initfadW = 0.0
    if (anticipatP): initfadW = 1.0

    #main loop
    for t in range(0, steps):
        print('Frame %d' % (t))

        advectSemiLagrange(flags=flags, vel=vel, grid=density, order=2)
        advectSemiLagrange(flags=flags,
                           vel=vel,
                           grid=vel,
                           order=2,
                           openBounds=True,
                           boundaryWidth=bWidth)
        resetOutflow(flags=flags, real=density)

        setWallBcs(flags=flags, vel=vel)
        addBuoyancy(density=density,
                    vel=vel,
                    gravity=vec3(0, -8e-3, 0),
                    flags=flags)

        solvePressure(flags=flags, vel=vel, pressure=pressure)
        setWallBcs(flags=flags, vel=vel)
        densityInflow(flags=flags,
                      density=density,
                      noise=noise,
                      shape=source,
                      scale=1,
                      sigma=0.5)

        # projectPpmFull( density, dirPath + 'den_%04d.ppm'% (t), 0, 1.7 )
        if (anticipatP and t >= 40):  # have to save for backward anticipation
            density.save(dirPath + 'den_%04d.uni' % (t))
            vel.save(dirPath + 'vel_%04d.uni' % (t))

        if (t >= 60):  # patch operations
            pp.AdvectWithControl(lamda=lamda,
                                 flags=flags,
                                 vel=vel,
                                 integrationMode=IntRK4,
                                 scaleLen=True)
            # sample new patches with cube cages
            pp.sampleCandidates(denG=density,
                                samWidth=16.0,
                                weiThresh=0.1,
                                occThresh=0.5)
            pp.initCandidateCage(denG=density)
            pp.addCandidatePatch()
            pp.initNewPatchInfo(initfadW)  # initfadW
            # pack local regions for descriptor calculation
            patchN = pp.pySize()
            realNum = 0
            if (patchN > 0):
                patdic2 = np.intc([-1] * patchN)
                denMin = np.array([0.0] * patchN, dtype=np.float32)
                denMax = np.array([1.0] * patchN, dtype=np.float32)
                for cnnI in range(cnn_num):
                    cnnBuffer = 0
                    if (data_gnames[cnnI] == ('velocity')):
                        patVGrids = np.array(
                            [0.0] * (patchN * BASEP_SIZE * BASEP_SIZE *
                                     BASEP_SIZE * 3),
                            dtype=np.float32)
                        realNum = pp.saveLocalPatchNumpyMAC(
                            vel, patVGrids, patdic2,
                            vec3(BASEP_SIZE, BASEP_SIZE, BASEP_SIZE))
                        if (realNum > 0):
                            cnnBuffer = (patVGrids.reshape(vel_shape)
                                         )[:realNum]  # nx36x36x36x3
                        del patVGrids
                    elif (data_gnames[cnnI] == ('curl')):
                        patCGrids = np.array(
                            [0.0] * (patchN * BASEP_SIZE * BASEP_SIZE *
                                     BASEP_SIZE * 3),
                            dtype=np.float32)
                        realNum = pp.saveLocalPatchNumpyCurl(
                            vel, patCGrids, patdic2,
                            vec3(BASEP_SIZE, BASEP_SIZE, BASEP_SIZE))
                        if (realNum > 0):
                            cnnBuffer = (patCGrids.reshape(vel_shape)
                                         )[:realNum]  # nx36x36x36x3 for 3d
                        del patCGrids
                    elif (data_gnames[cnnI] == ('density')):
                        patNGrids = np.array(
                            [0.0] * (patchN * BASEP_SIZE * BASEP_SIZE *
                                     BASEP_SIZE * 1),
                            dtype=np.float32)
                        realNum = pp.saveLocalPatchNumpyReal(
                            density, patNGrids, patdic2,
                            vec3(BASEP_SIZE, BASEP_SIZE, BASEP_SIZE))
                        if (realNum > 0):
                            cnnBuffer = (patNGrids.reshape(den_shape)
                                         )[:realNum]  # nx36x36x36x1
                        patNGrids = (patNGrids.reshape([patchN, -1]))[:realNum]
                        denMin = np.maximum(np.amin(patNGrids, axis=1),
                                            0)  # for scaling...
                        denMax = np.maximum(np.amax(patNGrids, axis=1),
                                            0)  # for scaling...
                        del patNGrids
                    if (realNum > 0):
                        cnnBuffer = np.nan_to_num(cnnBuffer)
                        run_dict = {cnnInstList[cnnI].base_grid: cnnBuffer}
                        desDataBuffer = sess.run(
                            cnnInstList[cnnI].l_branch_out,
                            feed_dict=run_dict)  #sp,realNum x FC_INNER_N[-1]
                        normDes = normalize(
                            desDataBuffer, axis=1,
                            norm='l2')  # shape, realNum x FC_INNER_N[-1]
                        if (cnnI == 0):
                            tarDes = np.array(normDes * des_weight[0],
                                              dtype=np.float32)
                        else:
                            tarDes = np.concatenate(
                                [tarDes, normDes * des_weight[cnnI]],
                                axis=len(tarDes.shape) - 1)
                        del normDes, desDataBuffer, cnnBuffer

            if (realNum > 0):
                old_matchList = np.array([0] * patchN, dtype=np.intc)
                denMinL = np.array([0.0] * patchN, dtype=np.float32)
                denMaxL = np.array([1.0] * patchN, dtype=np.float32)
                new_matchError = np.array([0.0] * patchN, dtype=np.float32)
                pp.getMatchList(getFadOut=True,
                                matchList=old_matchList,
                                tarMin=denMinL,
                                tarMax=denMaxL)
                newmatchList = np.copy(old_matchList)
                timeOutList = patchRep.getNextMatchError(newmatchList, new_matchError, \
                 tarDes, patdic2, matchEmax, holdEmax)
                # load repo patches
                patSynGrids = []
                patSynDict = []
                pnum = 0
                for pi in range(patchN):
                    if (newmatchList[pi] >= 0):
                        repoPath = patchRep.getPatchPath(newmatchList[pi])
                        pHead, pCont = uniio.readUni(repoPath)
                        #scale according to min max, denMin, denMax
                        gi = patdic2[pi]
                        if (gi >= 0 and old_matchList[pi] < 0):
                            pmin = np.amin(pCont)
                            pmax = np.amax(pCont)
                            scalefactor = (pmax - pmin)
                            if (scalefactor < 0.01):
                                pmax = pmax + 0.005
                                pmin = pmin - 0.005
                            scalefactor = (denMax[gi] - denMin[gi]) / (pmax -
                                                                       pmin)
                            denMinL[pi] = denMin[gi] - scalefactor * pmin
                            denMaxL[pi] = scalefactor
                        #pCont = (pCont - pmin) / scalefactor * (denMaxL[pi] - denMinL[pi]) + denMinL[pi]
                        pCont = pCont * denMaxL[pi] + denMinL[pi]

                        patSynGrids.append(pCont)
                        patSynDict.append(pnum)
                        pnum = pnum + 1
                    else:
                        patSynDict.append(-1)
                patSynGrids = np.array(patSynGrids, dtype=np.float32)
                patSynDict = np.intc(patSynDict)

                pp.setMatchList(newmatchList, new_matchError, denMinL, denMaxL)
                #pp.getMatchList( getFadOut = True, tarMin = denMinL, tarMax = denMaxL)
                pp.removeBad(80.0, timeOutList, density)
                pp.updateFading(1.0 / fadT)
                if (anticipatP): pp.anticipateStore(t)

                # acceleration for synthesis functions, should be called right before synthesis functions
                pp.synPerCellAcceleration(tarSZ=xlgs)
                pp.patchSynthesisReal( tarG = patchDen1, patchSZ = vec3(pHead['dimX'],pHead['dimY'],pHead['dimZ']), \
                 patchGrids = patSynGrids, patchDict = patSynDict, withSpaceW = True, weigG = weiG)
                if (anticipatP):  # save for synthesizing continuely
                    patchDen1.save(dirPath + 'Pden_%04d.uni' % (t))
                    weiG.save(dirPath + 'Pwei_%04d.uni' % (t))
                # scale and merge with base
                synthesisScale(patchDen1, weiG, density)
                if (anticipatP):
                    projectPpmFull(patchDen1,
                                   dirPath + 'PdenPre_%04d.ppm' % (t), 0, 1.7)
                else:
                    projectPpmFull(patchDen1, dirPath + 'Pden_%04d.ppm' % (t),
                                   0, 1.7)
                if (LOGs): pp.printLog(dirPath + 'Plog_%04d.log' % (t))
                pp.updateParts(compress=True)  # simply increase lifetime, remove PNEW flags
            projectPpmFull(density, dirPath + 'base_%04d.ppm' % (t), 0, 1.7)
        #timings.display()
        s.step()

    pp.clearParts()
    if (anticipatP):  # patch anticipation
        projectPpmFull(patchDen1, dirPath + 'Pden_%04d.ppm' % (t - 1), 0, 1.7)
        t = t - 2
        while (t >= 40):
            print('Anticipating frame %d' % (t))
            density.load(dirPath + 'den_%04d.uni' % (t))
            vel.load(dirPath + 'vel_%04d.uni' % (t + 1))
            if (t >= 60):
                patchDen1.load(dirPath + 'Pden_%04d.uni' % (t))
                weiG.load(dirPath + 'Pwei_%04d.uni' % (t))
            else:
                patchDen1.setConst(0.0)
                weiG.setConst(0.0)

            vel.multConst(vec3(-1.0, -1.0, -1.0))
            pp.anticipateAdd(t + 1.0)
            patchN = pp.pySize()
            if (patchN <= 0):
                t = t - 1
                continue
            pp.AdvectWithControl(lamda=lamda,
                                 flags=flags,
                                 vel=vel,
                                 integrationMode=IntRK4)
            pp.updateFading(-1.0 / fadT)
            pp.removeBad(maxDefE=9999999.0, den=density)
            matchList = np.array([0] * patchN, dtype=np.intc)
            denMinL = np.array([0.0] * patchN, dtype=np.float32)
            denMaxL = np.array([1.0] * patchN, dtype=np.float32)
            pp.getMatchList(getFadOut=True,
                            matchList=matchList,
                            tarMin=denMinL,
                            tarMax=denMaxL)
            matchList = matchList - 1  # fading in
            pp.setMatchList(matchList)
            patSynGrids = []  # load repo patches
            patSynDict = []
            pnum = 0
            for pi in range(patchN):
                if (matchList[pi] >= 0):
                    repoPath = patchRep.getPatchPath(matchList[pi])
                    pHead, pCont = uniio.readUni(repoPath)
                    pCont = pCont * denMaxL[pi] + denMinL[pi]
                    patSynGrids.append(pCont)
                    patSynDict.append(pnum)
                    pnum = pnum + 1
                else:
                    patSynDict.append(-1)
            patSynGrids = np.array(patSynGrids, dtype=np.float32)
            patSynDict = np.intc(patSynDict)

            pp.synPerCellAcceleration(tarSZ=xlgs)
            pp.patchSynthesisReal( tarG = patchDen1, patchSZ = vec3(pHead['dimX'],pHead['dimY'],pHead['dimZ']), \
             patchGrids = patSynGrids, patchDict = patSynDict, withSpaceW = True, weigG = weiG, clear = False)
            if (LOGs): pp.printLog(dirPath + 'Plog_back_%04d.log' % (t))
            synthesisScale(patchDen1, weiG, density)
            projectPpmFull(patchDen1, dirPath + 'Pden_%04d.ppm' % (t), 0, 1.7)
            pp.updateParts(compress=True)  # simply increase lifetime, remove PNEW flags
            t = t - 1
Code example #20
basePath = '../data/'

trainingEpochs = 2500
batchSize = 10
inSize = 64 * 64 * 1  # warning - hard coded to scalar values 64^2

# load data
densities = []

# start reading simSimple 1000 ff.
for sim in range(1000, 2000):
    if os.path.exists("%s/simSimple_%04d" % (basePath, sim)):
        for i in range(0, 100):
            filename = "%s/simSimple_%04d/density_%04d.uni"
            uniPath = filename % (basePath, sim, i)  # 100 files per sim
            header, content = uniio.readUni(uniPath)
            h = header['dimX']
            w = header['dimY']
            arr = np.reshape(content, [w, h])
            arr = arr[::-1]  # reverse order
            arr = np.reshape(arr, [w, h, 1])
            densities.append(arr)

loadNum = len(densities)
if loadNum < 200:
    print(
        "Error - use at least two full sims, generate data by running 'manta ./manta_genSimSimple.py' a few times..."
    )
    exit(1)

densities = np.reshape(densities, (len(densities), 64, 64, 1))