def standardrecon(config, base, pos, bias, R=8):
    bs, nc = config['boxsize'], config['nc']
    basesm = tools.gauss(base, tools.fftk((nc, nc, nc), bs), R)

    g = tf.Graph()
    with g.as_default():
        mesh = tf.constant(basesm.astype(np.float32))
        meshk = tfpmfuncs.r2c3d(mesh, norm=nc**3)

        # Zel'dovich displacement of the data points, reversed and scaled by the bias
        DX = tfpm.lpt1(meshk, pos, config)
        DX = tf.multiply(DX, -1 / bias)
        pos = tf.add(pos, DX)
        displaced = tf.zeros_like(mesh)
        displaced = tfpm.cic_paint(displaced, pos, boxsize=bs, name='displaced')

        # Apply the same reversed displacement to a uniform grid of randoms
        DXrandom = tfpm.lpt1(meshk, config['grid'], config)
        DXrandom = tf.multiply(DXrandom, -1 / bias)
        posrandom = tf.add(config['grid'], DXrandom)
        random = tf.zeros_like(mesh)
        random = tfpm.cic_paint(random, posrandom, boxsize=bs, name='random')

        tf.add_to_collection('recon', [displaced, random])
    return g
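# A minimal usage sketch (not from the original file): `config` is assumed to
# carry 'boxsize', 'nc' and the Lagrangian 'grid' used above, and `basefield`,
# `halopos` are numpy arrays produced elsewhere in the pipeline. The displaced
# and random meshes are read back from the 'recon' collection of the returned
# graph, and their difference is the standard reconstructed field.
#
# g = standardrecon(config, basefield, halopos, bias=2.0, R=8)
# displaced, random = g.get_collection('recon')[0]
# with tf.Session(graph=g) as sess:
#     dmesh, rmesh = sess.run([displaced, random])
# recon = dmesh - rmesh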
def __init__(self,
             nc,
             bs,
             bias,
             errormesh,
             a0=0.1,
             af=1.0,
             nsteps=5,
             nbody=True,
             lpt_order=2,
             anneal=True,
             prior=True):
    self.nc = nc
    self.bs = bs
    self.bias = bias
    self.errormesh = errormesh
    self.a0, self.af, self.nsteps = a0, af, nsteps
    self.stages = np.linspace(a0, af, nsteps, endpoint=True)
    self.nbody = nbody
    self.lpt_order = lpt_order
    self.anneal = anneal
    self.klin = np.loadtxt('../../data/Planck15_a1p00.txt').T[0].astype(np.float32)
    self.plin = np.loadtxt('../../data/Planck15_a1p00.txt').T[1].astype(np.float32)
    self.ipklin = iuspline(self.klin, self.plin)
    # Compute necessary Fourier kernels
    self.kvec = tools.fftk((nc, nc, nc), boxsize=bs, symmetric=False)
    self.kmesh = (sum(k**2 for k in self.kvec)**0.5).astype(np.float32)
    self.priorwt = self.ipklin(self.kmesh)
    self.R0 = tf.constant(0.)
    self.prior = prior
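# Illustrative instantiation only: the enclosing class is not shown in this
# snippet, so `ReconModel` below is a placeholder name, and `errormesh` is
# assumed to be an (nc, nc, nc) error-power mesh built elsewhere in the pipeline.
#
# errormesh = np.ones((64, 64, 64), dtype=np.float32)
# model = ReconModel(nc=64, bs=200, bias=2.0, errormesh=errormesh,
#                    a0=0.1, af=1.0, nsteps=5, nbody=True, lpt_order=2)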
def shear(base):
    '''Takes in a PMesh object in real space. Returns an array of the shear.'''
    s2 = np.zeros_like(base)
    nc = base.shape[0]
    kk = tools.fftk([nc, nc, nc], boxsize=1)
    k2 = sum(ki**2 for ki in kk)
    k2[0, 0, 0] = 1

    basec = np.fft.rfftn(base, norm='ortho')
    for i in range(3):
        for j in range(i, 3):
            tmp = basec * (kk[i] * kk[j] / k2 - diracdelta(i, j) / 3.)
            baser = np.fft.irfftn(tmp, norm='ortho')
            s2[...] += baser**2
            if i != j:
                s2[...] += baser**2  # off-diagonal components appear twice
    return s2
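# Quick sanity check for shear() (a sketch; assumes diracdelta(i, j) returns 1
# when i == j and 0 otherwise, as used above). A small Gaussian random field
# keeps the FFTs cheap; the output has the same shape as the input and is
# non-negative.
#
# base = np.random.normal(size=(32, 32, 32))
# s2 = shear(base)
# print(s2.shape, s2.min(), s2.mean())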
def check_module(modpath):

    print('Test module')

    tf.reset_default_graph()
    module = hub.Module(modpath + '/likelihood/')
    xx = tf.placeholder(tf.float32,
                        shape=[None, None, None, None, nchannels],
                        name='input')
    yy = tf.placeholder(tf.float32,
                        shape=[None, None, None, None, ntargets],
                        name='labels')
    samples = module(dict(features=xx, labels=yy), as_dict=True)['sample']
    loglik = module(dict(features=xx, labels=yy), as_dict=True)['loglikelihood']

    preds = {}
    with tf.Session() as sess:
        sess.run(tf.initializers.global_variables())

        for seed in seeds:
            xxm = np.pad(meshes[seed][0]['cic'], pad, 'wrap')
            #yym = np.stack([np.pad(meshes[seed][1]['pnncen'], pad, 'wrap'), np.pad(meshes[seed][1]['pnnsat'], pad, 'wrap')], axis=-1)
            yym = np.stack([meshes[seed][1][i] for i in tgname], axis=-1)
            print(xxm.shape, yym.shape)
            preds[seed] = sess.run(samples,
                                   feed_dict={
                                       xx: np.expand_dims(np.expand_dims(xxm, -1), 0),
                                       yy: np.expand_dims(yym, 0)
                                   })
            meshes[seed][0]['predict'] = preds[seed][:, :, :, :].sum(axis=-1)
            meshes[seed][0]['predictcen'] = preds[seed][:, :, :, 0]
            meshes[seed][0]['predictsat'] = preds[seed][:, :, :, 1]

    ##############################
    ## Power spectrum
    shape = [nc, nc, nc]
    kk = tools.fftk(shape, bs)
    kmesh = sum(i**2 for i in kk)**0.5

    fig, ax = plt.subplots(2, 3, figsize=(12, 8))
    for seed in seeds:
        for i, key in enumerate(['cen', 'sat']):
            predict, hpmeshd = meshes[seed][0]['predict%s' % key], meshes[seed][1]['pnn%s' % key]
            k, pkpred = tools.power(predict / predict.mean(), boxsize=bs, k=kmesh)
            k, pkhd = tools.power(hpmeshd / hpmeshd.mean(), boxsize=bs, k=kmesh)
            k, pkhx = tools.power(hpmeshd / hpmeshd.mean(),
                                  predict / predict.mean(),
                                  boxsize=bs,
                                  k=kmesh)
            ##
            ax[0, i].semilogx(k[1:], pkpred[1:] / pkhd[1:], label=seed)
            ax[1, i].semilogx(k[1:], pkhx[1:] / (pkpred[1:] * pkhd[1:])**0.5)
            ax[0, i].set_title(key, fontsize=12)

        i = 2
        predict, hpmeshd = meshes[seed][0]['predictcen'] + meshes[seed][0]['predictsat'], \
            meshes[seed][1]['pnncen'] + meshes[seed][1]['pnnsat']
        k, pkpred = tools.power(predict / predict.mean(), boxsize=bs, k=kmesh)
        k, pkhd = tools.power(hpmeshd / hpmeshd.mean(), boxsize=bs, k=kmesh)
        k, pkhx = tools.power(hpmeshd / hpmeshd.mean(),
                              predict / predict.mean(),
                              boxsize=bs,
                              k=kmesh)
        ##
        ax[0, i].semilogx(k[1:], pkpred[1:] / pkhd[1:], label=seed, ls='-')
        ax[1, i].semilogx(k[1:], pkhx[1:] / (pkpred[1:] * pkhd[1:])**0.5, ls='-')

    for axis in ax.flatten():
        axis.legend(fontsize=14)
        axis.set_yticks(np.arange(0, 1.1, 0.1))
        axis.grid(which='both')
        axis.set_ylim(0., 1.1)
    ax[0, i].set_title('All Gal', fontsize=15)
    ax[0, 0].set_ylabel('Transfer function', fontsize=14)
    ax[1, 0].set_ylabel('Cross correlation', fontsize=14)
    plt.savefig(savepath + '/2pt%d.png' % max_steps)
    plt.show()

    ##################################################
    fig, ax = plt.subplots(2, 3, figsize=(12, 8))
    for i, key in enumerate(['cen', 'sat']):
        predict, hpmeshd = meshes[seed][0]['predict%s' % key], meshes[seed][1]['pnn%s' % key]
        vmin, vmax = 0, (hpmeshd[:, :, :].sum(axis=0)).max()
        im = ax[0, i].imshow(predict[:, :, :].sum(axis=0), vmin=vmin, vmax=vmax)
        im = ax[1, i].imshow(hpmeshd[:, :, :].sum(axis=0), vmin=vmin, vmax=vmax)
        ax[0, i].set_title(key, fontsize=15)

    i = 2
    predict, hpmeshd = meshes[seed][0]['predictcen'] + meshes[seed][0]['predictsat'], \
        meshes[seed][1]['pnncen'] + meshes[seed][1]['pnnsat']
    im = ax[0, i].imshow(predict[:, :, :].sum(axis=0), vmin=vmin, vmax=vmax)
    im = ax[1, i].imshow(hpmeshd[:, :, :].sum(axis=0), vmin=vmin, vmax=vmax)
    ax[0, i].set_title('All Gal', fontsize=15)
    ax[0, 0].set_ylabel('Prediction', fontsize=15)
    ax[1, 0].set_ylabel('Truth', fontsize=15)
    plt.savefig(savepath + '/imshow%d.png' % max_steps)
    plt.show()
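# The two curves above are the usual two-point diagnostics: the ratio of auto
# power spectra (transfer function) and the cross-correlation coefficient.
# Written out as a helper using the same tools.power interface (a sketch, not
# part of the original script):
#
# def twopoint_diagnostics(predict, truth, bs, kmesh):
#     k, pk_pred = tools.power(predict / predict.mean(), boxsize=bs, k=kmesh)
#     k, pk_true = tools.power(truth / truth.mean(), boxsize=bs, k=kmesh)
#     k, pk_x = tools.power(truth / truth.mean(), predict / predict.mean(),
#                           boxsize=bs, k=kmesh)
#     return k, pk_pred / pk_true, pk_x / (pk_pred * pk_true)**0.5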
from tensorflow import set_random_seed
set_random_seed(seed_in)

bs = 400
nc, ncf = 128, 512
ncp = 128
step, stepf = 5, 40
path = './../data/z00/'
ftype = 'L%04d_N%04d_S%04d_%02dstep/'
numd = 1e-3
num = int(numd * bs**3)
R1 = 3
R2 = 3 * 1.2
kny = np.pi * ncp / bs
kk = tools.fftk((ncp, ncp, ncp), bs)
seeds = [100, 200, 300, 400]
rprob = 0.5

#############################

suff = 'pad2-logistic'
fname = open('./models/n10/README', 'a+', 1)
fname.write(
    '%s \t :\n\tModel to predict central and satellite likelihood in trainestmodvargal.py with data '
    'supplemented by size=8, 16, 32, 64, 128; rotation with probability=0.5 and padding the mesh with '
    '2 cells. Also reduce learning rate in a piecewise constant manner. Changed n_y=1 and the high of '
    'the quantized distribution to 4\n' % suff)
fname.close()

savepath = './models/galmodel/%s/' % suff
#if not os.path.exists(savepath):
#    os.makedirs(savepath)
def check_module(modpath):

    print('\nTest module\n')

    tf.reset_default_graph()
    module = hub.Module(modpath + '/likelihood/')
    xx = tf.placeholder(tf.float32,
                        shape=[None, None, None, None, nchannels],
                        name='input')
    yy = tf.placeholder(tf.float32,
                        shape=[None, None, None, None, ntargets],
                        name='labels')
    samples = module(dict(features=xx, labels=yy), as_dict=True)['sample']
    loglik = module(dict(features=xx, labels=yy), as_dict=True)['loglikelihood']

    preds = {}
    with tf.Session() as sess:
        sess.run(tf.initializers.global_variables())

        for seed in vseeds:
            xxm = np.stack(
                [np.pad(vmeshes[seed][0][i], pad, 'wrap') for i in ftname], axis=-1)
            #yym = np.stack([np.pad(vmeshes[seed][1]['pnncen'], pad, 'wrap'), np.pad(vmeshes[seed][1]['pnnsat'], pad, 'wrap')], axis=-1)
            yym = np.stack([vmeshes[seed][1][i] for i in tgname], axis=-1)
            print('xxm, yym shape = ', xxm.shape, yym.shape)
            print('xxm :', xxm.mean(), xxm.std())
            print('yym :', yym.mean(), yym.std())
            preds[seed] = sess.run(samples,
                                   feed_dict={
                                       xx: np.expand_dims(xxm, 0),
                                       yy: 0 * np.expand_dims(yym, 0)
                                   })
            vmeshes[seed][0]['predict'] = np.squeeze(preds[seed])

    ##############################
    ## Power spectrum
    shape = [nc, nc, nc]
    kk = tools.fftk(shape, bs)
    kmesh = sum(i**2 for i in kk)**0.5

    fig, axar = plt.subplots(2, 2, figsize=(8, 8))
    ax = axar[0]
    for seed in vseeds:
        for i, key in enumerate(['']):
            predict, hpmeshd = vmeshes[seed][0]['predict%s' % key], vmeshes[seed][1][tgname[0]]
            if predict.mean() < 1e-3:
                predict += 1
            if hpmeshd.mean() < 1e-3:
                hpmeshd += 1
            k, pkpred = tools.power(predict / predict.mean(), boxsize=bs, k=kmesh)
            k, pkhd = tools.power(hpmeshd / hpmeshd.mean(), boxsize=bs, k=kmesh)
            k, pkhx = tools.power(hpmeshd / hpmeshd.mean(),
                                  predict / predict.mean(),
                                  boxsize=bs,
                                  k=kmesh)
            ##
            ax[0].semilogx(k[1:], pkpred[1:] / pkhd[1:], label=seed)
            ax[1].semilogx(k[1:], pkhx[1:] / (pkpred[1:] * pkhd[1:])**0.5)
            ax[0].set_title(key, fontsize=12)

    for axis in ax.flatten():
        axis.legend(fontsize=14)
        axis.set_yticks(np.arange(0, 1.2, 0.1))
        axis.grid(which='both')
        axis.set_ylim(0., 1.1)
    ax[0].set_ylabel('Transfer function', fontsize=14)
    ax[1].set_ylabel('Cross correlation', fontsize=14)

    #
    ax = axar[1]
    for i, key in enumerate(['']):
        predict, hpmeshd = vmeshes[seed][0]['predict%s' % key], vmeshes[seed][1][tgname[0]]
        vmin, vmax = 0, (hpmeshd[:, :, :].sum(axis=0)).max()
        #im = ax[0].imshow(predict[:, :, :].sum(axis=0), vmin=vmin, vmax=vmax)
        #im = ax[1].imshow(hpmeshd[:, :, :].sum(axis=0), vmin=vmin, vmax=vmax)
        im = ax[0].imshow(predict[:, :, :].sum(axis=0))
        plt.colorbar(im, ax=ax[0])
        im = ax[1].imshow(hpmeshd[:, :, :].sum(axis=0))
        plt.colorbar(im, ax=ax[1])
        ax[0].set_title(key, fontsize=15)
    ax[0].set_title('Prediction', fontsize=15)
    ax[1].set_title('Truth', fontsize=15)
    plt.savefig(savepath + '/vpredict%d.png' % max_steps)
    plt.show()

    plt.figure()
    plt.hist(vmeshes[100][0]['predict'].flatten(), range=(-5, 5), bins=100)
    plt.hist(vmeshes[100][1][tgname[0]].flatten(), alpha=0.5, range=(-5, 5), bins=100)
    plt.savefig(savepath + '/hist%d.png' % max_steps)
    plt.show()

    dosampletrue = False
    if max_steps in [50, 100, 500, 1000, 5000, 15000, 25000, 35000, 45000, 55000, 65000]:
        dosampletrue = True
        csize = 16
    if max_steps in [3000, 10000, 20000, 30000, 40000, 50000, 60000, 70000]:
        dosampletrue = True
        csize = 32
    if dosampletrue:
        sampletrue(modpath, csize)
from tensorflow import set_random_seed
set_random_seed(seed_in)

bs = 400
nc, ncf = 128, 512
step, stepf = 5, 40
path = '../../data/z00/'
ftype = 'L%04d_N%04d_S%04d_%02dstep/'
ftypefpm = 'L%04d_N%04d_S%04d_%02dstep_fpm/'
numd = 1e-3
num = int(numd * bs**3)
R1 = 10
R2 = 3 * 1.2
kny = np.pi * nc / bs
kk = tools.fftk((nc, nc, nc), bs)
#seeds = [100]
seeds = [100, 200, 300, 400, 500, 600, 700]
vseeds = [100, 300, 800, 900]

#############################

suff = 'pad0-pixpmnnd-invf8'
normwt = 0.7
nfilter0 = 1
pad = int(0)

#fname = open('../models/n10/README', 'a+', 1)
#fname.write('%s \t :\n\tModel to predict halo position likelihood in halo_logistic with data supplemented by size=8, 16, 32, 64, 128; rotation with probability=0.5 and padding the mesh with 2 cells. Also reduce learning rate in piecewise constant manner. n_y=1 and high of quntized distribution to 3. Init field as 1 feature & high learning rate\n'%suff)
#fname.close()
bs, nc = 400, 128
z = 0
nsteps = 5
seed = 100
seeds = np.arange(100, 1100, 100)

for seed in seeds:
    print('\nDo for seed = %d\n' % seed)

    # Setup
    conf = Config(bs=bs, nc=nc, seed=seed)
    pm = conf.pm
    assert conf['stages'].size == nsteps
    grid = pm.generate_uniform_particle_grid(shift=0).astype(np.float32)
    kvec = tools.fftk((nc, nc, nc), bs, dtype=np.float32, symmetric=False)
    solver = Solver(pm, conf['cosmology'])
    conf['kvec'] = kvec
    conf['grid'] = grid

    # PM
    #whitec = pm.generate_whitenoise(seed, mode='complex', unitary=False)
    #lineark = whitec.apply(lambda k, v: Planck15.get_pklin(sum(ki**2 for ki in k)**0.5, 0)**0.5 * v / v.BoxSize.prod()**0.5)
    #linear = lineark.c2r()
    linear = BigFileMesh(
        '/project/projectdirs/astro250/chmodi/cosmo4d/data/z00/L0400_N0128_S0100_05step/mesh/',
        's').paint()
    lineark = linear.r2c()
    state = solver.lpt(lineark, grid, conf['stages'][0], order=2)
    solver.nbody(state, leapfrog(conf['stages']))
    final = pm.paint(state.X)
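    # Optional follow-up sketch (assumptions: `final` and `linear` expose their
    # values as numpy arrays via [...] indexing, and tools.power accepts plain
    # arrays as elsewhere in this repo): compare the evolved field with the input.
    #
    # k, pk_final = tools.power(np.array(final[...]), boxsize=bs)
    # k, pk_lin = tools.power(np.array(linear[...]), boxsize=bs)
    # print(pk_final[1:5] / pk_lin[1:5])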
defdict = {}
defdict['bs'] = 400
defdict['nc'], defdict['ncf'] = 128, 512
defdict['step'], defdict['stepf'] = 5, 40
defdict['path'] = '../../data/z00/'
defdict['ftype'] = 'L%04d_N%04d_S%04d_%02dstep/'
defdict['ftypefpm'] = 'L%04d_N%04d_S%04d_%02dstep_fpm/'
defdict['numd'] = 1e-3
defdict['R1'] = 3
defdict['R2'] = 3 * 1.2
#
defdict['num'] = int(defdict['numd'] * defdict['bs']**3)
defdict['kny'] = np.pi * defdict['nc'] / defdict['bs']
defdict['kk'] = tools.fftk((defdict['nc'], defdict['nc'], defdict['nc']), defdict['bs'])
defdict['seeds'] = [100, 200, 300, 400]
defdict['vseeds'] = [100, 300, 800, 900]
defdict['rprob'] = 0.5


def get_meshes(seed, pdict=defdict):
    # Assigning into locals() inside a function does not create local variables
    # in Python 3, so unpack the entries that are actually used explicitly.
    path, ftypefpm = pdict['path'], pdict['ftypefpm']
    bs, nc, step = pdict['bs'], pdict['nc'], pdict['step']
    kk, R1, kny = pdict['kk'], pdict['R1'], pdict['kny']
    ncp = nc  # painting grid; the original referenced a module-level ncp, which equals nc in these configs

    mesh = {}
    mesh['s'] = tools.readbigfile(path + ftypefpm % (bs, nc, seed, step) + 'mesh/s/')
    partp = tools.readbigfile(path + ftypefpm % (bs, nc, seed, step) + 'dynamic/1/Position/')
    mesh['cic'] = tools.paintcic(partp, bs, ncp)
    #mesh['decic'] = tools.decic(mesh['cic'], kk, kny)
    mesh['R1'] = tools.fingauss(mesh['cic'], kk, R1, kny)
cscratch = "../figs_recon/" nc, bs = 64, 200 a0, a, nsteps = 0.1, 1.0, 5 stages = np.linspace(a0, a, nsteps, endpoint=True) anneal = True niter = 200 optimizer = 'adam' lr = 0.01 RRs = [2, 1, 0.5, 0] klin = np.loadtxt('..//data/Planck15_a1p00.txt').T[0].astype(np.float32) plin = np.loadtxt('..//data/Planck15_a1p00.txt').T[1].astype(np.float32) ipklin = iuspline(klin, plin) # Compute necessary Fourier kernels kvec = tools.fftk((nc, nc, nc), boxsize=bs, symmetric=False) kmesh = (sum(k**2 for k in kvec)**0.5).astype(np.float32) priorwt = ipklin(kmesh) fpath = "./tmp/dm-tf2-%s-%d/" % (optimizer, nc) for ff in [fpath]: #for ff in [fpath, fpath + '/figs']: try: os.makedirs(ff) except Exception as e: print(e) dtype = tf.float32 @tf.function
set_random_seed(seed_in)

bss, ncc = [100, 200], [32, 64]
batch_size = [32, 8]
cube_sizes = np.array(ncc)
nsizes = len(cube_sizes)
bsnclist = list(zip(bss, ncc))
for i in bsnclist:
    print(i)

numd = 1e-3
num = [int(numd * bs**3) for bs in bss]
R1 = 3
R2 = 3 * 1.2
# bsnclist entries are (bs, nc); unpack them in that order
knylist = [np.pi * nc / bs for bs, nc in bsnclist]
kklist = [tools.fftk((nc, nc, nc), bs) for bs, nc in bsnclist]

#############################

stellar = False
distribution = 'normal'
#suff = 'pad0-pix-Hpnn-map8-mix4'
suff = 'pad0-pix-cic-pcicmdncmask4normmix-map8-4normmix'
n_mixture = 4
pad = int(0)

savepath = '../models/n10/%s/' % suff
try:
    os.makedirs(savepath)
except:
    # directory may already exist
    pass
def main(_):

    infield = True
    dtype = tf.float32
    mesh_shape = mtf.convert_to_shape(FLAGS.mesh_shape)
    nc, bs = FLAGS.nc, FLAGS.box_size
    a0, a, nsteps = FLAGS.a0, FLAGS.af, FLAGS.nsteps
    stages = np.linspace(a0, a, nsteps, endpoint=True)
    numd = 1e-3

    ## Begin here
    klin = np.loadtxt('../data/Planck15_a1p00.txt').T[0]
    plin = np.loadtxt('../data/Planck15_a1p00.txt').T[1]
    ipklin = iuspline(klin, plin)

    #pypath = '/global/cscratch1/sd/chmodi/cosmo4d/output/version2/L0400_N0128_05step-fof/lhd_S0100/n10/opt_s999_iM12-sm3v25off/meshes/'
    final = tools.readbigfile('../data/L0400_N0128_S0100_05step/mesh/d/')
    ic = tools.readbigfile('../data/L0400_N0128_S0100_05step/mesh/s/')
    fpos = tools.readbigfile('../data/L0400_N0128_S0100_05step/dynamic/1/Position/')

    hpos = tools.readbigfile(
        '../data/L0400_N0512_S0100_40step/FOF/PeakPosition//')[1:int(bs**3 * numd)]
    hmass = tools.readbigfile(
        '../data/L0400_N0512_S0100_40step/FOF/Mass//')[1:int(bs**3 * numd)].flatten()

    meshpos = tools.paintcic(hpos, bs, nc)
    meshmass = tools.paintcic(hpos, bs, nc, hmass.flatten() * 1e10)
    data = meshmass
    data /= data.mean()
    data -= 1
    kv = tools.fftk([nc, nc, nc], bs, symmetric=True, dtype=np.float32)
    datasm = tools.fingauss(data, kv, 3, np.pi * nc / bs)
    ic, data = np.expand_dims(ic, 0), np.expand_dims(data, 0).astype(np.float32)
    datasm = np.expand_dims(datasm, 0).astype(np.float32)
    print("Min in data : %0.4e" % datasm.min())

    np.save(fpath + 'ic', ic)
    np.save(fpath + 'data', data)

    ####################################################
    #
    tf.reset_default_graph()
    tfic = tf.constant(ic.astype(np.float32))
    state = lpt_init(tfic, a0=0.1, order=1)
    final_state = nbody(state, stages, FLAGS.nc)
    tfinal_field = cic_paint(tf.zeros_like(tfic), final_state[0])

    with tf.Session() as sess:
        state = sess.run(final_state)

    fpos = state[0, 0] * bs / nc
    bparams, bmodel = getbias(bs, nc, data[0] + 1, ic[0], fpos)
    #bmodel += 1 #np.expand_dims(bmodel, 0) + 1
    errormesh = data - np.expand_dims(bmodel, 0)
    kerror, perror = tools.power(errormesh[0] + 1, boxsize=bs)
    kerror, perror = kerror[1:], perror[1:]
    print("Error power spectra", kerror, perror)
    print("\nkerror", kerror.min(), kerror.max(), "\n")
    print("\nperror", perror.min(), perror.max(), "\n")
    suff = "-error"
    dg.saveimfig(suff, [ic, errormesh], [ic, data], fpath + '/figs/')
    dg.save2ptfig(suff, [ic, errormesh], [ic, data], fpath + '/figs/', bs)
    ipkerror = iuspline(kerror, perror)

    ####################################################
    #stdinit = srecon.standardinit(bs, nc, meshpos, hpos, final, R=8)

    recon_estimator = tf.estimator.Estimator(model_fn=model_fn, model_dir=fpath)

    def predict_input_fn(data=data,
                         M0=0.,
                         w=3.,
                         R0=0.,
                         off=None,
                         istd=None,
                         x0=None):
        features = {}
        features['datasm'] = data
        features['R0'] = R0
        features['x0'] = x0
        features['bparams'] = bparams
        features['ipkerror'] = [kerror, perror]  #ipkerror
        return features, None

    eval_results = recon_estimator.predict(
        input_fn=lambda: predict_input_fn(x0=ic), yield_single_examples=False)

    for i, pred in enumerate(eval_results):
        if i > 0:
            break

    suff = '-model'
    dg.saveimfig(suff, [pred['ic'], pred['model']], [ic, data], fpath + '/figs/')
    dg.save2ptfig(suff, [pred['ic'], pred['model']], [ic, data], fpath + '/figs/', bs)
    np.save(fpath + '/reconmeshes/ic_true' + suff, pred['ic'])
    np.save(fpath + '/reconmeshes/fin_true' + suff, pred['final'])
    np.save(fpath + '/reconmeshes/model_true' + suff, pred['model'])

    #
    randominit = np.random.normal(size=data.size).reshape(data.shape)
    #eval_results = recon_estimator.predict(input_fn=lambda: predict_input_fn(x0=np.expand_dims(stdinit, 0)), yield_single_examples=False)
    eval_results = recon_estimator.predict(
        input_fn=lambda: predict_input_fn(x0=randominit), yield_single_examples=False)

    for i, pred in enumerate(eval_results):
        if i > 0:
            break

    suff = '-init'
    dg.saveimfig(suff, [pred['ic'], pred['model']], [ic, data], fpath + '/figs/')
    dg.save2ptfig(suff, [pred['ic'], pred['model']], [ic, data], fpath + '/figs/', bs)
    np.save(fpath + '/reconmeshes/ic_init' + suff, pred['ic'])
    np.save(fpath + '/reconmeshes/fin_init' + suff, pred['final'])
    np.save(fpath + '/reconmeshes/model_init' + suff, pred['model'])

    #
    # Train and evaluate model.
    RRs = [4., 2., 1., 0.5, 0.]
    niter = 100
    iiter = 0

    for R0 in RRs:

        print('\nFor iteration %d\n' % iiter)
        print('With R0=%0.2f \n' % (R0))

        def train_input_fn():
            features = {}
            features['datasm'] = data
            features['R0'] = R0
            features['bparams'] = bparams
            features['ipkerror'] = [kerror, perror]  #ipkerror
            #features['x0'] = np.expand_dims(stdinit, 0)
            features['x0'] = randominit
            features['lr'] = 0.01
            return features, None

        recon_estimator.train(input_fn=train_input_fn, max_steps=iiter + niter)
        eval_results = recon_estimator.predict(input_fn=predict_input_fn,
                                               yield_single_examples=False)

        for i, pred in enumerate(eval_results):
            if i > 0:
                break

        iiter += niter
        #
        suff = '-%d-R%d' % (iiter, R0)
        dg.saveimfig(suff, [pred['ic'], pred['model']], [ic, data], fpath + '/figs/')
        dg.save2ptfig(suff, [pred['ic'], pred['model']], [ic, data], fpath + '/figs/', bs)
        np.save(fpath + '/reconmeshes/ic' + suff, pred['ic'])
        np.save(fpath + '/reconmeshes/fin' + suff, pred['final'])
        np.save(fpath + '/reconmeshes/model' + suff, pred['model'])

    sys.exit(0)

    ##
    exit(0)
import tools
from getbiasparams import getbias
import tensorflow as tf

posdata = True
bs = 1000
nc = 64
ncf = 512
nsteps = 3
nstepsf = 20
numd = 3e-4
num = int(numd * bs**3)
R = 256

kvsym = tools.fftk([nc, nc, nc], bs, symmetric=True, dtype=np.float32)
#kv = kernels.fftk([nc, nc, nc], symmetric=False, dtype=base.dtype)
# Compute necessary Fourier kernels
kvec = tools.fftk((nc, nc, nc), boxsize=bs, symmetric=False)
kmesh = (sum(k**2 for k in kvec)**0.5).astype(np.float32)


@tf.function
def standardrecon(base, pos, bias, R):
    #base = base.astype(np.float32)
    #pos = pos.astype(base.dtype)
    smwts = tf.exp(tf.multiply(-kmesh**2, R**2))
    basek = utils.r2c3d(base, norm=nc**3)
    basek = tf.multiply(basek, tf.cast(smwts, tf.complex64))
    basesm = utils.c2r3d(basek, norm=nc**3)
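# The three lines above apply Gaussian smoothing in Fourier space, multiplying
# each mode by exp(-k^2 R^2) on the same kmesh defined earlier. A numpy
# equivalent, useful only as a cross-check of the TF version (a sketch, not
# part of the pipeline):
#
# def gauss_smooth_np(field, R):
#     fieldk = np.fft.fftn(field)
#     return np.real(np.fft.ifftn(fieldk * np.exp(-kmesh**2 * R**2)))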
defdict['numd'] = 1e-3
defdict['R1'] = 3
defdict['R2'] = 3 * 1.2
defdict['seeds'] = [100, 200, 300, 400]
defdict['vseeds'] = [100, 300, 800, 900]
defdict['rprob'] = 0.5

# At module scope locals() is globals(), so this hoists every defdict entry
# into the module namespace (the same trick does not work inside a function).
for i in defdict.keys():
    locals()[i] = defdict[i]

pdict = {}
for i in defdict.keys():
    pdict[i] = defdict[i]
pdict['num'] = int(numd * bs**3)
pdict['kny'] = np.pi * nc / bs
pdict['kk'] = tools.fftk((nc, nc, nc), bs)

#bs = 400
#nc, ncf = 128, 512
#step, stepf = 5, 40
#path = '../../data/z00/'
#ftype = 'L%04d_N%04d_S%04d_%02dstep/'
#ftypefpm = 'L%04d_N%04d_S%04d_%02dstep_fpm/'
#numd = 1e-3
#num = int(numd*bs**3)
#R1 = 3
#R2 = 3*1.2
#kny = np.pi*nc/bs
#kk = tools.fftk((nc, nc, nc), bs)
#seeds = [100, 200, 300, 400]
#vseeds = [100, 300, 800, 900]
#rprob = 0.5
def main(_):

    infield = True
    dtype = tf.float32
    mesh_shape = mtf.convert_to_shape(FLAGS.mesh_shape)
    nc, bs = FLAGS.nc, FLAGS.box_size
    a0, a, nsteps = FLAGS.a0, FLAGS.af, FLAGS.nsteps
    stages = np.linspace(a0, a, nsteps, endpoint=True)
    numd = 1e-3

    startw = time.time()

    print(mesh_shape)

    #layout_rules = mtf.convert_to_layout_rules(FLAGS.layout)
    #mesh_shape = [("row", FLAGS.nx), ("col", FLAGS.ny)]
    layout_rules = [("nx_lr", "row"), ("ny_lr", "col"), ("nx", "row"),
                    ("ny", "col"), ("ty", "row"), ("tz", "col"),
                    ("ty_lr", "row"), ("tz_lr", "col"), ("nx_block", "row"),
                    ("ny_block", "col")]

    # Resolve the cluster from SLURM environment
    cluster = tf.distribute.cluster_resolver.SlurmClusterResolver(
        {"mesh": mesh_shape.size // FLAGS.gpus_per_task},
        port_base=8822,
        gpus_per_node=FLAGS.gpus_per_node,
        gpus_per_task=FLAGS.gpus_per_task,
        tasks_per_node=FLAGS.tasks_per_node)
    cluster_spec = cluster.cluster_spec()
    print(cluster_spec)

    # Create a server for all mesh members
    server = tf.distribute.Server(cluster_spec, "mesh", cluster.task_id)
    print(server)

    if cluster.task_id > 0:
        server.join()

    # Otherwise we are the main task, let's define the devices
    devices = [
        "/job:mesh/task:%d/device:GPU:%d" % (i, j)
        for i in range(cluster_spec.num_tasks("mesh"))
        for j in range(FLAGS.gpus_per_task)
    ]
    print("List of devices", devices)
    mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl(
        mesh_shape, layout_rules, devices)

    ## Begin here
    klin = np.loadtxt('../flowpm/data/Planck15_a1p00.txt').T[0]
    plin = np.loadtxt('../flowpm/data/Planck15_a1p00.txt').T[1]
    ipklin = iuspline(klin, plin)

    final = tools.readbigfile(
        '/project/projectdirs/m3058/chmodi/cosmo4d/data/L0400_N0128_S0100_05step/mesh/d/')
    ic = tools.readbigfile(
        '/project/projectdirs/m3058/chmodi/cosmo4d/data/L0400_N0128_S0100_05step/mesh/s/')

    pypath = '/global/cscratch1/sd/chmodi/cosmo4d/output/version2/L0400_N0128_05step-fof/lhd_S0100/n10/opt_s999_iM12-sm3v25off/meshes/'
    fin = tools.readbigfile(pypath + 'decic//')

    hpos = tools.readbigfile(
        '/project/projectdirs/m3058/chmodi/cosmo4d/data/L0400_N0512_S0100_40step/FOF/PeakPosition//'
    )[1:int(bs**3 * numd)]
    hmass = tools.readbigfile(
        '/project/projectdirs/m3058/chmodi/cosmo4d/data/L0400_N0512_S0100_40step/FOF/Mass//'
    )[1:int(bs**3 * numd)].flatten()

    #meshpos = tools.paintcic(hpos, bs, nc)
    meshmass = tools.paintcic(hpos, bs, nc, hmass.flatten() * 1e10)
    data = meshmass
    kv = tools.fftk([nc, nc, nc], bs, symmetric=True, dtype=np.float32)
    datasm = tools.fingauss(data, kv, 3, np.pi * nc / bs)
    ic, data = np.expand_dims(ic, 0), np.expand_dims(data, 0).astype(np.float32)
    datasm = np.expand_dims(datasm, 0).astype(np.float32)
    print("Min in data : %0.4e" % datasm.min())

    np.save(fpath + 'ic', ic)
    np.save(fpath + 'data', data)

    ####################################################
    tf.reset_default_graph()
    print('ic constructed')

    graph = mtf.Graph()
    mesh = mtf.Mesh(graph, "my_mesh")

    initial_conditions, data_field, loss, var_grads, update_op, linear_op, \
        input_field, lr, R0, M0, width, chisq, prior, tf_off, tf_istd = recon_prototype(
            mesh, datasm, nc=FLAGS.nc, batch_size=FLAGS.batch_size, dtype=dtype)

    # Lower mesh computation
    start = time.time()
    lowering = mtf.Lowering(graph, {mesh: mesh_impl})
    restore_hook = mtf.MtfRestoreHook(lowering)
    end = time.time()
    print('\n Time for lowering : %f \n' % (end - start))

    tf_initc = lowering.export_to_tf_tensor(initial_conditions)
    tf_data = lowering.export_to_tf_tensor(data_field)
    tf_chisq = lowering.export_to_tf_tensor(chisq)
    tf_prior = lowering.export_to_tf_tensor(prior)
    tf_grads = lowering.export_to_tf_tensor(var_grads[0])
    #tf_lr = lowering.export_to_tf_tensor(lr)
    tf_linear_op = lowering.lowered_operation(linear_op)
    tf_update_ops = lowering.lowered_operation(update_op)
    n_block_x, n_block_y, n_block_z = FLAGS.nx, FLAGS.ny, 1
    nc = FLAGS.nc

    with tf.Session(server.target) as sess:

        start = time.time()
        sess.run(tf_linear_op, feed_dict={input_field: ic})
        ic_check, data_check = sess.run([tf_initc, tf_data], {width: 3})
        dg.saveimfig('-check', [ic_check, data_check], [ic, data], fpath + '/figs/')
        dg.save2ptfig('-check', [ic_check, data_check], [ic, data], fpath + '/figs/', bs)
        print('Total time taken for mesh thingy is : ', time.time() - start)

        sess.run(tf_linear_op,
                 feed_dict={
                     input_field: np.random.normal(size=ic.size).reshape(ic.shape)
                 })
        ic0, data0 = sess.run([tf_initc, tf_data], {width: 3})
        dg.saveimfig('-init', [ic0, data0], [ic, data], fpath)
        start = time.time()

        titer = 20
        niter = 101
        iiter = 0

        start0 = time.time()
        RRs = [4, 2, 1, 0.5, 0]
        wws = [1, 2, 3]
        lrs = np.array([0.1, 0.1, 0.1, 0.1, 0.1]) * 2
        #lrs = [0.1, 0.05, 0.01, 0.005, 0.001]

        readin = True
        mm0, ww0, RR0 = 1e12, 3, 0.5
        if readin:
            icread = np.load(fpath + '/figs-M%02d-R%02d-w%01d/ic_recon.npy' %
                             (np.log10(mm0), 10 * RR0, ww0))
            sess.run(tf_linear_op, feed_dict={input_field: icread})

        for mm in [1e12, 1e11]:

            print('Fraction of points above 1 for mm = %0.2e: ' % mm,
                  (datasm > mm).sum() / datasm.size)
            noisefile = '/project/projectdirs/m3058/chmodi/cosmo4d/train/L0400_N0128_05step-n10/width_3/Wts_30_10_1/r1rf1/hlim-13_nreg-43_batch-5/eluWts-10_5_1/blim-20_nreg-23_batch-100/hist_M%d_na.txt' % (
                np.log10(mm) * 10)
            offset, ivar = setnoise(datasm, noisefile, noisevar=0.25)

            for iR, zlR in enumerate(zip(RRs, lrs)):
                RR, lR = zlR

                for ww in wws:

                    for ff in [
                            fpath + '/figs-M%02d-R%02d-w%01d' % (np.log10(mm), 10 * RR, ww)
                    ]:
                        try:
                            os.makedirs(ff)
                        except Exception as e:
                            print(e)

                    if readin:
                        if mm > mm0:
                            continue
                        elif mm == mm0 and RR > RR0:
                            print(RR, RR0, RRs)
                            continue
                        elif RR == RR0 and ww <= ww0:
                            print(ww, ww0, wws)
                            continue
                        else:
                            print('Starting from %0.2e' % mm, RR, ww)

                    print('Do for %0.2e' % mm, RR, ww)

                    for i in range(niters[iR]):

                        iiter += 1
                        sess.run(tf_update_ops, {
                            lr: lR,
                            M0: mm,
                            R0: RR,
                            width: ww,
                            tf_off: offset,
                            tf_istd: ivar**0.5
                        })
                        if (i % titer == 0):
                            end = time.time()
                            print('Iter : ', i)
                            print('Time taken for %d iterations: ' % titer, end - start)
                            start = end

                            ##
                            ic1, data1, cc, pp = sess.run(
                                [tf_initc, tf_data, tf_chisq, tf_prior], {
                                    M0: mm,
                                    R0: RR,
                                    width: ww,
                                    tf_off: offset,
                                    tf_istd: ivar**0.5
                                })
                            print('Chisq and prior are : ', cc, pp)

                            dg.saveimfig(i, [ic1, data1], [ic, data], ff)
                            dg.save2ptfig(i, [ic1, data1], [ic, data], ff, bs)

                    ic1, data1 = sess.run([tf_initc, tf_data], {width: ww})
                    np.save(ff + '/ic_recon', ic1)
                    np.save(ff + '/data_recon', data1)
                    dg.saveimfig(iiter, [ic1, data1], [ic, data], fpath + '/figs')
                    dg.save2ptfig(iiter, [ic1, data1], [ic, data], fpath + '/figs', bs)

            wws = [3]
            RRs = [0]
            niters = [201, 101, 201]
            lrs = np.array([0.1, 0.1, 0.1])

        ic1, data1 = sess.run([tf_initc, tf_data], {width: 3})
        print('Total time taken for %d iterations is : ' % iiter, time.time() - start0)

        dg.saveimfig('', [ic1, data1], [ic, data], fpath)
        dg.save2ptfig('', [ic1, data1], [ic, data], fpath, bs)

        np.save(fpath + 'ic_recon', ic1)
        np.save(fpath + 'data_recon', data1)
        print('Total wallclock time is : ', time.time() - start0)

    ##
    exit(0)
def main(_):

    infield = True
    dtype = tf.float32
    mesh_shape = mtf.convert_to_shape(FLAGS.mesh_shape)
    nc, bs = FLAGS.nc, FLAGS.box_size
    a0, a, nsteps = FLAGS.a0, FLAGS.af, FLAGS.nsteps
    stages = np.linspace(a0, a, nsteps, endpoint=True)
    numd = 1e-3

    ## Begin here
    klin = np.loadtxt('../flowpm/data/Planck15_a1p00.txt').T[0]
    plin = np.loadtxt('../flowpm/data/Planck15_a1p00.txt').T[1]
    ipklin = iuspline(klin, plin)

    #pypath = '/global/cscratch1/sd/chmodi/cosmo4d/output/version2/L0400_N0128_05step-fof/lhd_S0100/n10/opt_s999_iM12-sm3v25off/meshes/'
    final = tools.readbigfile(
        '/project/projectdirs/m3058/chmodi/cosmo4d/data/L0400_N0128_S0100_05step/mesh/d/')
    ic = tools.readbigfile(
        '/project/projectdirs/m3058/chmodi/cosmo4d/data/L0400_N0128_S0100_05step/mesh/s/')
    fpos = tools.readbigfile(
        '/project/projectdirs/m3058/chmodi/cosmo4d/data/L0400_N0128_S0100_05step/dynamic/1/Position/')

    aa = 1
    zz = 1 / aa - 1
    rsdfactor = float(100 / (aa**2 * cosmo.H(zz).value**1))
    print('\nRsdfactor used is : ', rsdfactor)

    hpos = tools.readbigfile(
        '/project/projectdirs/m3058/chmodi/cosmo4d/data/L0400_N0512_S0100_40step/FOF/PeakPosition//'
    )[1:int(bs**3 * numd)]
    hvel = tools.readbigfile(
        '/project/projectdirs/m3058/chmodi/cosmo4d/data/L0400_N0512_S0100_40step/FOF/CMVelocity//'
    )[1:int(bs**3 * numd)]
    rsdpos = hpos + hvel * rsdfactor * np.array([0, 0, 1])
    print('Effective displacement : ', (hvel[:, -1] * rsdfactor).std())

    hmass = tools.readbigfile(
        '/project/projectdirs/m3058/chmodi/cosmo4d/data/L0400_N0512_S0100_40step/FOF/Mass//'
    )[1:int(bs**3 * numd)].flatten()

    meshpos = tools.paintcic(rsdpos, bs, nc)
    meshmass = tools.paintcic(rsdpos, bs, nc, hmass.flatten() * 1e10)
    data = meshmass
    kv = tools.fftk([nc, nc, nc], bs, symmetric=True, dtype=np.float32)
    datasm = tools.fingauss(data, kv, 3, np.pi * nc / bs)
    ic, data = np.expand_dims(ic, 0), np.expand_dims(data, 0).astype(np.float32)
    datasm = np.expand_dims(datasm, 0).astype(np.float32)
    print("Min in data : %0.4e" % datasm.min())

    #
    ####################################################
    stdinit = srecon.standardinit(bs, nc, meshpos, hpos, final, R=8)

    recon_estimator = tf.estimator.Estimator(model_fn=model_fn, model_dir=fpath)

    def predict_input_fn(data=data,
                         M0=0.,
                         w=3.,
                         R0=0.,
                         off=None,
                         istd=None,
                         x0=None):
        features = {}
        features['datasm'] = data
        features['rsdfactor'] = rsdfactor
        features['M0'] = M0
        features['w'] = w
        features['R0'] = R0
        features['off'] = off
        features['istd'] = istd
        features['x0'] = x0
        return features, None

    eval_results = recon_estimator.predict(
        input_fn=lambda: predict_input_fn(x0=ic), yield_single_examples=False)

    for i, pred in enumerate(eval_results):
        if i > 0:
            break

    suff = '-model'
    dg.saveimfig(suff, [pred['ic'], pred['model']], [ic, data], fpath + '/figs/')
    dg.save2ptfig(suff, [pred['ic'], pred['model']], [ic, data], fpath + '/figs/', bs)
    np.save(fpath + '/reconmeshes/ic_true' + suff, pred['ic'])
    np.save(fpath + '/reconmeshes/fin_true' + suff, pred['final'])
    np.save(fpath + '/reconmeshes/model_true' + suff, pred['model'])

    #
    randominit = np.random.normal(size=data.size).reshape(data.shape)
    #eval_results = recon_estimator.predict(input_fn=lambda: predict_input_fn(x0=np.expand_dims(stdinit, 0)), yield_single_examples=False)
    eval_results = recon_estimator.predict(
        input_fn=lambda: predict_input_fn(x0=randominit), yield_single_examples=False)

    for i, pred in enumerate(eval_results):
        if i > 0:
            break

    suff = '-init'
    dg.saveimfig(suff, [pred['ic'], pred['model']], [ic, data], fpath + '/figs/')
    dg.save2ptfig(suff, [pred['ic'], pred['model']], [ic, data], fpath + '/figs/', bs)
    np.save(fpath + '/reconmeshes/ic_init' + suff, pred['ic'])
    np.save(fpath + '/reconmeshes/fin_init' + suff, pred['final'])
    np.save(fpath + '/reconmeshes/model_init' + suff, pred['model'])

    #
    # Train and evaluate model.
    mms = [1e12, 1e11]
    wws = [1., 2., 3.]
    RRs = [4., 2., 1., 0.5, 0.]
    niter = 100
    iiter = 0

    for mm in mms:

        noisefile = '/project/projectdirs/m3058/chmodi/cosmo4d/train/L0400_N0128_05step-n10/width_3/Wts_30_10_1/r1rf1/hlim-13_nreg-43_batch-5/eluWts-10_5_1/blim-20_nreg-23_batch-100/hist_M%d_na.txt' % (
            np.log10(mm) * 10)
        offset, ivar = setnoise(datasm, noisefile, noisevar=0.25)
        istd = ivar**0.5
        if not FLAGS.offset:
            offset = None
        if not FLAGS.istd:
            istd = None

        for R0 in RRs:

            for ww in wws:

                print('\nFor iteration %d\n' % iiter)
                print('With mm=%0.2e, R0=%0.2f, ww=%d \n' % (mm, R0, ww))

                def train_input_fn():
                    features = {}
                    features['datasm'] = datasm
                    features['rsdfactor'] = rsdfactor
                    features['M0'] = mm
                    features['w'] = ww
                    features['R0'] = R0
                    features['off'] = offset
                    features['istd'] = istd
                    features['x0'] = np.expand_dims(
                        stdinit, 0)  #np.random.normal(size=datasm.size).reshape(datasm.shape)
                    features['lr'] = 0.01
                    return features, None

                recon_estimator.train(input_fn=train_input_fn,
                                      max_steps=iiter + niter)
                eval_results = recon_estimator.predict(input_fn=predict_input_fn,
                                                       yield_single_examples=False)

                for i, pred in enumerate(eval_results):
                    if i > 0:
                        break

                iiter += niter
                #
                suff = '-%d-M%d-R%d-w%d' % (iiter, np.log10(mm), R0, ww)
                dg.saveimfig(suff, [pred['ic'], pred['model']], [ic, data],
                             fpath + '/figs/')
                dg.save2ptfig(suff, [pred['ic'], pred['model']], [ic, data],
                              fpath + '/figs/', bs)
                suff = '-M%d-R%d-w%d' % (np.log10(mm), R0, ww)
                np.save(fpath + '/reconmeshes/ic' + suff, pred['ic'])
                np.save(fpath + '/reconmeshes/fin' + suff, pred['final'])
                np.save(fpath + '/reconmeshes/model' + suff, pred['model'])

        RRs = [1., 0.5, 0.]
        wws = [3.]
        niter = 200

    sys.exit(0)

    ##
    exit(0)
bs = 400
nc, ncf = 128, 512
ncp = 128
shape = (ncp, ncp, ncp)
step, stepf = 5, 40
path = './../data/z00/'
ftype = 'L%04d_N%04d_S%04d_%02dstep/'
numd = 1e-3
num = int(numd * bs**3)
seeds = [100, 200, 500, 700]
R1 = 3
R2 = 3 * 1.2
kny = np.pi * ncp / bs
kk = tools.fftk((ncp, ncp, ncp), bs)

#############################

tf.reset_default_graph()
suff = 'pad2d8regvtest'

ftname = ['cic']
nchannels = len(ftname)

cube_size = 32
max_offset = ncp - cube_size
pad = 2
cube_sizeft = cube_size + 2 * pad
#
def check_module(modpath):

    print('\nTest module\n')

    tf.reset_default_graph()
    module = hub.Module(modpath + '/likelihood/')
    xx = tf.placeholder(tf.float32,
                        shape=[None, None, None, None, nchannels],
                        name='input')
    yy = tf.placeholder(tf.float32,
                        shape=[None, None, None, None, ntargets],
                        name='labels')
    samples = module(dict(features=xx, labels=yy), as_dict=True)['sample']
    loglik = module(dict(features=xx, labels=yy), as_dict=True)['loglikelihood']

    for j in range(nsizes):
        bs, nc = bsnclist[j]

        with tf.Session() as sess:
            sess.run(tf.initializers.global_variables())

            vseeds = np.random.choice(test_features[j].shape[0], 10)
            xxm = test_features[j][vseeds]
            yym = test_target[j][vseeds]
            print('xxm, yym shape = ', xxm.shape, yym.shape)
            preds = sess.run(samples, feed_dict={xx: xxm, yy: yym})

        ##############################
        ## Power spectrum
        shape = [nc, nc, nc]
        kk = tools.fftk(shape, bs)
        kmesh = sum(i**2 for i in kk)**0.5
        print(kmesh.shape)
        print(preds.shape, yym.shape)

        fig, axar = plt.subplots(2, 2, figsize=(8, 8))
        ax = axar[0]
        for iseed, seed in enumerate(vseeds):
            predict, hpmeshd = np.squeeze(preds[iseed]), np.squeeze(yym[iseed])
            k, pkpred = tools.power(predict / predict.mean(), boxsize=bs, k=kmesh)
            k, pkhd = tools.power(hpmeshd / hpmeshd.mean(), boxsize=bs, k=kmesh)
            k, pkhx = tools.power(hpmeshd / hpmeshd.mean(),
                                  predict / predict.mean(),
                                  boxsize=bs,
                                  k=kmesh)
            ##
            ax[0].semilogx(k[1:], pkpred[1:] / pkhd[1:], label=seed)
            ax[1].semilogx(k[1:], pkhx[1:] / (pkpred[1:] * pkhd[1:])**0.5)

        for axis in ax.flatten():
            axis.legend(fontsize=14, ncol=3)
            axis.set_yticks(np.arange(0, 1.2, 0.1))
            axis.grid(which='both')
            axis.set_ylim(0., 1.1)
        ax[0].set_ylabel('Transfer function', fontsize=14)
        ax[1].set_ylabel('Cross correlation', fontsize=14)

        #
        ax = axar[1]
        vmin, vmax = 1, (hpmeshd[:, :, :].sum(axis=0)).max()
        im = ax[0].imshow(predict[:, :, :].sum(axis=0), vmin=vmin, vmax=vmax)
        plt.colorbar(im, ax=ax[0])
        im = ax[1].imshow(hpmeshd[:, :, :].sum(axis=0), vmin=vmin, vmax=vmax)
        plt.colorbar(im, ax=ax[1])
        ax[0].set_title('Prediction', fontsize=15)
        ax[1].set_title('Truth', fontsize=15)
        plt.savefig(savepath + '/vpredict%d-%d.png' % (nc, max_steps))
        plt.show()

        plt.figure()
        plt.hist(yym.flatten(), range=(-1, 20), bins=100, label='target', alpha=0.8)
        plt.hist(preds.flatten(), range=(-1, 20), bins=100, label='predict', alpha=0.5)
        plt.legend()
        plt.savefig(savepath + '/hist%d-%d.png' % (nc, max_steps))
        plt.show()

    ##
    dosampletrue = False
    if max_steps in [100, 5000, 10000, 15000, 20000, 25000, 30000]:
        dosampletrue = True
        csize = 32
    if dosampletrue:
        sampletrue(modpath)
def check_module(modpath):

    print('\nTest module\n')

    tf.reset_default_graph()
    module = hub.Module(modpath + '/likelihood/')
    xx = tf.placeholder(tf.float32,
                        shape=[None, None, None, None, nchannels],
                        name='input')
    yy = tf.placeholder(tf.float32,
                        shape=[None, None, None, None, ntargets],
                        name='labels')
    samples = module(dict(features=xx, labels=yy), as_dict=True)['sample']
    loglik = module(dict(features=xx, labels=yy), as_dict=True)['loglikelihood']

    preds = {}
    with tf.Session() as sess:
        sess.run(tf.initializers.global_variables())

        for seed in vseeds:
            xxm = np.stack(
                [np.pad(vmeshes[seed][0][i], pad, 'wrap') for i in ftname], axis=-1)
            #yym = np.stack([np.pad(vmeshes[seed][1]['pnncen'], pad, 'wrap'), np.pad(vmeshes[seed][1]['pnnsat'], pad, 'wrap')], axis=-1)
            yym = np.stack([vmeshes[seed][1][i] for i in tgname], axis=-1)
            print('xxm, yym shape = ', xxm.shape, yym.shape)
            preds[seed] = sess.run(samples,
                                   feed_dict={
                                       xx: np.expand_dims(xxm, 0),
                                       yy: np.expand_dims(yym, 0)
                                   })
            preds[seed] = np.squeeze(preds[seed])
            vmeshes[seed][0]['predict'] = preds[seed]  #[:, :, :]

    ##############################
    ## Power spectrum
    shape = [nc, nc, nc]
    kk = tools.fftk(shape, bs)
    kmesh = sum(i**2 for i in kk)**0.5

    yy = ['pos', 'mass']
    for iy in range(2):

        fig, axar = plt.subplots(2, 2, figsize=(8, 8))
        ax = axar[0]
        for seed in vseeds:
            predict, hpmeshd = vmeshes[seed][0]['predict'][..., iy], np.stack(
                [vmeshes[seed][1][i] for i in tgname], axis=-1)[..., iy]
            print(predict.shape, hpmeshd.shape)
            k, pkpred = tools.power(predict / predict.mean(), boxsize=bs, k=kmesh)
            k, pkhd = tools.power(hpmeshd / hpmeshd.mean(), boxsize=bs, k=kmesh)
            k, pkhx = tools.power(hpmeshd / hpmeshd.mean(),
                                  predict / predict.mean(),
                                  boxsize=bs,
                                  k=kmesh)
            ##
            ax[0].semilogx(k[1:], pkpred[1:] / pkhd[1:], label=seed)
            ax[1].semilogx(k[1:], pkhx[1:] / (pkpred[1:] * pkhd[1:])**0.5)

        for axis in ax.flatten():
            axis.legend(fontsize=14)
            axis.set_yticks(np.arange(0, 1.2, 0.1))
            axis.grid(which='both')
            axis.set_ylim(0., 1.1)
        ax[0].set_ylabel('Transfer function', fontsize=14)
        ax[1].set_ylabel('Cross correlation', fontsize=14)

        #
        ax = axar[1]
        #predict, hpmeshd = vmeshes[seed][0]['predict'][..., iy], np.stack([vmeshes[seed][1][i] for i in tgname], axis=-1)[..., iy]
        vmin, vmax = 0, (hpmeshd[:, :, :].sum(axis=0)).max()
        im = ax[0].imshow(predict[:, :, :].sum(axis=0), vmin=vmin, vmax=vmax)
        im = ax[1].imshow(hpmeshd[:, :, :].sum(axis=0), vmin=vmin, vmax=vmax)
        ax[0].set_title('Prediction', fontsize=15)
        ax[1].set_title('Truth', fontsize=15)
        plt.savefig(savepath + '/vpredict%d-%s.png' % (max_steps, yy[iy]))
        plt.show()

        plt.figure()
        plt.hist(hpmeshd.flatten(), range=(-1, 20), bins=100, label='target', alpha=0.8)
        plt.hist(predict.flatten(), range=(-1, 20), bins=100, label='predict', alpha=0.5)
        plt.legend()
        plt.yscale('log')
        plt.savefig(savepath + '/hist%d-%s.png' % (max_steps, yy[iy]))
        plt.show()
seed(seed_in)
from tensorflow import set_random_seed
set_random_seed(seed_in)

bs = 400
nc, ncf = 128, 512
step, stepf = 10, 40
path = '../data/z00/'
ftype = 'L%04d_N%04d_S%04d_%02dstep/'
ftypefpm = 'L%04d_N%04d_S%04d_%02dstep_fpm/'
numd = 1e-3
num = int(numd * bs**3)
R1 = 3
R2 = 3 * 1.2
kny = np.pi * nc / bs
kk = tools.fftk((nc, nc, nc), bs)

#############################

pad = int(0)
masktype = 'constant'
dependence = None
suff = 'pad%d-cic-allnn-cmask-pois4normmix-monp' % pad
savepath = '../models/n10/%s/module/' % suff

ftname = ['cic']
tgname = ['pnn', 'mnnnomean']
nchannels = len(ftname)
ntargets = len(tgname)