def standardinit(config, base, pos, final, R=8):

    ## print('Initial condition from standard reconstruction')
    bs, nc = config['boxsize'], config['nc']

    if abs(base.mean()) > 1e-6:
        base = (base - base.mean()) / base.mean()
    pfin = tools.power(final, boxsize=bs)[1]
    ph = tools.power(1 + base, boxsize=bs)[1]
    bias = ((ph[1:5] / pfin[1:5])**0.5).mean()
    print('Bias = ', bias)

    g = standardrecon(config, base, pos, bias, R=R)

    with tf.Session(graph=g) as sess:
        sess.run(tf.global_variables_initializer())
        tfdisplaced = g.get_tensor_by_name('displaced:0')
        tfrandom = g.get_tensor_by_name('random:0')
        displaced, random = sess.run([tfdisplaced, tfrandom])

    displaced /= displaced.mean()
    displaced -= 1
    random /= random.mean()
    random -= 1
    recon = displaced - random
    return recon
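# Usage sketch for standardinit (hypothetical values, not from the original
# source): `config` needs 'boxsize' and 'nc'; `base` and `final` are
# (nc, nc, nc) overdensity meshes and `pos` are tracer positions. The random
# arrays below are placeholders that only illustrate the call signature;
# uncomment the call where standardrecon is available.
#_config = {'boxsize': 400, 'nc': 32}
#_base = np.random.normal(size=(32, 32, 32)).astype(np.float32)
#_final = 1 + 0.1 * np.random.normal(size=(32, 32, 32)).astype(np.float32)
#_pos = np.random.uniform(0, 400, size=(1000, 3)).astype(np.float32)
#_recon = standardinit(_config, _base, _pos, _final, R=8)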
def savehalofig(truemesh, reconmesh, fname, hgraph, boxsize, title=''):
    '''Given a graph and lists of 3 fields each in truemesh & recon-init,
    create the 3x3 diagnostic figure.
    '''
    truelin, truefin, truedata = truemesh

    with tf.Session(graph=hgraph) as sessionh:
        sessionh.run(tf.global_variables_initializer())
        gh = sessionh.graph
        linmesh_t = gh.get_tensor_by_name('linmesh:0')
        datamesh_t = gh.get_tensor_by_name('datamesh:0')
        linear_t = gh.get_tensor_by_name('linear:0')
        final_t = gh.get_tensor_by_name('final:0')
        samples_t = gh.get_tensor_by_name('samples:0')
        linear, final, data = sessionh.run(
            [linear_t, final_t, samples_t], {
                linmesh_t: reconmesh,
                datamesh_t: np.expand_dims(truedata, -1) * 0
            })

    fig, ax = plt.subplots(3, 3, figsize=(12, 12))
    meshes = [[truelin, linear], [truefin, final], [truedata, data]]
    labels = ['Linear', 'Final', 'Data']
    for i in range(3):
        m1, m2 = meshes[i][0], meshes[i][1]
        if abs(m1.mean()) < 1e-6:
            m1, m2 = m1 + 1, m2 + 1
        k, pt = tools.power(m1, boxsize=boxsize)
        k, pr = tools.power(m2, boxsize=boxsize)
        k, px = tools.power(m1, m2, boxsize=boxsize)
        ax[0, 0].semilogx(k, px / (pr * pt)**.5, 'C%d' % i, label=labels[i])
        ax[0, 1].semilogx(k, pr / pt, 'C%d' % i)
        ax[0, 2].loglog(k, pt, 'C%d' % i)
        ax[0, 2].loglog(k, pr, 'C%d--' % i)
        ax[1, i].imshow(m2.sum(axis=0))
        ax[2, i].imshow(m1.sum(axis=0))
    ax[2, 0].set_ylabel('Truth')
    ax[1, 0].set_ylabel('Recon')
    ax[0, 0].set_title('Cross Correlation')
    ax[0, 0].set_ylim(-0.1, 1.1)
    ax[0, 1].set_title('Transfer Function')
    ax[0, 1].set_ylim(-0.1, 2)
    ax[0, 2].set_title('Powers')
    ax[0, 2].set_ylim(1, 1e5)
    ax[0, 0].legend()
    for axis in ax.flatten():
        axis.grid(which='both', lw=0.5, color='gray')
    fig.suptitle(title)
    fig.tight_layout(rect=[0, 0, 1, 0.95])
    fig.savefig(fname)
def savefig(truemesh, reconmesh, fname):
    # Relies on the module-level boxsize `bs`.
    fig, ax = plt.subplots(2, 3, figsize=(12, 8))
    k, pt = tools.power(1 + truemesh, boxsize=bs)
    k, pr = tools.power(1 + reconmesh, boxsize=bs)
    k, px = tools.power(1 + truemesh, 1 + reconmesh, boxsize=bs)
    ax[0, 0].semilogx(k, px / (pr * pt)**.5, 'C0')
    ax[1, 0].semilogx(k, pr / pt, 'C0')
    ax[0, 1].loglog(k, pt)
    ax[1, 1].loglog(k, pr)
    ax[0, 2].imshow(truemesh.sum(axis=0))
    ax[1, 2].imshow(reconmesh.sum(axis=0))
    for axis in ax.flatten():
        axis.grid(which='both', lw=0.5, color='gray')
    fig.tight_layout()
    fig.savefig(fname)
def setupbias(self, traindata, nsims=10, cutoff=1.5):

    args = self.args
    bs, nc = args.bs, args.nc
    b1, b2, perr = [], [], []
    for i in range(nsims):
        idx = np.random.randint(0, traindata.shape[0], 1)
        xx = traindata[idx, 0].astype(np.float32)
        yy = traindata[idx, 1].astype(np.float32)
        _, fpos = self.pmpos(tf.constant(xx))
        fpos = fpos[0].numpy() * bs / nc
        bparams, bmodel = getbias(bs, nc, yy[0] + 1, xx[0], fpos)
        bmodeltf = self.biasfield(
            xx, tf.constant([bparams[0], bparams[1]], dtype=tf.float32)).numpy()
        errormesh = yy - bmodeltf  # np.expand_dims(bmodel, 0)
        kerror, perror = tools.power(errormesh[0] + 1, boxsize=bs)
        kerror, perror = kerror[1:], perror[1:]
        perr += [perror]
        b1 += [bparams[0]]
        b2 += [bparams[1]]

    print(r"b1 : %0.3f $\pm$ %0.2f" % (np.array(b1).mean(), np.array(b1).std()))
    print(r"b2 : %0.3f $\pm$ %0.2f" % (np.array(b2).mean(), np.array(b2).std()))
    b1, b2 = np.array(b1).mean(), np.array(b2).mean()
    perr = np.array(perr).mean(axis=0)
    kny = nc * np.pi / bs
    # Hold the error power constant beyond cutoff * Nyquist, where the
    # measured spectrum is unreliable.
    perr[np.where(kerror > cutoff * kny)] = perr[np.where(kerror > cutoff * kny)[0][0]]
    ipkerror = interp1d(kerror, perr,
                        bounds_error=False,
                        fill_value=(perr[0], perr.max()))
    errormesh = tf.expand_dims(
        tf.constant(ipkerror(args.kmesh), dtype=tf.float32), 0)
    #ipkerror = lambda x: 10**np.interp(np.log10(x), np.log10(kerror), np.log10(perr))
    #errormesh = tf.constant(ipkerror(args.kmesh), dtype=tf.float32)
    bias = tf.constant([b1, b2], dtype=tf.float32)
    return bias, errormesh
def setupbias(nsims=10, cutoff=1.5):
    # Relies on module-level traindata, pmpos, getbias, bs, nc and kmesh.
    b1, b2, perr = [], [], []
    for i in range(nsims):
        idx = np.random.randint(0, traindata.shape[0], 1)
        xx = traindata[idx, 0].astype(np.float32)
        yy = traindata[idx, 1].astype(np.float32)
        _, fpos = pmpos(tf.constant(xx))
        fpos = fpos[0].numpy() * bs / nc
        bparams, bmodel = getbias(bs, nc, yy[0] + 1, xx[0], fpos)
        errormesh = yy - np.expand_dims(bmodel, 0)
        kerror, perror = tools.power(errormesh[0] + 1, boxsize=bs)
        kerror, perror = kerror[1:], perror[1:]
        perr += [perror]
        b1 += [bparams[0]]
        b2 += [bparams[1]]

    print(r"b1 : %0.3f $\pm$ %0.2f" % (np.array(b1).mean(), np.array(b1).std()))
    print(r"b2 : %0.3f $\pm$ %0.2f" % (np.array(b2).mean(), np.array(b2).std()))
    b1, b2 = np.array(b1).mean(), np.array(b2).mean()
    perr = np.array(perr).mean(axis=0)
    kny = nc * np.pi / bs
    # Hold the error power constant beyond cutoff * Nyquist.
    perr[np.where(kerror > cutoff * kny)] = perr[np.where(kerror > cutoff * kny)[0][0]]
    ipkerror = lambda x: 10**np.interp(np.log10(x), np.log10(kerror), np.log10(perr))
    errormesh = tf.constant(ipkerror(kmesh), dtype=tf.float32)
    return b1, b2, errormesh
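# Worked example of the Nyquist cutoff used above, with the box parameters
# that appear elsewhere in these scripts (bs = 400 Mpc/h, nc = 128): the mesh
# Nyquist frequency is kny = nc * pi / bs ~ 1.005 h/Mpc, so with cutoff = 1.5
# the error power is held constant for k > ~1.51 h/Mpc, where the measured
# spectrum is no longer reliable.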
def get_diff_spectra(args, ipklin, nsims=10, nsteps=3):
    bs, nc = args.bs, args.nc
    nsims = args.nsims  # args overrides the default
    numd = args.numd
    try:
        R = args.Rstd
    except AttributeError:
        R = 128
    ncf = args.ncf
    path = '//mnt/ceph/users/cmodi/cosmo4d/z00/'
    dpath = path + '/L%04d_N%04d_D%04d//' % (bs, nc, numd * 1e4)
    alldata = np.array([
        np.load(dpath + 'S%04d.npy' % i) for i in range(100, 100 + nsims)
    ]).astype(np.float32)
    initdata = np.array([
        np.load(dpath + 'stdR%d_S%04d.npy' % (R, i))
        for i in range(100, 100 + nsims)
    ]).astype(np.float32)

    try:
        dyn = "%02dstep" % nsteps
        path = '//mnt/ceph/users/cmodi/cosmo4d/z00/'
        path = path + '/L%04d_N%04d_%s//' % (bs, nc, dyn)
        final = np.array([
            tools.readbigfile(path + '/L%04d_N%04d_S%04d_%02dstep/mesh/d/' %
                              (bs, nc, seed, nsteps))
            for seed in range(100, 100 + nsims)
        ]).astype(np.float32)
    except Exception:
        dyn = "%02dstep_B1" % nsteps
        path = '//mnt/ceph/users/cmodi/cosmo4d/z00/'
        path = path + '/L%04d_N%04d_%s//' % (bs, nc, dyn)
        final = np.array([
            tools.readbigfile(path + '/L%04d_N%04d_S%04d_%02dstep/mesh/d/' %
                              (bs, nc, seed, nsteps))
            for seed in range(100, 100 + nsims)
        ]).astype(np.float32)
    print('alldata shape :', alldata.shape)

    pdiffs, bb = [], []
    for j in range(nsims):
        k, pfin = tools.power(final[j], boxsize=bs)
        ph = tools.power(1 + alldata[j, 1], boxsize=bs)[1]
        bias = ((ph[1:5] / pfin[1:5])**0.5).mean()
        bb.append(bias)
        recon = initdata[j] / bias
        precon = tools.power(1 + recon, boxsize=bs)[1]
        pdiff = ipklin(k) - precon
        pdiffs.append(pdiff)
    pdiff = np.array(pdiffs).mean(axis=0)
    bias = np.array(bb).mean(axis=0)

    xx, yy = k[pdiff > 0], pdiff[pdiff > 0]
    ipkdiff = lambda x: 10**np.interp(np.log10(x), np.log10(xx), np.log10(yy))
    return ipkdiff, bias
def fib(n):
    """Finds the nth Fibonacci number in logarithmic time."""
    golden_ratio = (1 + 5**0.5) / 2
    sequence = 0, 1, 1
    try:
        return sequence[n]
    except IndexError:
        # Binet's formula; float exponentiation makes this effectively O(log n),
        # but floating-point precision limits exactness for large n.
        binet_approx = golden_ratio**n / (5**0.5)
        if n % 2 == 0:
            return int(binet_approx)
        return int(1 + binet_approx)
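# Quick sanity check for fib (a sketch): compare against a plain iterative
# loop for small n, where Binet's formula is exact in floating point.
def _fib_iter(n):
    a, b = 0, 1
    for _ in range(n):
        a, b = b, a + b
    return a

assert all(fib(n) == _fib_iter(n) for n in range(20))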
def save2ptfig(i, iterand, truth, fpath, bs, fsize=12, save=True, retfig=False):

    ic, fin = truth
    ic1, fin1 = iterand

    pks = []
    if abs(ic1[0].mean()) < 1e-3:
        ic1[0] += 1
    #if abs(ic[0].mean()) < 1e-3: ic[0] += 1
    k, p1 = tools.power(ic1[0] + 1, boxsize=bs)
    k, p2 = tools.power(ic[0] + 1, boxsize=bs)
    k, p12 = tools.power(ic1[0] + 1, f2=ic[0] + 1, boxsize=bs)
    pks.append([p1, p2, p12])
    if abs(fin1[0].mean()) < 1e-3:
        fin1[0] += 1
    if abs(fin[0].mean()) < 1e-3:
        fin[0] += 1
    k, p1 = tools.power(fin1[0], boxsize=bs)
    k, p2 = tools.power(fin[0], boxsize=bs)
    k, p12 = tools.power(fin1[0], f2=fin[0], boxsize=bs)
    pks.append([p1, p2, p12])

    fig, ax = plt.subplots(1, 3, figsize=(12, 3.5))
    ax[0].plot(k, pks[0][0], 'C0', lw=2, label='Recon')
    ax[0].plot(k, pks[0][1], 'C0--', lw=2, label='Truth')
    ax[0].plot(k, pks[1][0], 'C1', lw=2)
    ax[0].plot(k, pks[1][1], 'C1--', lw=2)
    ax[0].loglog()
    ax[0].set_ylabel('P(k)')

    p1, p2, p12 = pks[0]
    ax[1].plot(k, p12 / (p1 * p2)**0.5, 'C0', lw=2, label='Init')
    ax[2].plot(k, (p1 / p2)**0.5, 'C0', lw=2, label='')
    p1, p2, p12 = pks[1]
    ax[1].plot(k, p12 / (p1 * p2)**0.5, 'C1', lw=2, label='Final')
    ax[2].plot(k, (p1 / p2)**0.5, 'C1', lw=2, label='')
    ax[1].semilogx()
    ax[2].semilogx()
    ax[1].set_ylim(-0.1, 1.1)
    ax[2].set_ylim(-0.1, 2.)
    ax[1].set_ylabel('Cross correlation', fontsize=fsize)
    ax[2].set_ylabel('Transfer Function', fontsize=fsize)
    for axis in ax:
        axis.legend(fontsize=fsize)
        axis.grid(which='both')
        axis.set_xlabel('k (h/Mpc)', fontsize=fsize)
    plt.tight_layout()
    if save:
        plt.savefig(fpath + '/recon2pt%s.png' % str(i))
        plt.close()
    if retfig:
        return fig, ax
def get_ps(iterand, truth):
    # Relies on the module-level boxsize `bs`.
    ic, fin = truth
    ic1, fin1 = iterand

    pks = []
    if abs(ic1[0].mean()) < 1e-3:
        ic1[0] += 1
    #if abs(ic[0].mean()) < 1e-3: ic[0] += 1
    k, p1 = tools.power(ic1[0] + 1, boxsize=bs)
    k, p2 = tools.power(ic[0] + 1, boxsize=bs)
    k, p12 = tools.power(ic1[0] + 1, f2=ic[0] + 1, boxsize=bs)
    pks.append([p1, p2, p12])
    if abs(fin1[0].mean()) < 1e-3:
        fin1[0] += 1
    if abs(fin[0].mean()) < 1e-3:
        fin[0] += 1
    k, p1 = tools.power(fin1[0], boxsize=bs)
    k, p2 = tools.power(fin[0], boxsize=bs)
    k, p12 = tools.power(fin1[0], f2=fin[0], boxsize=bs)
    pks.append([p1, p2, p12])

    return k, pks
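# Usage sketch for get_ps (hypothetical names): `ic_*` and `fin_*` are
# initial and final meshes of shape (1, nc, nc, nc), with the module-level
# `bs` set. The ratios follow the same conventions as save2ptfig above.
#k, pks = get_ps([ic_recon, fin_recon], [ic_true, fin_true])
#p1, p2, p12 = pks[0]            # recon auto, truth auto, cross power (ICs)
#rcc = p12 / (p1 * p2)**0.5      # cross correlation
#tfn = (p1 / p2)**0.5            # transfer function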
import os
import numpy as np
import matplotlib.pyplot as plt
import tools

bs, nc = 400, 128
seed = 100
numd = 1e-3
ofolder = './saved/L%04d_N%04d_S%04d_n%02d/anneal4/nc0norm-truth/' % (
    bs, nc, seed, numd * 1e4)
figfolder = ofolder + 'figs/'
try:
    os.makedirs(figfolder)
except OSError:
    pass

truemesh = np.load(ofolder + 'truth.f4.npy')
k, pt = tools.power(1 + truemesh, boxsize=bs)

fig, ax = plt.subplots(1, 2, figsize=(9, 4), sharex=True)
#iters = [0, 100, 200, 300, 500]
iters = [1000, 600, 700, 800, 900]
subf = 'R20'
reconfile = '/%s/iter1000.f4.npy' % subf
nit = 1000
for j, it in enumerate(iters):
    reconmesh = np.load(ofolder + '/%s/iter%d.f4.npy' % (subf, it))
    k, pr = tools.power(1 + reconmesh, boxsize=bs)
    k, px = tools.power(1 + truemesh, 1 + reconmesh, boxsize=bs)
    ax[0].plot(k, px / (pr * pt)**.5, 'C%d' % j, label=it)
    ax[1].plot(k, pr / pt, 'C%d' % j)
mesh['predict'] = dtools.uncubify(recp[:, :, :, :, 0], shape)
meshes[seed] = [mesh, hmesh]
print('All the predictions have been generated for seed = %d' % seed)

##############################
##Power spectrum
kk = tools.fftk(shape, bs)
kmesh = sum(i**2 for i in kk)**0.5

fig, ax = plt.subplots(1, 2, figsize=(10, 4))
for seed in seeds:
    predict, hpmeshd = meshes[seed][0]['predict'], meshes[seed][1]['target']
    k, pkpred = tools.power(predict / predict.mean(), boxsize=bs, k=kmesh)
    k, pkhd = tools.power(hpmeshd / hpmeshd.mean(), boxsize=bs, k=kmesh)
    k, pkhx = tools.power(hpmeshd / hpmeshd.mean(), predict / predict.mean(),
                          boxsize=bs, k=kmesh)
    #k, pkpredall = tools.power(predictall/predictall.mean(), boxsize=bs, k=kmesh)
    #k, pkhallx = tools.power(hpmeshd/hpmeshd.mean(), predictall/predictall.mean(), boxsize=bs, k=kmesh)
    ##
    ax[0].semilogx(k, pkpred / pkhd, label=seed)
    ax[1].semilogx(k, pkhx / (pkpred * pkhd)**0.5)
    # plt.plot(k, pkpredall/pkhd)

ax[0].legend(fontsize=14)
ax[0].set_title('Transfer function', fontsize=14)
ax[1].set_title('Cross correlation', fontsize=14)
for axis in ax:
    axis.set_ylim(0., 1.1)
    axis.set_yticks(np.arange(0, 1.1, 0.1))
    axis.grid(which='both')
def all_sim():
    path = '//mnt/ceph/users/cmodi/cosmo4d/z00/'
    dyn = "%02dstep_B1" % nsteps
    dynf = "%02dstep_B1" % nstepsf
    hpath = path + '/L%04d_N%04d_%s//' % (bs, ncf, dynf)
    path = path + '/L%04d_N%04d_%s//' % (bs, nc, dyn)

    for seed in range(100, 601):
        print(seed)
        ic = tools.readbigfile(path + '/L%04d_N%04d_S%04d_%02dstep/mesh/s/' %
                               (bs, nc, seed, nsteps))
        final = tools.readbigfile(path + '/L%04d_N%04d_S%04d_%02dstep/mesh/d/' %
                                  (bs, nc, seed, nsteps))
        hpos = tools.readbigfile(
            hpath + '/L%04d_N%04d_S%04d_%02dstep/FOF/PeakPosition/' %
            (bs, ncf, seed, nstepsf))[:num]
        hmassall = tools.readbigfile(
            hpath + '/L%04d_N%04d_S%04d_%02dstep/FOF/Mass/' %
            (bs, ncf, seed, nstepsf)).flatten()
        hmass = hmassall[:num]
        hmeshpos = tools.paintcic(hpos, bs, nc)
        hmeshmass = tools.paintcic(hpos, bs, nc, hmass.flatten() * 1e10)
        hmeshmass /= hmeshmass.mean()
        hmeshmass -= 1
        hmeshpos /= hmeshpos.mean()
        hmeshpos -= 1

        if posdata:
            data = tf.constant(hmeshpos.reshape(1, nc, nc, nc), dtype=tf.float32)
        else:
            data = tf.constant(hmeshmass.reshape(1, nc, nc, nc), dtype=tf.float32)

        base = hmeshpos
        pfin = tools.power(final, boxsize=bs)[1]
        ph = tools.power(1 + base, boxsize=bs)[1]
        bias = ((ph[1:5] / pfin[1:5])**0.5).mean()

        tfdisplaced, tfrandom = standardrecon(
            data,
            tf.expand_dims(tf.constant(hpos, dtype=tf.float32), 0),
            tf.constant(bias, dtype=tf.float32),
            R=tf.constant(R, dtype=tf.float32))

        displaced, random = tfdisplaced.numpy()[0], tfrandom.numpy()[0]
        displaced /= displaced.mean()
        displaced -= 1
        random /= random.mean()
        random -= 1
        recon = np.squeeze(displaced - random)

        savepath = '//mnt/ceph/users/cmodi/cosmo4d/z00/L%04d_N%04d_D%04d//' % (
            bs, nc, numd * 1e4)
        np.save(savepath + 'stdR%d_S%04d' % (R, seed), recon)

        if seed == 100:
            import matplotlib.pyplot as plt
            plt.figure(figsize=(9, 4))
            plt.subplot(131)
            plt.imshow(ic.sum(axis=0))
            plt.subplot(132)
            plt.imshow(data.numpy()[0].sum(axis=0))
            plt.subplot(133)
            plt.imshow(recon.sum(axis=0))
            plt.savefig('tmp.png')
            plt.close()

            print(ic.mean(), recon.mean())
            k, p1 = tools.power(ic + 1, boxsize=bs)
            p2 = tools.power(recon + 1, boxsize=bs)[1]
            px = tools.power(ic + 1, f2=recon + 1, boxsize=bs)[1]
            plt.plot(k, p2 / p1)
            plt.plot(k, px / (p1 * p2)**0.5, '--')
            plt.semilogx()
            plt.savefig('tmp2.png')
            plt.close()
def check_module(modpath):

    print('Test module')
    tf.reset_default_graph()
    module = hub.Module(modpath + '/likelihood/')
    xx = tf.placeholder(tf.float32,
                        shape=[None, None, None, None, nchannels],
                        name='input')
    yy = tf.placeholder(tf.float32,
                        shape=[None, None, None, None, ntargets],
                        name='labels')
    samples = module(dict(features=xx, labels=yy), as_dict=True)['sample']
    loglik = module(dict(features=xx, labels=yy), as_dict=True)['loglikelihood']

    preds = {}
    with tf.Session() as sess:
        sess.run(tf.initializers.global_variables())
        for seed in seeds:
            xxm = np.pad(meshes[seed][0]['cic'], pad, 'wrap')
            #yym = np.stack([np.pad(meshes[seed][1]['pnncen'], pad, 'wrap'), np.pad(meshes[seed][1]['pnnsat'], pad, 'wrap')], axis=-1)
            yym = np.stack([meshes[seed][1][i] for i in tgname], axis=-1)
            print(xxm.shape, yym.shape)
            preds[seed] = sess.run(samples,
                                   feed_dict={
                                       xx: np.expand_dims(np.expand_dims(xxm, -1), 0),
                                       yy: np.expand_dims(yym, 0)
                                   })
            meshes[seed][0]['predict'] = preds[seed][:, :, :, :].sum(axis=-1)
            meshes[seed][0]['predictcen'] = preds[seed][:, :, :, 0]
            meshes[seed][0]['predictsat'] = preds[seed][:, :, :, 1]

    ##############################
    ##Power spectrum
    shape = [nc, nc, nc]
    kk = tools.fftk(shape, bs)
    kmesh = sum(i**2 for i in kk)**0.5

    fig, ax = plt.subplots(2, 3, figsize=(12, 8))
    for seed in seeds:
        for i, key in enumerate(['cen', 'sat']):
            predict, hpmeshd = (meshes[seed][0]['predict%s' % key],
                                meshes[seed][1]['pnn%s' % key])
            k, pkpred = tools.power(predict / predict.mean(), boxsize=bs, k=kmesh)
            k, pkhd = tools.power(hpmeshd / hpmeshd.mean(), boxsize=bs, k=kmesh)
            k, pkhx = tools.power(hpmeshd / hpmeshd.mean(),
                                  predict / predict.mean(),
                                  boxsize=bs, k=kmesh)
            ##
            ax[0, i].semilogx(k[1:], pkpred[1:] / pkhd[1:], label=seed)
            ax[1, i].semilogx(k[1:], pkhx[1:] / (pkpred[1:] * pkhd[1:])**0.5)
            ax[0, i].set_title(key, fontsize=12)

        i = 2
        predict, hpmeshd = (meshes[seed][0]['predictcen'] + meshes[seed][0]['predictsat'],
                            meshes[seed][1]['pnncen'] + meshes[seed][1]['pnnsat'])
        k, pkpred = tools.power(predict / predict.mean(), boxsize=bs, k=kmesh)
        k, pkhd = tools.power(hpmeshd / hpmeshd.mean(), boxsize=bs, k=kmesh)
        k, pkhx = tools.power(hpmeshd / hpmeshd.mean(),
                              predict / predict.mean(),
                              boxsize=bs, k=kmesh)
        ##
        ax[0, i].semilogx(k[1:], pkpred[1:] / pkhd[1:], label=seed, ls='-')
        ax[1, i].semilogx(k[1:], pkhx[1:] / (pkpred[1:] * pkhd[1:])**0.5, ls='-')

    for axis in ax.flatten():
        axis.legend(fontsize=14)
        axis.set_yticks(np.arange(0, 1.1, 0.1))
        axis.grid(which='both')
        axis.set_ylim(0., 1.1)
    ax[0, i].set_title('All Gal', fontsize=15)
    ax[0, 0].set_ylabel('Transfer function', fontsize=14)
    ax[1, 0].set_ylabel('Cross correlation', fontsize=14)
    plt.savefig(savepath + '/2pt%d.png' % max_steps)
    plt.show()

    #
    ##################################################
    fig, ax = plt.subplots(2, 3, figsize=(12, 8))
    for i, key in enumerate(['cen', 'sat']):
        predict, hpmeshd = (meshes[seed][0]['predict%s' % key],
                            meshes[seed][1]['pnn%s' % key])
        vmin, vmax = 0, (hpmeshd[:, :, :].sum(axis=0)).max()
        im = ax[0, i].imshow(predict[:, :, :].sum(axis=0), vmin=vmin, vmax=vmax)
        im = ax[1, i].imshow(hpmeshd[:, :, :].sum(axis=0), vmin=vmin, vmax=vmax)
        ax[0, i].set_title(key, fontsize=15)

    i = 2
    predict, hpmeshd = (meshes[seed][0]['predictcen'] + meshes[seed][0]['predictsat'],
                        meshes[seed][1]['pnncen'] + meshes[seed][1]['pnnsat'])
    im = ax[0, i].imshow(predict[:, :, :].sum(axis=0), vmin=vmin, vmax=vmax)
    im = ax[1, i].imshow(hpmeshd[:, :, :].sum(axis=0), vmin=vmin, vmax=vmax)
    ax[0, i].set_title('All Gal', fontsize=15)
    ax[0, 0].set_ylabel('Prediction', fontsize=15)
    ax[1, 0].set_ylabel('Truth', fontsize=15)
    plt.savefig(savepath + '/imshow%d.png' % max_steps)
    plt.show()
def sampletrue(modpath, csize):
    print('sampling true')
    tf.reset_default_graph()
    module = hub.Module(modpath + '/likelihood/')
    xx = tf.placeholder(tf.float32,
                        shape=[None, None, None, None, nchannels],
                        name='input')
    yy = tf.placeholder(tf.float32,
                        shape=[None, None, None, None, ntargets],
                        name='labels')
    samples = module(dict(features=xx, labels=yy), as_dict=True)['sample']
    loglik = module(dict(features=xx, labels=yy), as_dict=True)['loglikelihood']

    index = np.where(cube_sizes == csize)[0][0]
    with tf.Session() as sess:
        sess.run(tf.initializers.global_variables())
        start = time()
        #sess.run(tf.initializers.global_variables())
        features = cube_features[index][0:1].astype('float32')
        targets = cube_target[index][0:1].astype('float32')
        xxm = features
        yym = targets
        print(xxm.shape, yym.shape)
        sample = np.zeros_like(yym)
        sample2 = sess.run(samples, feed_dict={xx: xxm, yy: yym * 0})
        # Autoregressive sampling: fill one voxel at a time, conditioning
        # on everything sampled so far.
        for i in range(yym.shape[1]):
            for j in range(yym.shape[2]):
                for k in range(yym.shape[3]):
                    data_dict = {xx: xxm, yy: sample}
                    next_sample = sess.run(samples, feed_dict=data_dict)
                    sample[:, i, j, k, :] = next_sample[:, i, j, k, :]
        end = time()
        print('Taken : ', end - start)

    plt.figure(figsize=(12, 4))
    vmin, vmax = None, None
    plt.subplot(131)
    plt.imshow(yym[0, ..., 0].sum(axis=0), vmin=vmin, vmax=vmax)
    plt.colorbar()
    plt.subplot(132)
    plt.imshow(sample[0, ..., 0].sum(axis=0), vmin=vmin, vmax=vmax)
    plt.colorbar()
    plt.subplot(133)
    plt.imshow(sample2[0, ..., 0].sum(axis=0), vmin=vmin, vmax=vmax)
    plt.colorbar()
    plt.savefig(savepath + '/sampletrue_im%d' % max_steps)

    plt.figure()
    plt.hist(sample2.flatten(), range=(-5, 5), bins=100, label='predict', alpha=0.8)
    plt.hist(yym[0, ..., 0].flatten(), range=(-5, 5), bins=100, label='target', alpha=0.5)
    plt.hist(sample.flatten(), range=(-5, 5), bins=100, label='predicttrue', alpha=0.5)
    plt.legend()
    plt.savefig(savepath + '/truehist%d.png' % max_steps)
    plt.show()

    ##
    ii = 0
    k, ph = tools.power(yym[ii, ...], boxsize=bs / cube_sizes[index])
    k, pp1 = tools.power(sample[ii, ...], boxsize=bs / cube_sizes[index])
    k, pp1x = tools.power(sample[ii, ...], yym[ii, ...], boxsize=bs / cube_sizes[index])
    k, pp2 = tools.power(sample2[ii, ...], boxsize=bs / cube_sizes[index])
    k, pp2x = tools.power(sample2[ii, ...], yym[ii, ...], boxsize=bs / cube_sizes[index])

    plt.figure(figsize=(10, 4))
    plt.subplot(121)
    # plt.plot(k, ph, 'C%d-'%ii)
    plt.plot(k, pp1 / ph, 'C%d-' % ii)
    plt.plot(k, pp2 / ph, 'C%d--' % ii)
    plt.ylim(0, 1.5)
    plt.grid(which='both')
    plt.semilogx()
    # plt.loglog()
    plt.subplot(122)
    plt.plot(k, pp1x / (pp1 * ph)**0.5, 'C%d-' % ii)
    plt.plot(k, pp2x / (pp2 * ph)**0.5, 'C%d--' % ii)
    plt.ylim(0, 1)
    plt.grid(which='both')
    plt.semilogx()
    plt.savefig(savepath + '/sampletrue_2pt%d' % max_steps)
except Exception as e:
    print(e)

with open(ofolder + 'params.json', 'w') as fp:
    json.dump(params, fp)

#######################################
x_test, y_test = testdata[0:1, 0], testdata[0:1, 1:]
x_test = tf.constant(x_test, dtype=tf.float32)
fpos = datamodel.pmpos(x_test)[1].numpy()[0] * bs / nc
bparams, bmodel = getbias(bs, nc, y_test[0, 0], x_test.numpy()[0], fpos)
bias_test = tf.constant([bparams[0], bparams[1]], dtype=tf.float32)
print('Bias test : ', bias_test)
bmodeltf = datamodel.biasfield(x_test, bias_test).numpy()
errormesh = y_test[:, 0] - bmodeltf
kerror, perror = tools.power(errormesh[0], boxsize=bs)
kerror, perror = kerror[1:], perror[1:]
ipkerror = interp1d(kerror, perror,
                    bounds_error=False,
                    fill_value=(perror[0], perror.max()))
errormesh_test = tf.expand_dims(
    tf.constant(ipkerror(kmesh), dtype=tf.float32), 0)

#
if args.stdinit:
    x_init = tf.constant(y_test[:, 1] / b1eul, dtype=tf.float32)
    if args.diffps:
        x_init = x_init + linear_field(nc, bs, ipkdiff, batch_size=y_test.shape[0])
elif args.priorinit:
    x_init = linear_field(nc, bs, ipklin, batch_size=y_test.shape[0])
def check_module(modpath):

    print('\nTest module\n')
    tf.reset_default_graph()
    module = hub.Module(modpath + '/likelihood/')
    xx = tf.placeholder(tf.float32,
                        shape=[None, None, None, None, nchannels],
                        name='input')
    yy = tf.placeholder(tf.float32,
                        shape=[None, None, None, None, ntargets],
                        name='labels')
    samples = module(dict(features=xx, labels=yy), as_dict=True)['sample']
    loglik = module(dict(features=xx, labels=yy), as_dict=True)['loglikelihood']

    preds = {}
    with tf.Session() as sess:
        sess.run(tf.initializers.global_variables())
        for seed in vseeds:
            xxm = np.stack(
                [np.pad(vmeshes[seed][0][i], pad, 'wrap') for i in ftname],
                axis=-1)
            #yym = np.stack([np.pad(vmeshes[seed][1]['pnncen'], pad, 'wrap'), np.pad(vmeshes[seed][1]['pnnsat'], pad, 'wrap')], axis=-1)
            yym = np.stack([vmeshes[seed][1][i] for i in tgname], axis=-1)
            print('xxm, yym shape = ', xxm.shape, yym.shape)
            preds[seed] = sess.run(samples,
                                   feed_dict={
                                       xx: np.expand_dims(xxm, 0),
                                       yy: np.expand_dims(yym, 0)
                                   })
            preds[seed] = np.squeeze(preds[seed])
            vmeshes[seed][0]['predict'] = preds[seed]  #[:, :, :]

    ##############################
    ##Power spectrum
    shape = [nc, nc, nc]
    kk = tools.fftk(shape, bs)
    kmesh = sum(i**2 for i in kk)**0.5

    yy = ['pos', 'mass']
    for iy in range(2):
        fig, axar = plt.subplots(2, 2, figsize=(8, 8))
        ax = axar[0]
        for seed in vseeds:
            predict, hpmeshd = vmeshes[seed][0]['predict'][..., iy], np.stack(
                [vmeshes[seed][1][i] for i in tgname], axis=-1)[..., iy]
            print(predict.shape, hpmeshd.shape)
            k, pkpred = tools.power(predict / predict.mean(), boxsize=bs, k=kmesh)
            k, pkhd = tools.power(hpmeshd / hpmeshd.mean(), boxsize=bs, k=kmesh)
            k, pkhx = tools.power(hpmeshd / hpmeshd.mean(),
                                  predict / predict.mean(),
                                  boxsize=bs, k=kmesh)
            ##
            ax[0].semilogx(k[1:], pkpred[1:] / pkhd[1:], label=seed)
            ax[1].semilogx(k[1:], pkhx[1:] / (pkpred[1:] * pkhd[1:])**0.5)
        for axis in ax.flatten():
            axis.legend(fontsize=14)
            axis.set_yticks(np.arange(0, 1.2, 0.1))
            axis.grid(which='both')
            axis.set_ylim(0., 1.1)
        ax[0].set_ylabel('Transfer function', fontsize=14)
        ax[1].set_ylabel('Cross correlation', fontsize=14)
        #
        ax = axar[1]
        #predict, hpmeshd = vmeshes[seed][0]['predict'][..., iy], np.stack([vmeshes[seed][1][i] for i in tgname], axis=-1)[..., iy]
        vmin, vmax = 0, (hpmeshd[:, :, :].sum(axis=0)).max()
        im = ax[0].imshow(predict[:, :, :].sum(axis=0), vmin=vmin, vmax=vmax)
        im = ax[1].imshow(hpmeshd[:, :, :].sum(axis=0), vmin=vmin, vmax=vmax)
        ax[0].set_title('Prediction', fontsize=15)
        ax[1].set_title('Truth', fontsize=15)
        plt.savefig(savepath + '/vpredict%d-%s.png' % (max_steps, yy[iy]))
        plt.show()

        plt.figure()
        plt.hist(hpmeshd.flatten(), range=(-1, 20), bins=100, label='target', alpha=0.8)
        plt.hist(predict.flatten(), range=(-1, 20), bins=100, label='predict', alpha=0.5)
        plt.legend()
        plt.yscale('log')
        plt.savefig(savepath + '/hist%d-%s.png' % (max_steps, yy[iy]))
        plt.show()
def check_module(modpath):

    print('\nTest module\n')
    tf.reset_default_graph()
    module = hub.Module(modpath + '/likelihood/')
    xx = tf.placeholder(tf.float32,
                        shape=[None, None, None, None, nchannels],
                        name='input')
    yy = tf.placeholder(tf.float32,
                        shape=[None, None, None, None, ntargets],
                        name='labels')
    samples = module(dict(features=xx, labels=yy), as_dict=True)['sample']
    loglik = module(dict(features=xx, labels=yy), as_dict=True)['loglikelihood']

    preds = {}
    with tf.Session() as sess:
        sess.run(tf.initializers.global_variables())
        for seed in vseeds:
            xxm = np.stack(
                [np.pad(vmeshes[seed][0][i], pad, 'wrap') for i in ftname],
                axis=-1)
            #yym = np.stack([np.pad(vmeshes[seed][1]['pnncen'], pad, 'wrap'), np.pad(vmeshes[seed][1]['pnnsat'], pad, 'wrap')], axis=-1)
            yym = np.stack([vmeshes[seed][1][i] for i in tgname], axis=-1)
            print('xxm, yym shape = ', xxm.shape, yym.shape)
            print('xxm :', xxm.mean(), xxm.std())
            print('yym :', yym.mean(), yym.std())
            preds[seed] = sess.run(samples,
                                   feed_dict={
                                       xx: np.expand_dims(xxm, 0),
                                       yy: 0 * np.expand_dims(yym, 0)
                                   })
            vmeshes[seed][0]['predict'] = np.squeeze(preds[seed])

    ##############################
    ##Power spectrum
    shape = [nc, nc, nc]
    kk = tools.fftk(shape, bs)
    kmesh = sum(i**2 for i in kk)**0.5

    fig, axar = plt.subplots(2, 2, figsize=(8, 8))
    ax = axar[0]
    for seed in vseeds:
        for i, key in enumerate(['']):
            predict, hpmeshd = (vmeshes[seed][0]['predict%s' % key],
                                vmeshes[seed][1][tgname[0]])
            if predict.mean() < 1e-3: predict += 1
            if hpmeshd.mean() < 1e-3: hpmeshd += 1
            k, pkpred = tools.power(predict / predict.mean(), boxsize=bs, k=kmesh)
            k, pkhd = tools.power(hpmeshd / hpmeshd.mean(), boxsize=bs, k=kmesh)
            k, pkhx = tools.power(hpmeshd / hpmeshd.mean(),
                                  predict / predict.mean(),
                                  boxsize=bs, k=kmesh)
            ##
            ax[0].semilogx(k[1:], pkpred[1:] / pkhd[1:], label=seed)
            ax[1].semilogx(k[1:], pkhx[1:] / (pkpred[1:] * pkhd[1:])**0.5)
            ax[0].set_title(key, fontsize=12)
    for axis in ax.flatten():
        axis.legend(fontsize=14)
        axis.set_yticks(np.arange(0, 1.2, 0.1))
        axis.grid(which='both')
        axis.set_ylim(0., 1.1)
    ax[0].set_ylabel('Transfer function', fontsize=14)
    ax[1].set_ylabel('Cross correlation', fontsize=14)
    #
    ax = axar[1]
    for i, key in enumerate(['']):
        predict, hpmeshd = (vmeshes[seed][0]['predict%s' % key],
                            vmeshes[seed][1][tgname[0]])
        vmin, vmax = 0, (hpmeshd[:, :, :].sum(axis=0)).max()
        #im = ax[0].imshow(predict[:, :, :].sum(axis=0), vmin=vmin, vmax=vmax)
        #im = ax[1].imshow(hpmeshd[:, :, :].sum(axis=0), vmin=vmin, vmax=vmax)
        im = ax[0].imshow(predict[:, :, :].sum(axis=0))
        plt.colorbar(im, ax=ax[0])
        im = ax[1].imshow(hpmeshd[:, :, :].sum(axis=0))
        plt.colorbar(im, ax=ax[1])
        ax[0].set_title('Prediction', fontsize=15)
        ax[1].set_title('Truth', fontsize=15)
    plt.savefig(savepath + '/vpredict%d.png' % max_steps)
    plt.show()

    plt.figure()
    plt.hist(vmeshes[100][0]['predict'].flatten(), range=(-5, 5), bins=100)
    plt.hist(vmeshes[100][1][tgname[0]].flatten(), alpha=0.5, range=(-5, 5), bins=100)
    plt.savefig(savepath + '/hist%d.png' % max_steps)
    plt.show()

    dosampletrue = False
    if max_steps in [50, 100, 500, 1000, 5000, 15000, 25000, 35000, 45000, 55000, 65000]:
        dosampletrue = True
        csize = 16
    if max_steps in [3000, 10000, 20000, 30000, 40000, 50000, 60000, 70000]:
        dosampletrue = True
        csize = 32
    if dosampletrue:
        sampletrue(modpath, csize)
def main(_):
    dtype = tf.float32

    startw = time.time()

    tf.random.set_random_seed(100)
    np.random.seed(100)

    # Compute a few things first, using simple tensorflow
    a0 = FLAGS.a0
    a = FLAGS.af
    nsteps = FLAGS.nsteps
    bs, nc = FLAGS.box_size, FLAGS.nc
    klin = np.loadtxt('../data/Planck15_a1p00.txt').T[0]
    plin = np.loadtxt('../data/Planck15_a1p00.txt').T[1]
    ipklin = iuspline(klin, plin)
    stages = np.linspace(a0, a, nsteps, endpoint=True)

    tf.reset_default_graph()
    # Run normal flowpm to generate data
    ic = np.load('../data/poisson_L%04d_N%03d/ic.npy' % (bs, nc))
    fin = np.load('../data/poisson_L%04d_N%03d/final.npy' % (bs, nc))
    data = np.load('../data/poisson_L%04d_N%03d/psample_%0.2f.npy' % (bs, nc, plambda))

    k, pic = tools.power(ic[0] + 1, boxsize=bs)
    k, pfin = tools.power(fin[0], boxsize=bs)
    plt.plot(k, pic)
    plt.plot(k, pfin)
    plt.loglog()
    plt.grid(which='both')
    plt.savefig('pklin.png')
    plt.close()

    print(pic)
    print(pfin)
    #sys.exit(-1)

    ################################################################
    tf.reset_default_graph()
    print('ic constructed')

    #noise = np.random.normal(0, 1, nc**3).reshape(fin.shape).astype(np.float32)*1
    #data_noised = fin + noise
    #data = data_noised

    startpos = np.random.normal(0, 1, nc**3).reshape(fin.shape).astype(np.float32) * 1
    startpos = startpos.flatten()

    x0 = tf.placeholder(dtype=tf.float32, shape=data.flatten().shape, name='initlin')
    xlin = tf.placeholder(dtype=tf.float32, shape=data.shape, name='linfield')
    Rsm = tf.placeholder(tf.float32, name='smoothing')

    def recon_prototype(linearflat):
        """Poisson log-likelihood + Gaussian prior loss, and its gradient."""
        linear = tf.reshape(linearflat, data.shape)
        #
        #loss = tf.reduce_sum(tf.square(linear - minimum))
        state = lpt_init(linear, a0=0.1, order=1)
        final_state = nbody(state, stages, FLAGS.nc)
        final_field = cic_paint(tf.zeros_like(linear), final_state[0])
        #final_field = pmgraph(linear)
        base = final_field

        if FLAGS.anneal:
            Rsmsq = tf.multiply(Rsm * bs / nc, Rsm * bs / nc)
            smwts = tf.exp(tf.multiply(-kmesh**2, Rsmsq))
            basek = r2c3d(base, norm=nc**3)
            basek = tf.multiply(basek, tf.cast(smwts, tf.complex64))
            base = c2r3d(basek, norm=nc**3)

        galmean = tfp.distributions.Poisson(rate=plambda * (1 + base))
        logprob = -tf.reduce_sum(galmean.log_prob(data))
        #logprob = tf.multiply(logprob, 1/nc**3, name='logprob')

        #Prior
        lineark = r2c3d(linear, norm=nc**3)
        priormesh = tf.square(tf.cast(tf.abs(lineark), tf.float32))
        prior = tf.reduce_sum(tf.multiply(priormesh, 1 / priorwt))
        #prior = tf.multiply(prior, 1/nc**3, name='prior')
        #
        loss = logprob + prior
        grad = tf.gradients(loss, linearflat)
        print(grad)
        return loss, grad[0]

    @tf.function
    def min_lbfgs():
        return tfp.optimizer.lbfgs_minimize(
            #make_val_and_grad_fn(recon_prototype),
            recon_prototype,
            initial_position=x0,
            tolerance=1e-10,
            max_iterations=FLAGS.niter)

    tfinal_field = pmgraph(xlin)
    RRs = [2.0, 1.0, 0.0]
    start0 = time.time()
    with tf.Session() as sess:
        for iR, RR in enumerate(RRs):
            start = time.time()
            results = sess.run(min_lbfgs(), {Rsm: RR, x0: startpos})
            print("\n")
            print(results)
            print("\n")
            startpos = results.position
            print(startpos)
            print("\nTime taken for %d iterations: " % FLAGS.niter,
                  time.time() - start)

            minic = startpos.reshape(data.shape)
            minfin = sess.run(tfinal_field, {xlin: minic})
            dg.saveimfig("R%d" % RR, [minic, minfin], [ic, fin], fpath + '')
            dg.save2ptfig("R%d" % RR, [minic, minfin], [ic, fin], fpath + '', bs)
            np.save(fpath + 'recon-icR%d' % RR, minic)
            np.save(fpath + 'recon-finalR%d' % RR, minfin)
            #tf.reset_default_graph()

    print("\n")
    print('\nminimized\n')
    print('\nTotal time taken %d iterations: ' % (len(RRs) * FLAGS.niter),
          time.time() - start0)

#tfic = linear_field(FLAGS.nc, FLAGS.box_size, ipklin, batch_size=1, seed=100, dtype=dtype)*0 + minimum.reshape(data_noised.shape)
#state = lpt_init(tfic, a0=0.1, order=1)
#final_state = nbody(state, stages, FLAGS.nc)
#tfinal_field = cic_paint(tf.zeros_like(tfic), final_state[0])
## exit(0)
def main(_):
    dtype = tf.float32

    startw = time.time()

    tf.random.set_random_seed(100)
    np.random.seed(100)

    # Compute a few things first, using simple tensorflow
    a0 = FLAGS.a0
    a = FLAGS.af
    nsteps = FLAGS.nsteps
    bs, nc = FLAGS.box_size, FLAGS.nc
    klin = np.loadtxt('../data/Planck15_a1p00.txt').T[0]
    plin = np.loadtxt('../data/Planck15_a1p00.txt').T[1]
    ipklin = iuspline(klin, plin)
    stages = np.linspace(a0, a, nsteps, endpoint=True)

    tf.reset_default_graph()
    # Run normal flowpm to generate data
    try:
        ic, fin = np.load(fpath + 'ic.npy'), np.load(fpath + 'final.npy')
        print('Data loaded')
    except Exception as e:
        print('Exception occured', e)
        tfic = linear_field(FLAGS.nc, FLAGS.box_size, ipklin, batch_size=1,
                            seed=100, dtype=dtype)
        if FLAGS.nbody:
            state = lpt_init(tfic, a0=0.1, order=1)
            final_state = nbody(state, stages, FLAGS.nc)
        else:
            final_state = lpt_init(tfic, a0=stages[-1], order=1)
        tfinal_field = cic_paint(tf.zeros_like(tfic), final_state[0])
        with tf.Session() as sess:
            ic, fin = sess.run([tfic, tfinal_field])
        np.save(fpath + 'ic', ic)
        np.save(fpath + 'final', fin)

    k, pic = tools.power(ic[0] + 1, boxsize=bs)
    k, pfin = tools.power(fin[0], boxsize=bs)
    plt.plot(k, pic)
    plt.plot(k, pfin)
    plt.loglog()
    plt.grid(which='both')
    plt.savefig('pklin.png')
    plt.close()
    print(pic)
    print(pfin)
    #sys.exit(-1)

    ################################################################
    tf.reset_default_graph()
    print('ic constructed')

    noise = np.random.normal(0, 1, nc**3).reshape(fin.shape).astype(np.float32) * 1
    data_noised = fin + noise
    data = data_noised

    startpos = noise.copy().flatten().astype(np.float32)

    x0 = tf.placeholder(dtype=tf.float32, shape=data.flatten().shape, name='initlin')
    Rsm = tf.placeholder(tf.float32, name='smoothing')

    def recon_prototype(linearflat):
        """Smoothed chi-square + Gaussian prior loss, and its gradient."""
        linear = tf.reshape(linearflat, data.shape)
        #
        #loss = tf.reduce_sum(tf.square(linear - minimum))
        state = lpt_init(linear, a0=0.1, order=1)
        final_state = nbody(state, stages, FLAGS.nc)
        final_field = cic_paint(tf.zeros_like(linear), final_state[0])

        residual = final_field - data.astype(np.float32)
        base = residual

        Rsmsq = tf.multiply(Rsm * bs / nc, Rsm * bs / nc)
        smwts = tf.exp(tf.multiply(-kmesh**2, Rsmsq))
        basek = r2c3d(base, norm=nc**3)
        basek = tf.multiply(basek, tf.cast(smwts, tf.complex64))
        base = c2r3d(basek, norm=nc**3)
        #
        chisq = tf.multiply(base, base)
        chisq = tf.reduce_sum(chisq)
        chisq = tf.multiply(chisq, 1 / nc**3, name='chisq')

        #Prior
        lineark = r2c3d(linear, norm=nc**3)
        priormesh = tf.square(tf.cast(tf.abs(lineark), tf.float32))
        prior = tf.reduce_sum(tf.multiply(priormesh, 1 / priorwt))
        prior = tf.multiply(prior, 1 / nc**3, name='prior')
        #
        loss = chisq + prior
        grad = tf.gradients(loss, linearflat)
        print(grad)
        return loss, grad[0]

    @tf.function
    def min_lbfgs():
        return tfp.optimizer.lbfgs_minimize(
            #make_val_and_grad_fn(recon_prototype),
            recon_prototype,
            initial_position=x0,
            tolerance=1e-10,
            max_iterations=100)

    with tf.Session() as sess:
        # Anneal the smoothing scale Rsm = 2, 1, 0, warm-starting each stage
        # from the previous minimum.
        start = time.time()
        results = sess.run(min_lbfgs(), {Rsm: 2, x0: startpos})
        print("\n")
        print(results)
        print("\n")
        minimum = results.position
        print(minimum)
        print("\nTime taken : ", time.time() - start)

        start = time.time()
        results = sess.run(min_lbfgs(), {Rsm: 1, x0: minimum})
        print("\n")
        print(results)
        minimum = results.position
        print("\n")
        print(minimum)
        print("\nTime taken : ", time.time() - start)

        start = time.time()
        results = sess.run(min_lbfgs(), {Rsm: 0, x0: minimum})
        print("\n")
        print(results)
        minimum = results.position
        print("\n")
        print(minimum)
        print("\nTime taken : ", time.time() - start)

    tf.reset_default_graph()
    print("\n")
    print('\nminimized\n')

    tfic = linear_field(FLAGS.nc, FLAGS.box_size, ipklin, batch_size=1,
                        seed=100, dtype=dtype) * 0 + minimum.reshape(data_noised.shape)
    state = lpt_init(tfic, a0=0.1, order=1)
    final_state = nbody(state, stages, FLAGS.nc)
    tfinal_field = cic_paint(tf.zeros_like(tfic), final_state[0])
    with tf.Session() as sess:
        minic, minfin = sess.run([tfic, tfinal_field])

    dg.saveimfig(0, [minic, minfin], [ic, fin], fpath + '')
    dg.save2ptfig(0, [minic, minfin], [ic, fin], fpath + '', bs)
    np.save(fpath + 'recon0ic', minic)
    np.save(fpath + 'recon-final', minfin)

## exit(0)
def main():
    #bs, nc = 400, 64
    #ncf, stepf = nc*4, 40
    numd = 1e-3
    num = int(numd * bs**3)
    seed = 100

    path = '//mnt/ceph/users/cmodi/cosmo4d/z00/'
    dyn = "%02dstep_B1" % nsteps
    dynf = "%02dstep_B1" % nstepsf
    hpath = path + '/L%04d_N%04d_%s//' % (bs, ncf, dynf)
    path = path + '/L%04d_N%04d_%s//' % (bs, nc, dyn)

    ic = tools.readbigfile(path + '/L%04d_N%04d_S%04d_%02dstep/mesh/s/' %
                           (bs, nc, seed, nsteps))
    final = tools.readbigfile(path + '/L%04d_N%04d_S%04d_%02dstep/mesh/d/' %
                              (bs, nc, seed, nsteps))
    hpos = tools.readbigfile(
        hpath + '/L%04d_N%04d_S%04d_%02dstep/FOF/PeakPosition/' %
        (bs, ncf, seed, nstepsf))[:num]
    hmassall = tools.readbigfile(
        hpath + '/L%04d_N%04d_S%04d_%02dstep/FOF/Mass/' %
        (bs, ncf, seed, nstepsf)).flatten()
    print(hmassall.shape, hmassall.shape[0] / bs**3, hmassall.shape[0] / bs**3 / numd)
    hmass = hmassall[:num]
    print(hmass.shape, hmass.shape[0] / bs**3, hmass.shape[0] / bs**3 / numd)

    hmeshpos = tools.paintcic(hpos, bs, nc)
    hmeshmass = tools.paintcic(hpos, bs, nc, hmass.flatten() * 1e10)
    hmeshmass /= hmeshmass.mean()
    hmeshmass -= 1
    hmeshpos /= hmeshpos.mean()
    hmeshpos -= 1

    if posdata:
        data = tf.constant(hmeshpos.reshape(1, nc, nc, nc), dtype=tf.float32)
    else:
        data = tf.constant(hmeshmass.reshape(1, nc, nc, nc), dtype=tf.float32)

    base = hmeshpos
    #base = (base - base.mean())/base.mean()
    pfin = tools.power(final, boxsize=bs)[1]
    ph = tools.power(1 + base, boxsize=bs)[1]
    bias = ((ph[1:5] / pfin[1:5])**0.5).mean()

    tfdisplaced, tfrandom = standardrecon(
        data,
        tf.expand_dims(tf.constant(hpos, dtype=tf.float32), 0),
        tf.constant(bias, dtype=tf.float32),
        R=tf.constant(8, dtype=tf.float32))

    displaced, random = tfdisplaced.numpy()[0], tfrandom.numpy()[0]
    displaced /= displaced.mean()
    displaced -= 1
    random /= random.mean()
    random -= 1
    recon = np.squeeze(displaced - random)
    print(recon.mean())
    print(displaced.shape, random.shape)

    import matplotlib.pyplot as plt
    plt.figure(figsize=(9, 4))
    plt.subplot(131)
    plt.imshow(ic.sum(axis=0))
    plt.subplot(132)
    plt.imshow(data.numpy()[0].sum(axis=0))
    plt.subplot(133)
    plt.imshow(recon.sum(axis=0))
    plt.savefig('tmp.png')
    plt.close()

    print(ic.mean(), recon.mean())
    k, p1 = tools.power(ic + 1, boxsize=bs)
    p2 = tools.power(recon + 1, boxsize=bs)[1]
    px = tools.power(ic + 1, f2=recon + 1, boxsize=bs)[1]
    plt.plot(k, p2 / p1)
    plt.plot(k, px / (p1 * p2)**0.5, '--')
    plt.semilogx()
    plt.savefig('tmp2.png')
    plt.close()

    # Scan over smoothing scales for the standard reconstruction.
    for R in [4, 8, 16, 24, 32, 64, 128, 200, 256]:
        tfdisplaced, tfrandom = standardrecon(
            data,
            tf.expand_dims(tf.constant(hpos, dtype=tf.float32), 0),
            tf.constant(bias, dtype=tf.float32),
            R=tf.constant(R, dtype=tf.float32))

        displaced, random = tfdisplaced.numpy()[0], tfrandom.numpy()[0]
        displaced /= displaced.mean()
        displaced -= 1
        random /= random.mean()
        random -= 1
        recon = np.squeeze(displaced - random)

        print(ic.mean(), recon.mean())
        k, p1 = tools.power(ic + 1, boxsize=bs)
        p2 = tools.power(recon + 1, boxsize=bs)[1]
        px = tools.power(ic + 1, f2=recon + 1, boxsize=bs)[1]
        #plt.plot(k, p2/p1)
        plt.plot(k, px / (p1 * p2)**0.5, '-', label=R)

    plt.semilogx()
    plt.legend()
    plt.grid(which='both')
    plt.ylim(-0.2, 1.2)
    plt.savefig('stdRcompare.png')
    lr=0.5,
    x_init=x_init,
    useprior=True)

check_2pt(
    datamodel,
    #[[x_test, y_test], [x_init, minic]],
    #[[x_test, y_test], [pred_adam, pred_adam10, minic]], grad_params, ofolder + 'fid_recon')
    [[x_test + 1., y_test], [x_init + 1., minic + 1.]],
    [[x_test + 1., y_test], [pred_adam + 1., pred_adam10 + 1., minic + 1.]],
    grad_params,
    ofolder + 'fid_recon')

bmodeltf = datamodel.biasfield(x_test, bias)
error = y_test - bmodeltf
k, ph = tools.power(y_test.numpy()[0], boxsize=bs)
k, pb = tools.power(bmodeltf.numpy()[0], boxsize=bs)
kerror, perr = tools.power(error.numpy()[0] + 1, boxsize=bs)
kny = nc * np.pi / bs
cutoff = 1.5
# Hold the error power constant beyond cutoff * Nyquist.
perr[np.where(kerror > cutoff * kny)] = perr[np.where(kerror > cutoff * kny)[0][0]]
k, px = tools.power(y_test.numpy()[0], f2=bmodeltf.numpy()[0], boxsize=bs)

fig, ax = plt.subplots(1, 2, figsize=(9, 4))
ax[0].plot(k, ph, label='halo')
ax[0].plot(k, pb, '--', label='bias')
ax[0].plot(k, perr, label='error')
ax[0].loglog()
ax[1].plot(k, px / (ph * pb)**0.5)
ax[1].plot(k, (pb / ph)**0.5, '--', label='tf')
def sampletrue(modpath):
    print('sampling true')
    tf.reset_default_graph()
    module = hub.Module(modpath + '/likelihood/')
    xx = tf.placeholder(tf.float32,
                        shape=[None, None, None, None, nchannels],
                        name='input')
    yy = tf.placeholder(tf.float32,
                        shape=[None, None, None, None, ntargets],
                        name='labels')
    samples = module(dict(features=xx, labels=yy), as_dict=True)['sample']
    loglik = module(dict(features=xx, labels=yy), as_dict=True)['loglikelihood']

    #
    j = 0
    bs, nc = bsnclist[j]
    with tf.Session() as sess:
        sess.run(tf.initializers.global_variables())
        start = time()
        vseeds = np.random.choice(test_features[j].shape[0], 1)
        xxm = test_features[j][vseeds]
        yym = test_target[j][vseeds]
        print('xxm, yym shape = ', xxm.shape, yym.shape)
        sample = np.zeros_like(yym)
        sample2 = sess.run(samples, feed_dict={xx: xxm, yy: yym * 0})
        # Autoregressive sampling: fill one voxel at a time, conditioning on
        # everything sampled so far (the loop indices shadow the outer j).
        for i in range(yym.shape[1]):
            for j in range(yym.shape[2]):
                for k in range(yym.shape[3]):
                    data_dict = {xx: xxm, yy: sample}
                    next_sample = sess.run(samples, feed_dict=data_dict)
                    sample[:, i, j, k, :] = next_sample[:, i, j, k, :]
        end = time()
        print('Taken : ', end - start)

    plt.figure(figsize=(12, 4))
    vmin, vmax = None, None
    plt.subplot(131)
    plt.imshow(yym[0, ..., 0].sum(axis=0), vmin=vmin, vmax=vmax)
    plt.colorbar()
    plt.title('Data')
    plt.subplot(132)
    plt.imshow(sample[0, ..., 0].sum(axis=0), vmin=vmin, vmax=vmax)
    plt.colorbar()
    plt.title('Correct sample')
    plt.subplot(133)
    plt.imshow(sample2[0, ..., 0].sum(axis=0), vmin=vmin, vmax=vmax)
    plt.colorbar()
    plt.title('Single pass sample')
    plt.savefig(savepath + '/sampletrue_im%d' % max_steps)

    ##
    ii = 0
    k, ph = tools.power(yym[ii, ...], boxsize=bs)
    k, pp1 = tools.power(sample[ii, ...], boxsize=bs)
    k, pp1x = tools.power(sample[ii, ...], yym[ii, ...], boxsize=bs)
    k, pp2 = tools.power(sample2[ii, ...], boxsize=bs)
    k, pp2x = tools.power(sample2[ii, ...], yym[ii, ...], boxsize=bs)

    plt.figure(figsize=(10, 4))
    plt.subplot(121)
    # plt.plot(k, ph, 'C%d-'%ii)
    plt.plot(k, pp1 / ph, label='Correct')
    plt.plot(k, pp2 / ph, label='Single Pass')
    plt.ylim(0, 1.5)
    plt.grid(which='both')
    plt.semilogx()
    plt.legend()
    # plt.loglog()
    plt.subplot(122)
    plt.plot(k, pp1x / (pp1 * ph)**0.5)
    plt.plot(k, pp2x / (pp2 * ph)**0.5)
    plt.ylim(0, 1)
    plt.grid(which='both')
    plt.semilogx()
    plt.savefig(savepath + '/sampletrue_2pt%d' % max_steps)
def check_module(modpath):

    print('\nTest module\n')
    tf.reset_default_graph()
    module = hub.Module(modpath + '/likelihood/')
    xx = tf.placeholder(tf.float32,
                        shape=[None, None, None, None, nchannels],
                        name='input')
    yy = tf.placeholder(tf.float32,
                        shape=[None, None, None, None, ntargets],
                        name='labels')
    samples = module(dict(features=xx, labels=yy), as_dict=True)['sample']
    loglik = module(dict(features=xx, labels=yy), as_dict=True)['loglikelihood']

    for j in range(nsizes):
        bs, nc = bsnclist[j]
        with tf.Session() as sess:
            sess.run(tf.initializers.global_variables())
            vseeds = np.random.choice(test_features[j].shape[0], 10)
            xxm = test_features[j][vseeds]
            yym = test_target[j][vseeds]
            print('xxm, yym shape = ', xxm.shape, yym.shape)
            preds = sess.run(samples, feed_dict={xx: xxm, yy: yym})

        ##############################
        ##Power spectrum
        shape = [nc, nc, nc]
        kk = tools.fftk(shape, bs)
        kmesh = sum(i**2 for i in kk)**0.5
        print(kmesh.shape)
        print(preds.shape, yym.shape)

        fig, axar = plt.subplots(2, 2, figsize=(8, 8))
        ax = axar[0]
        for iseed, seed in enumerate(vseeds):
            predict, hpmeshd = np.squeeze(preds[iseed]), np.squeeze(yym[iseed])
            k, pkpred = tools.power(predict / predict.mean(), boxsize=bs, k=kmesh)
            k, pkhd = tools.power(hpmeshd / hpmeshd.mean(), boxsize=bs, k=kmesh)
            k, pkhx = tools.power(hpmeshd / hpmeshd.mean(),
                                  predict / predict.mean(),
                                  boxsize=bs, k=kmesh)
            ##
            ax[0].semilogx(k[1:], pkpred[1:] / pkhd[1:], label=seed)
            ax[1].semilogx(k[1:], pkhx[1:] / (pkpred[1:] * pkhd[1:])**0.5)
        for axis in ax.flatten():
            axis.legend(fontsize=14, ncol=3)
            axis.set_yticks(np.arange(0, 1.2, 0.1))
            axis.grid(which='both')
            axis.set_ylim(0., 1.1)
        ax[0].set_ylabel('Transfer function', fontsize=14)
        ax[1].set_ylabel('Cross correlation', fontsize=14)
        #
        ax = axar[1]
        vmin, vmax = 1, (hpmeshd[:, :, :].sum(axis=0)).max()
        im = ax[0].imshow(predict[:, :, :].sum(axis=0), vmin=vmin, vmax=vmax)
        plt.colorbar(im, ax=ax[0])
        im = ax[1].imshow(hpmeshd[:, :, :].sum(axis=0), vmin=vmin, vmax=vmax)
        plt.colorbar(im, ax=ax[1])
        ax[0].set_title('Prediction', fontsize=15)
        ax[1].set_title('Truth', fontsize=15)
        plt.savefig(savepath + '/vpredict%d-%d.png' % (nc, max_steps))
        plt.show()

        plt.figure()
        plt.hist(yym.flatten(), range=(-1, 20), bins=100, label='target', alpha=0.8)
        plt.hist(preds.flatten(), range=(-1, 20), bins=100, label='predict', alpha=0.5)
        plt.legend()
        plt.savefig(savepath + '/hist%d-%d.png' % (nc, max_steps))
        plt.show()

    ##
    dosampletrue = False
    if max_steps in [100, 5000, 10000, 15000, 20000, 25000, 30000]:
        dosampletrue = True
        csize = 32
    if dosampletrue:
        sampletrue(modpath)
def main(_):
    infield = True
    dtype = tf.float32
    mesh_shape = mtf.convert_to_shape(FLAGS.mesh_shape)
    nc, bs = FLAGS.nc, FLAGS.box_size
    a0, a, nsteps = FLAGS.a0, FLAGS.af, FLAGS.nsteps
    stages = np.linspace(a0, a, nsteps, endpoint=True)
    numd = 1e-3

    ##Begin here
    klin = np.loadtxt('../data/Planck15_a1p00.txt').T[0]
    plin = np.loadtxt('../data/Planck15_a1p00.txt').T[1]
    ipklin = iuspline(klin, plin)

    #pypath = '/global/cscratch1/sd/chmodi/cosmo4d/output/version2/L0400_N0128_05step-fof/lhd_S0100/n10/opt_s999_iM12-sm3v25off/meshes/'
    final = tools.readbigfile('../data//L0400_N0128_S0100_05step/mesh/d/')
    ic = tools.readbigfile('../data/L0400_N0128_S0100_05step/mesh/s/')
    fpos = tools.readbigfile('../data/L0400_N0128_S0100_05step/dynamic/1/Position/')

    hpos = tools.readbigfile(
        '../data/L0400_N0512_S0100_40step/FOF/PeakPosition//')[1:int(bs**3 * numd)]
    hmass = tools.readbigfile(
        '../data/L0400_N0512_S0100_40step/FOF/Mass//')[1:int(bs**3 * numd)].flatten()

    meshpos = tools.paintcic(hpos, bs, nc)
    meshmass = tools.paintcic(hpos, bs, nc, hmass.flatten() * 1e10)
    data = meshmass
    data /= data.mean()
    data -= 1
    kv = tools.fftk([nc, nc, nc], bs, symmetric=True, dtype=np.float32)
    datasm = tools.fingauss(data, kv, 3, np.pi * nc / bs)
    ic, data = np.expand_dims(ic, 0), np.expand_dims(data, 0).astype(np.float32)
    datasm = np.expand_dims(datasm, 0).astype(np.float32)
    print("Min in data : %0.4e" % datasm.min())

    np.save(fpath + 'ic', ic)
    np.save(fpath + 'data', data)

    ####################################################
    #
    tf.reset_default_graph()
    tfic = tf.constant(ic.astype(np.float32))
    state = lpt_init(tfic, a0=0.1, order=1)
    final_state = nbody(state, stages, FLAGS.nc)
    tfinal_field = cic_paint(tf.zeros_like(tfic), final_state[0])
    with tf.Session() as sess:
        state = sess.run(final_state)

    fpos = state[0, 0] * bs / nc
    bparams, bmodel = getbias(bs, nc, data[0] + 1, ic[0], fpos)
    #bmodel += 1 #np.expand_dims(bmodel, 0) + 1
    errormesh = data - np.expand_dims(bmodel, 0)
    kerror, perror = tools.power(errormesh[0] + 1, boxsize=bs)
    kerror, perror = kerror[1:], perror[1:]
    print("Error power spectra", kerror, perror)
    print("\nkerror", kerror.min(), kerror.max(), "\n")
    print("\nperror", perror.min(), perror.max(), "\n")
    suff = "-error"
    dg.saveimfig(suff, [ic, errormesh], [ic, data], fpath + '/figs/')
    dg.save2ptfig(suff, [ic, errormesh], [ic, data], fpath + '/figs/', bs)
    ipkerror = iuspline(kerror, perror)

    ####################################################
    #stdinit = srecon.standardinit(bs, nc, meshpos, hpos, final, R=8)

    recon_estimator = tf.estimator.Estimator(model_fn=model_fn, model_dir=fpath)

    def predict_input_fn(data=data, M0=0., w=3., R0=0., off=None, istd=None, x0=None):
        features = {}
        features['datasm'] = data
        features['R0'] = R0
        features['x0'] = x0
        features['bparams'] = bparams
        features['ipkerror'] = [kerror, perror]  #ipkerror
        return features, None

    eval_results = recon_estimator.predict(
        input_fn=lambda: predict_input_fn(x0=ic), yield_single_examples=False)

    for i, pred in enumerate(eval_results):
        if i > 0: break

    suff = '-model'
    dg.saveimfig(suff, [pred['ic'], pred['model']], [ic, data], fpath + '/figs/')
    dg.save2ptfig(suff, [pred['ic'], pred['model']], [ic, data], fpath + '/figs/', bs)
    np.save(fpath + '/reconmeshes/ic_true' + suff, pred['ic'])
    np.save(fpath + '/reconmeshes/fin_true' + suff, pred['final'])
    np.save(fpath + '/reconmeshes/model_true' + suff, pred['model'])

    #
    randominit = np.random.normal(size=data.size).reshape(data.shape)
    #eval_results = recon_estimator.predict(input_fn=lambda : predict_input_fn(x0 = np.expand_dims(stdinit, 0)), yield_single_examples=False)
    eval_results = recon_estimator.predict(
        input_fn=lambda: predict_input_fn(x0=randominit), yield_single_examples=False)

    for i, pred in enumerate(eval_results):
        if i > 0: break

    suff = '-init'
    dg.saveimfig(suff, [pred['ic'], pred['model']], [ic, data], fpath + '/figs/')
    dg.save2ptfig(suff, [pred['ic'], pred['model']], [ic, data], fpath + '/figs/', bs)
    np.save(fpath + '/reconmeshes/ic_init' + suff, pred['ic'])
    np.save(fpath + '/reconmeshes/fin_init' + suff, pred['final'])
    np.save(fpath + '/reconmeshes/model_init' + suff, pred['model'])

    #
    # Train and evaluate model.
    RRs = [4., 2., 1., 0.5, 0.]
    niter = 100
    iiter = 0

    for R0 in RRs:
        print('\nFor iteration %d\n' % iiter)
        print('With R0=%0.2f \n' % (R0))

        def train_input_fn():
            features = {}
            features['datasm'] = data
            features['R0'] = R0
            features['bparams'] = bparams
            features['ipkerror'] = [kerror, perror]  #ipkerror
            #features['x0'] = np.expand_dims(stdinit, 0)
            features['x0'] = randominit
            features['lr'] = 0.01
            return features, None

        recon_estimator.train(input_fn=train_input_fn, max_steps=iiter + niter)
        eval_results = recon_estimator.predict(input_fn=predict_input_fn,
                                               yield_single_examples=False)

        for i, pred in enumerate(eval_results):
            if i > 0: break

        iiter += niter
        #
        suff = '-%d-R%d' % (iiter, R0)
        dg.saveimfig(suff, [pred['ic'], pred['model']], [ic, data], fpath + '/figs/')
        dg.save2ptfig(suff, [pred['ic'], pred['model']], [ic, data], fpath + '/figs/', bs)
        np.save(fpath + '/reconmeshes/ic' + suff, pred['ic'])
        np.save(fpath + '/reconmeshes/fin' + suff, pred['final'])
        np.save(fpath + '/reconmeshes/model' + suff, pred['model'])

    sys.exit(0)

## exit(0)
def getbias(bs, nc, hmesh, basemesh, pos, doed=False, fpos=None,
            kmax=0.3, fitshear=False):
    print('Will fit for bias now')

    try:
        d0, d2, s2 = basemesh
    except Exception:
        d0 = basemesh.copy()
        d0 -= basemesh.mean()
        d2 = 1. * d0**2
        d2 -= d2.mean()
        s2 = shear(d0)
        s2 -= 1. * d0**2
        s2 -= s2.mean()

    k, ph = tools.power(hmesh, boxsize=bs)
    ik = np.where(k > kmax)[0][0]

    ed0 = tools.paintcic(pos, bs, nc, mass=d0.flatten())
    ed2 = tools.paintcic(pos, bs, nc, mass=d2.flatten())
    es2 = tools.paintcic(pos, bs, nc, mass=s2.flatten())
    if abs(ed0.mean()) < 1e-3: ed0 += 1
    if abs(ed2.mean()) < 1e-3: ed2 += 1
    if abs(es2.mean()) < 1e-3: es2 += 1

    ped0 = tools.power(ed0, boxsize=bs)[1]
    ped2 = tools.power(ed2, boxsize=bs)[1]
    pes2 = tools.power(es2, boxsize=bs)[1]

    pxed0d2 = tools.power(ed0, f2=ed2, boxsize=bs)[1]
    pxed0s2 = tools.power(ed0, f2=es2, boxsize=bs)[1]
    pxed2s2 = tools.power(ed2, f2=es2, boxsize=bs)[1]

    pxhed0 = tools.power(hmesh, f2=ed0, boxsize=bs)[1]
    pxhed2 = tools.power(hmesh, f2=ed2, boxsize=bs)[1]
    pxhes2 = tools.power(hmesh, f2=es2, boxsize=bs)[1]

    if doed:
        ed = tools.paintcic(pos, bs, nc, mass=np.ones(pos.shape[0]))
        ped = tools.power(ed, boxsize=bs)[1]
        pxhed = tools.power(hmesh, f2=ed, boxsize=bs)[1]
        pxedd0 = tools.power(ed, f2=ed0, boxsize=bs)[1]
        pxedd2 = tools.power(ed, f2=ed2, boxsize=bs)[1]
        pxeds2 = tools.power(ed, f2=es2, boxsize=bs)[1]

    def ftomin(bb, ii=ik, retp=False):
        b1, b2, bs2 = bb  # bs2 renamed from `bs` to avoid shadowing the boxsize
        if not fitshear:
            bs2 = 0.
        #
        pred = b1**2 * ped0 + b2**2 * ped2 + 2 * b1 * b2 * pxed0d2
        pred += bs2**2 * pes2 + 2 * b1 * bs2 * pxed0s2 + 2 * b2 * bs2 * pxed2s2
        if doed:
            pred += ped + 2 * b1 * pxedd0 + 2 * b2 * pxedd2 + 2 * bs2 * pxeds2

        predx = 1 * b1 * pxhed0 + 1 * b2 * pxhed2
        predx += 1 * bs2 * pxhes2
        if doed:
            predx += 1 * pxhed

        if retp:
            return pred, predx
        chisq = np.real((((ph + pred - 2 * predx)[1:ii])**2).sum()**0.5)
        return chisq

    print('Minimize\n')
    # b1, b2, bs2 = minimize(ftomin, [1, 1, 1], method='Nelder-Mead', options={'maxfev':10000}).x
    params = minimize(ftomin, [1, 0, 0]).x
    b1, b2, bs2 = params
    print('\nBias fit params are : ', b1, b2, bs2)

    ed0 = tools.paintcic(pos, bs, nc, mass=d0.flatten())
    ed2 = tools.paintcic(pos, bs, nc, mass=d2.flatten())
    es2 = tools.paintcic(pos, bs, nc, mass=s2.flatten())
    if fpos is not None:
        ed0 = tools.paintcic(fpos, bs, nc, mass=d0.flatten())
        ed2 = tools.paintcic(fpos, bs, nc, mass=d2.flatten())
        es2 = tools.paintcic(fpos, bs, nc, mass=s2.flatten())
    mod = b1 * ed0 + b2 * ed2 + bs2 * es2
    if doed:
        ed = tools.paintcic(pos, bs, nc, mass=np.ones(pos.shape[0]))
        mod += ed
    return params, mod
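# Usage sketch mirroring the getbias call in setupbias above (xx: linear
# initial mesh, yy: halo mesh, fpos: evolved particle positions in box units;
# all assumed defined as in that function).
#bparams, bmodel = getbias(bs, nc, yy[0] + 1, xx[0], fpos)
#b1fit, b2fit, bs2fit = bparams
#errormesh = yy - np.expand_dims(bmodel, 0)   # residual of the bias model
#kerror, perror = tools.power(errormesh[0] + 1, boxsize=bs)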
def main():
    """Model function for the CosmicRIM."""

    if args.parallel:
        rim = build_rim_parallel(params)
    else:
        rim = build_rim_split(params)
    grad_fn = recon_dm_grad
    #

    traindata, testdata = get_data()
    idx = np.random.randint(0, traindata.shape[0], 1)
    xx, yy = (traindata[idx, 0].astype(np.float32),
              traindata[idx, 1].astype(np.float32))
    x_init = np.random.normal(size=xx.size).reshape(xx.shape).astype(np.float32)
    x_pred = rim(x_init, yy, grad_fn)

    trainiter = args.trainiter
    rim.load_weights(ofolder + '%d' % trainiter)
    print('Loaded')

    idx = np.random.randint(0, testdata.shape[0], 1)
    x_test, y_test = (testdata[idx, 0].astype(np.float32),
                      testdata[idx, 1].astype(np.float32))
    x_init = np.random.normal(size=x_test.size).reshape(x_test.shape).astype(np.float32)
    pred = rim(tf.constant(x_init), tf.constant(y_test), grad_fn)

    fig, ax = plt.subplots(1, 2, figsize=(9, 4))
    k, pkt = tools.power(x_test[0], boxsize=bs)
    lss = ["-"] * 7 + ["--"] * 7
    print(lss)
    for i in range(pred.shape[0]):
        print(i, pred[i].shape, x_test.shape)
        k, pk = tools.power(pred[i, 0].numpy(), boxsize=bs)
        k, px = tools.power(pred[i, 0].numpy(), f2=x_test[0], boxsize=bs)
        rcc = px / (pkt * pk)**0.5
        print(rcc)
        ax[0].plot(k, rcc, 'C%d' % (i % 7), alpha=0.7, ls=lss[(i % 7)], label=i)
        ax[1].plot(k, (pk / pkt)**0.5, 'C%d' % (i % 7), alpha=0.7, ls=lss[(i % 7)])
    for axis in ax:
        axis.semilogx()
        axis.legend()
        axis.grid(which='both')
    ax[0].set_ylim(-0.1, 1.2)
    ax[1].set_ylim(-0.2, 2.5)
    plt.savefig('./figs/2pt-iters.png')
    plt.close()

    fig, ax = plt.subplots(2, 5, figsize=(14, 8))
    for i in range(10):
        ax.flatten()[i].imshow(pred[i + 1, 0].numpy().sum(axis=0))
    plt.savefig('./figs/im-iters.png')
    plt.close()

    lss = ['-', '--', ':', '-.']
    pred_adam = adam(tf.constant(x_init), tf.constant(y_test), grad_fn)
    pred_adam = [pred_adam[0].numpy(), pm(pred_adam)[0].numpy()]
    pred_adam10 = adam10(tf.constant(x_init), tf.constant(y_test), grad_fn)
    pred_adam10 = [pred_adam10[0].numpy(), pm(pred_adam10)[0].numpy()]
    minic, minfin = fid_recon.reconstruct(tf.constant(y_test),
                                          RRs=[1.0, 0.0],
                                          niter=args.rim_iter * 10,
                                          lr=0.1)
    compares = [pred_adam, pred_adam10, [minic[0], minfin[0]]]
    print('Test set generated')

    check_im(x_test[0], x_init[0], pred.numpy()[0], fname='rim-im')
    check_2pt(x_test, y_test, rim, grad_fn, compares, fname='rim-2pt')

    x_init = pred.numpy().copy()
    pred = rim(tf.constant(pred), tf.constant(y_test), grad_fn)[-1]
    check_im(x_test[0], x_init[0], pred.numpy()[0], fname='rim-im-pred')
    check_2pt(x_test, y_test, rim, grad_fn, compares, fname='rim-2pt-pred')

    x_init = y_test
    pred = rim(tf.constant(pred), tf.constant(y_test), grad_fn)[-1]
    check_im(x_test[0], x_init[0], pred.numpy()[0], fname='rim-im-data')
    check_2pt(x_test, y_test, rim, grad_fn, compares, fname='rim-2pt-data')

    x_init = x_test
    pred = rim(tf.constant(pred), tf.constant(y_test), grad_fn)[-1]
    check_im(x_test[0], x_init[0], pred.numpy()[0], fname='rim-im-truth')
    check_2pt(x_test, y_test, rim, grad_fn, compares, fname='rim-2pt-truth')