def get_data(sim_file, halo_index, hubble_const=0.688062):
    """Extract one halo's properties from a Dark Sky SDF halo catalog.

    Parameters
    ----------
    sim_file : str
        Path (or URL) of the SDF halo catalog, readable by ``load_sdf``.
    halo_index : int
        Row index of the halo to extract.
    hubble_const : float, optional
        Dimensionless Hubble parameter h used to rescale the catalog's
        h-scaled quantities. Default 0.688062 preserves the original
        hard-coded value (presumably the ds14_a cosmology -- TODO confirm).

    Returns
    -------
    dict
        Keys ``'x','y','z','mvir','r200b'`` (floats, divided by h),
        ``'vx','vy','vz'`` (floats, as stored) and ``'id','pid'`` (ints).
    """
    sdf_data = load_sdf(sim_file)

    data = {}

    # Positions, virial mass and r200b are divided by h, as in the original
    # code -- assumes the catalog stores them h-scaled (verify against the
    # simulation's units).
    for field in ('x', 'y', 'z', 'mvir', 'r200b'):
        data[field] = float(sdf_data[field][halo_index] / hubble_const)

    # Velocities are used exactly as stored (no h scaling).
    for field in ('vx', 'vy', 'vz'):
        data[field] = float(sdf_data[field][halo_index])

    # Halo identifier and parent-halo identifier.
    for field in ('id', 'pid'):
        data[field] = int(sdf_data[field][halo_index])

    return data
# ---- Example #2 ----
from yt.utilities.sdf import load_sdf
import math

# Dimensionless Hubble parameter h; used elsewhere to convert the catalog's
# h-scaled quantities to physical units (value presumably specific to the
# ds14_a simulation -- TODO confirm).
HUBBLE_CONST = 0.688062

# Raw Dark Sky halo catalog to read from.
sim_file = '/media/jsnguyen/JK-PEXHD/ds14_a_halos_1.0000'

# Directory holding both the input pair list and the output file.
save_dir = '/home/jsnguyen/DSS_data/'

# Input: one halo-index pair per line (output of an earlier filtering step).
load_data_fn = 'reduced_5Mpc_mass_filter_subhalos_1e+14.txt'

# Output file name derived from the input file name.
save_data_fn = 'full_data_' + load_data_fn

sdf_data = load_sdf(sim_file)

# Opened in 'w' mode: truncates any previous run's output before writing.
f_pairs_data = open(save_dir + save_data_fn, 'w')

#header describes the format of how a pair is stored
f_pairs_data.write('# pair_id\n')
f_pairs_data.write('# ax ay az avx avy avz amvir ar200b aid apid\n')
f_pairs_data.write('# bx by bz bvx bvy bvz bmvir br200b bid bpid\n')

# Closed after the header only; presumably reopened inside the loop below --
# the remainder of the loop body is not visible in this chunk.
f_pairs_data.close()

f_pairs = open(save_dir + load_data_fn, 'r')
# NOTE(review): file.next() is Python 2 only; under Python 3 this would be
# next(f_pairs).
f_pairs.next()  #skip header line
i = 0
for line in f_pairs:

    # Each line holds two whitespace-separated halo indices.
    halo_a = int(line.split()[0])
    halo_b = int(line.split()[1])
# ---- Example #3 ----
import time
import numpy as np
import yt
import sklearn
from sklearn.cluster import MiniBatchKMeans
import sys
# The last command-line argument is taken as the cluster count k.
for arg in sys.argv:
    k=arg
k = int(k)
yt.funcs.mylog.setLevel(50) #coerce output null
# NOTE(review): the print statements below are Python 2 syntax.
print "{} clusters test".format(k)
num =100000000
print "Loading {} Particles".format(num)
from yt.utilities.sdf import load_sdf
# Remote Dark Sky particle dataset, streamed over HTTP by yt's SDF reader.
path = 'http://darksky.slac.stanford.edu/simulations/ds14_a/ds14_a_1.0000'
data = load_sdf(path)
# First `num` particle positions and identifiers from the catalog.
x = data['x'][:num]
print x.nbytes
y = data['y'][:num]
z = data['z'][:num]
idx = data['ident'][:num]
#ds = yt.load("../../ds14_scivis_0128_e4_dt04_1.0000")
#ad = ds.all_data()
#x = ad[("all","particle_position_x")]
#y = ad[("all","particle_position_y")]
#z = ad[("all","particle_position_z")]
#idx = ad[("all","particle_index")]
# Stack as rows (ident, x, y, z); the transpose gives one particle per row,
# the layout MiniBatchKMeans expects.
train = np.array([idx,x,y,z]).T
np.savetxt("train.txt",train)
# Opened in append mode: repeated runs accumulate results in these files.
avrg = open('avrg_dens.txt', 'a')
timef = open('time.txt','a')
# ---- Example #4 ----
import struct
from random import *
from math import *
import sys


# Require an input SDF file path on the command line.
if len(sys.argv) < 2:
   print "usage: %s <filename>" % sys.argv[0]
   quit()

# Output name reuses the last 4 characters of the input path (e.g. a
# timestamp suffix like '0000').
print "data%s.xyzb" % sys.argv[1][-4:]

fileToOpen = sys.argv[1]
fileToSave = "data%s.xyzb" % sys.argv[1][-4:]

# NOTE(review): load_sdf is never imported in this snippet; it presumably
# comes from yt.utilities.sdf -- as written this line raises NameError.
sdfdata = load_sdf(fileToOpen)
file = open(fileToSave, 'wb')

# The triple-quoted block below is commented-out velocity-scan code; it runs
# past the end of this chunk and is left byte-identical.
'''
for ix in range(0,sdfdata['x'].size):

    if (sdfdata['vx'][ix] > 0):
        maxVX = minVX = sdfdata['vx'][ix]
        print (ix)
        break

for iy in range(0,sdfdata['x'].size):
    
    if (sdfdata['vy'][iy] > 0):
        maxVY = minVY = sdfdata['vy'][iy]
        print (iy)