Example #1
def calc(psi, nt, C):
    i = slice(halo, psi.size - halo)

    for _ in range(nt):
        ###
        #psi[:halo] = psi[i][-halo:]
        #psi[-halo:] = psi[i][:halo]

        rank = mpi.Get_rank()
        size = mpi.Get_size()

        right = (rank + 1) % size
        left = (rank - 1 + size) % size

        psi_without_halo = psi[i]
        psi_with_halo = psi

        mpi.send(psi_without_halo[-halo:], dest=right)
        psi_with_halo[:halo] = mpi.recv(source=left)
        mpi.send(psi_without_halo[:halo], dest=left)
        psi_with_halo[-halo:] = mpi.recv(source=right)
        ###

        psi[i] = upwind(psi, i, C)

    return psi
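
Note: the four paired blocking send/recv calls above can deadlock once messages exceed MPI's internal buffering, since every rank sends before it receives. A minimal deadlock-free sketch of the same halo exchange, assuming the same `mpi`, `halo`, `left` and `right` names as above:

        # Combined send/receive: each call posts the send and the matching
        # receive together, so no rank blocks waiting for a receiver.
        psi_with_halo[:halo] = mpi.sendrecv(
            psi_without_halo[-halo:], dest=right, source=left)
        psi_with_halo[-halo:] = mpi.sendrecv(
            psi_without_halo[:halo], dest=left, source=right)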
Example #2
def main():
    args = parse_args()
    assert args.pretrained_model_path is None or args.pretrained_model_path.endswith(
        ".ckpt")
    os.makedirs(args.save_dir, exist_ok=True)
    save_args(args)
    set_seed(args.seed + COMM_WORLD.Get_rank() * 100)
    nprocs = COMM_WORLD.Get_size()

    # Initialize model and agent policy
    aurora = Aurora(args.seed + COMM_WORLD.Get_rank() * 100, args.save_dir,
                    int(7200 / nprocs), args.pretrained_model_path,
                    tensorboard_log=args.tensorboard_log)
    # training_traces, validation_traces,
    training_traces = []
    val_traces = []
    if args.train_trace_file:
        with open(args.train_trace_file, 'r') as f:
            for line in f:
                line = line.strip()
                if args.dataset == 'pantheon':
                    queue = 100  # dummy value
                    # if "ethernet" in line:
                    #     queue = 500
                    # elif "cellular" in line:
                    #     queue = 50
                    # else:
                    #     queue = 100
                    training_traces.append(Trace.load_from_pantheon_file(
                        line, queue=queue, loss=0))
                elif args.dataset == 'synthetic':
                    training_traces.append(Trace.load_from_file(line))
                else:
                    raise ValueError

    if args.val_trace_file:
        with open(args.val_trace_file, 'r') as f:
            for line in f:
                line = line.strip()
                if args.dataset == 'pantheon':
                    queue = 100  # dummy value
                    # if "ethernet" in line:
                    #     queue = 500
                    # elif "cellular" in line:
                    #     queue = 50
                    # else:
                    #     queue = 100
                    val_traces.append(Trace.load_from_pantheon_file(
                        line, queue=queue, loss=0))
                elif args.dataset == 'synthetic':
                    val_traces.append(Trace.load_from_file(line))
                else:
                    raise ValueError
    print(args.randomization_range_file)

    aurora.train(args.randomization_range_file,
                 args.total_timesteps, tot_trace_cnt=args.total_trace_count,
                 tb_log_name=args.exp_name, validation_flag=args.validation,
                 training_traces=training_traces,
                 validation_traces=val_traces)
Example #3
def plot(x, psi, psi_0, nt, v):
    ###
    rcParams["figure.figsize"] = [8 / mpi.Get_size(), 5]
    ###
    pyplot.step(x, psi_0(x), label='initial', where='mid')
    pyplot.step(x, psi_0(x - v * nt), label='analytical', where='mid')
    pyplot.step(x, psi, label='numerical', where='mid')
    pyplot.grid()
    pyplot.gca().set_ylim([0, 12])
    pyplot.legend()
    ###
    # pyplot.savefig("out.svg")
    pyplot.savefig(f"out.{mpi.Get_rank()}.svg")
Example #4
    def __init__(self, shape, device, context):
        """ Constructor for the Space class. 

        Input variables
        shape -- Three-element tuple of positive integers defining the size of
            the space in the x-, y-, and z-directions.

        """

        # Make sure shape has exactly three elements.
        if len(shape) != 3:
            raise TypeError('Shape must have exactly three elements.')

        # Make sure they are all integers.
        if any([type(s) is not int for s in shape]):
            raise TypeError('Shape must have only integer elements.')

        # Make sure all elements are positive.
        if any([s < 1 for s in shape]):
            raise TypeError('Shape must have only positive elements.')

#         # Make sure stencil is a single, non-negative integer.
#         if (type(stencil) is not int) or (stencil < 0):
#             raise TypeError('Stencil must be a non-negative scalar integer.')
#
        # Initialize the space.
        self.shape = shape

        # Get MPI information.
        rank = comm.Get_rank()
        size = comm.Get_size()

        # Nodes to pass forward and backward (along x) to.
        self.mpi_adj = {'forw': (rank + 1) % size, 'back': (rank - 1) % size}

        # Grid is too small to be partitioned.
        if (size > self.shape[0]):
            raise TypeError('Shape is too short along x to be partitioned.')

        # Create the context on the appropriate GPU.
        # self.device, self.context = self._init_gpu(comm)
        self.device = device
        self.context = context

        # Partition the space.
        # Each space is responsible for field[x_range[0]:x_range[1],:,:].
        get_x_range = lambda r: (int(self.shape[0] * (float(r) / size)), \
                                int(self.shape[0] * (float(r+1) / size)))
        self.x_range = get_x_range(rank)

        self.all_x_ranges = [get_x_range(r) for r in range(size)]
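
As a quick standalone check of the partition lambda above (values assumed, not from the original source): for shape[0] = 10 split across size = 4 ranks, the integer truncation produces contiguous, exhaustive ranges:

# Reproduce get_x_range from the constructor above for shape[0]=10, size=4.
size = 4
get_x_range = lambda r: (int(10 * (float(r) / size)),
                         int(10 * (float(r + 1) / size)))
print([get_x_range(r) for r in range(size)])
# [(0, 2), (2, 5), (5, 7), (7, 10)]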
Example #5
def get_cpu_raw(cpu_data, k):
    # Make sure overlapped data is accurate as well.
    xr = space.get_space_info()['x_range']
    if comm.Get_rank() == 0:
        pad_back = cpu_data[-k:, :, :]
    else:
        pad_back = cpu_data[xr[0] - k:xr[0], :, :]

    if comm.Get_rank() == comm.Get_size() - 1:
        pad_front = cpu_data[:k, :, :]
    else:
        pad_front = cpu_data[xr[1]:xr[1] + k, :, :]

    return np.concatenate((pad_back, cpu_data[xr[0]:xr[1],:,:], \
                                pad_front), axis=0)
Example #6
def process_dir(indir, outdir):
    main_text_files = glob.glob("{0}/main/*.txt".format(indir))
    rank = world.Get_rank()
    size = world.Get_size()
    main_text_files_2 = []
    for m in main_text_files:
        tilename = find_tilename(m)
        out_main = "{0}/main/{1}.fits".format(outdir, tilename)
        out_epoch = "{0}/epoch/{1}.fits".format(outdir, tilename)
        if not (os.path.exists(out_main) and os.path.exists(out_epoch)):
            main_text_files_2.append(m)
    main_text_files = main_text_files_2
    print "{0} files left to do".format(len(main_text_files))

    for i, main_text_file in enumerate(main_text_files):
        if i % size != rank:
            continue
        print(rank, main_text_file)
        tilename = find_tilename(main_text_file)
        epoch_text_file = "{0}/epoch/{1}.epoch.txt".format(indir, tilename)
        out_main = "{0}/main/{1}.fits".format(outdir, tilename)
        out_epoch = "{0}/epoch/{1}.fits".format(outdir, tilename)
        if os.path.exists(out_main) and os.path.exists(out_epoch):
            continue
        try:
            process_text(main_text_file,
                         epoch_text_file,
                         out_main,
                         out_epoch,
                         "r",
                         blind=False,
                         quiet=False,
                         report=report)
        except Exception:
            print("{} did not work".format(out_main))
        if report:
            return
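
The `i % size != rank` test implements round-robin task assignment across MPI ranks; a minimal standalone illustration (task and rank counts are made up):

# Seven files spread over three ranks, as process_dir does above.
size = 3
for rank in range(size):
    print(rank, [i for i in range(7) if i % size == rank])
# 0 [0, 3, 6]
# 1 [1, 4]
# 2 [2, 5]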
Example #7
def main(nt, nx, dt, C, x_min, x_max):
    dx = (x_max - x_min) / nx

    ###
    size = mpi.Get_size()
    rank = mpi.Get_rank()

    # for nx=5 and size=3: better 2+2+1 than 1+1+3
    import math
    nx_max = math.ceil(nx / size)
    nx = nx_max if (rank + 1) * nx_max <= nx else nx - rank * nx_max
    assert nx > 0

    x_min += dx * nx_max * rank
    x_max = min(x_max, x_min + dx * nx_max)
    #print(rank, '/', size, ':', nx, x_min, x_max)
    ###

    x = np.linspace(x_min - halo * dx,
                    x_max + halo * dx,
                    num=nx + 2 * halo,
                    endpoint=False)
    psi = calc(psi_0(x), nt, C)
    plot(x[halo:-halo], psi[halo:-halo], psi_0, nt, v=C / dt * dx)
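
A quick standalone check of the split rule above (values assumed): for nx = 5 and size = 3 it yields the 2+2+1 partition the comment asks for:

import math

# Reproduce the per-rank nx computed in main() above.
nx_total, size = 5, 3
nx_max = math.ceil(nx_total / size)
print([nx_max if (rank + 1) * nx_max <= nx_total
       else nx_total - rank * nx_max for rank in range(size)])
# [2, 2, 1]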
Example #8
def main():

    args = parse_inputs()
    if args.adaptive_bins == 'False':
        args.adaptive_bins = False
    else:
        args.adaptive_bins = True

    file = open(args.file, 'rb')  # binary mode: required for pickle under Python 3
    loaded_fields = pickle.load(file)
    distance = loaded_fields[0]
    y = loaded_fields[1]
    bin_data = loaded_fields[2]

    rank = CW.Get_rank()
    size = CW.Get_size()

    dist_min = np.min(distance)
    dist_max = np.max(distance)
    if args.adaptive_bins:
        rs = [0] + list(set(distance))
        rs = np.sort(rs)
        #rs = rs[::2]
        rs = np.array(rs)
        rs = np.append(rs, (dist_max + (rs[-1] - rs[-2])))
    else:
        rs = np.linspace(dist_min, dist_max, args.no_of_bins)
        rs = np.append(rs, (dist_max + (rs[-1] - rs[-2])))
    bin_size = rs[-1] - rs[-2]
    rs = np.append(rs, rs[-1] + bin_size)
    gradient = np.array(np.zeros(np.shape(distance)))
    #print "RS:", rs

    rit = 1
    printed = False
    print_cen = False
    for r in range(len(rs)):
        if rank == rit:
            grad_add = np.array(np.zeros(np.shape(distance)))
            if r - 1 < 0:
                r_0 = rs[r]
            else:
                r_0 = rs[r - 1]
            r_1 = rs[r]
            if r + 1 == len(rs):
                r_2 = rs[r]
            else:
                r_2 = rs[r + 1]
            if r + 2 == len(rs) + 1:
                r_3 = rs[r]
            elif r + 2 == len(rs):
                r_3 = rs[r + 1]
            else:
                r_3 = rs[r + 2]
            mid_01 = (r_1 + r_0) / 2.
            mid_23 = (r_3 + r_2) / 2.
            shell_01 = np.where((distance >= r_0) & (distance < r_1))[0]
            shell_12 = np.where((distance >= r_1) & (distance < r_2))[0]
            shell_23 = np.where((distance >= r_2) & (distance < r_3))[0]
            if len(shell_01) == 0:
                print("FOUND EMPTY SHELL")
                y_01 = 0.0
            else:
                y_01 = np.mean(y[shell_01])
            if len(shell_23) == 0:
                y_23 = 0.0
                print("FOUND EMPTY SHELL")
            else:
                y_23 = np.mean(y[shell_23])
            grad_val = (y_23 - y_01) / (2. * (mid_23 - mid_01))
            #if rank == 1:
            #print "r_0, r_1, r_2, r_3:", r_0, r_1, r_2, r_3
            #print "mid_01, mid_12, mid_23:", mid_01, mid_12, mid_23
            #print "y_01, y_12, y_23:", y_01, y_12, y_23, "on rank", rank
            #print "grad_1, grad_2, average", grad_1, grad_2, grad_val, "on rank", rank
            #print "Gradient =", grad_val, "at Distance =", np.mean([mid_01, mid_23]), "on rank", rank
            grad_add[shell_12] = grad_val
            #grad_add[shell] = grad_val
            CW.send(grad_add, dest=0, tag=rank)
        if rank == 0:
            grad_add = CW.recv(source=rit, tag=rit)
            gradient = gradient + grad_add
        rit = rit + 1
        if rit == size:
            rit = 1

    if rank == 0:
        os.remove(args.file)
        file = open(args.save_file, 'wb')
        print("pickle file:", args.save_file)
        pickle.dump(gradient, file)
        file.close()
Example #9
    def test_mpi4py(self):
        from mpi4py.MPI import COMM_WORLD

        self.assertGreaterEqual(COMM_WORLD.Get_size(), 1)
Example #10
def test_size(sut):
    size = sut()
    assert size == COMM_WORLD.Get_size()
Example #11
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.collections import LineCollection
import numpy as np
import matplotlib.tri as mtri
from matplotlib.tri import tricontour
from matplotlib.tri import TriContourSet
from mpi4py.MPI import COMM_WORLD as comm
from common.io import remove_safe

rank = comm.Get_rank()
size = comm.Get_size()


__all__ = ["plot_edges", "plot_faces", "plot_contour", "plot_quiver",
           "zero_level_set", "plot_fancy", "plot_any_field"]


class Figure:
    def __init__(self, title=None, show=True, aspect_equal=True,
                 save=None, base_fig=None, xlabel="x", ylabel="y",
                 colorbar=True, clabel=None, subplots=False,
                 tight_layout=False, ticks=True):
        self.title = title
        self.show = show
        self.aspect_equal = aspect_equal
        self.save = save
        self.base_fig = base_fig
        self.colorbar = colorbar
        self.subplots = subplots
        self.colorbar_ax = None
Example #12
seed = comm.bcast(seed)
np.random.seed(seed + comm.Get_rank())

generating_components = []
for i in range(10):
    # With the invocation "mpirun -n 10 python pmc_mpi.py", there are
    # 10 processes which means in order to draw 1,000 samples
    # ``parallel_sampler.run(1000//comm.Get_size())`` makes each process draw
    # 100 samples.
    # Hereby the generating proposal component for each sample in each process
    # is returned by ``parallel_sampler.run``.
    # In the master process, ``parallel_sampler.run`` returns a list containing
    # the return values of the sequential ``run`` method of every process.
    # In all other processes, ``parallel_sampler.run`` returns the generating
    # component for its own samples only.
    last_generating_components = parallel_sampler.run(1000//comm.Get_size(), trace_sort=True)

    # In addition to the generating components, the ``sampler.run``
    # method automatically sends all samples to the master
    # process i.e. the process which fulfills comm.Get_rank() == 0.
    if comm.Get_rank() == 0:
        print("\rstep", i, "...\n\t", end='')

        # Now let PMC run only in the master process:

        # ``sampler.samples_list`` and ``sampler.weights_list`` store the weighted samples
        # sorted by the responsible process:
        # The History objects that are held by process i can be accessed via
        # ``sampler.<samples/weights>_list[i]``. The master process (i=0) also produces samples.

        # Combine the weights and samples to two arrays of 1,000 samples
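
The snippet breaks off before the combination step. A minimal sketch of what it could look like, assuming (as the comments above state) that ``sampler.samples_list[i]`` and ``sampler.weights_list[i]`` hold per-process History objects whose latest run is ``[-1]``; the exact History API is an assumption here:

        # Hedged sketch: stack every process's latest run into one array each.
        all_samples = np.vstack([h[-1] for h in sampler.samples_list])
        all_weights = np.vstack([h[-1] for h in sampler.weights_list])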
Example #14
    n_common_d = np.sum(bulge_good & disc_good & disc_better)
    n_only_b = np.sum(bulge_good & (~disc_good))
    n_only_d = np.sum(disc_good & (~bulge_good))  
    n_check = n_only_b + n_only_d +  n_common_b + n_common_d    
    
    cat_final.write(bord_filename)
    print('%s  n_total=%d n_common=%d n_common_b=%d n_common_d=%d n_only_b=%d n_only_d=%d n_check=%d' % (bord_filename, n_total, n_common, n_common_b, n_common_d, n_only_b, n_only_d, n_check))

    return disc_ids, bulge_ids


import sys
if '--mpi' in sys.argv:
    from mpi4py.MPI import COMM_WORLD as world
    rank = world.Get_rank()
    size = world.Get_size()
else:
    rank = 0
    size = 1

def main():

    bulge_path = 'bulge/main/DES*'
    disc_path = 'disc/main/DES*'

    print "Running merge script."

    bulge_files = glob.glob(bulge_path)
    disc_files = glob.glob(disc_path)

    bulge_tiles = [os.path.split(b)[1] for b in bulge_files]
Example #15
#! /usr/bin/env python3
# -*- coding: utf-8 -*-

import numpy as np
# import pylab as pl
import matplotlib.pyplot as pl
from pylab import *
from mpl_toolkits import mplot3d
import dbr
import timeit
from tools import mkdir
import sys
from mpi4py.MPI import COMM_WORLD as mpi
from matplotlib.backends.backend_pdf import PdfPages

size = mpi.Get_size()
rank = mpi.Get_rank()

args = np.loadtxt("args.txt")
job = int(sys.argv[1])
path = str(sys.argv[2])  # folder where the results will be saved

m = 0  # number of sites selected from the n visited sites that are worse than the best e
nep = 0  # number of bees assigned to the best e sites
e = 0  # number of best sites among the n selected sites
nsp = 0  # number of bees recruited to the m selected sites
n = int(args[job][0])  # number of scout bees
Neighb = args[job][
    1]  # search distance from the current best solution
ngh = args[job][
    2]  # search distance from the base solution during neighbourhood search
Example #16
def main():
    rank = CW.Get_rank()
    size = CW.Get_size()
    args = parse_inputs()
    prev_args = args
    print("Starting mosaic_mod_script on rank", rank)

    # Read in directories:
    input_file = args.input_file
    #save_dir = args.save_directory
    #if os.path.exists(save_dir) == False:
    #    os.makedirs(save_dir)

    # Read in input file
    print("Reading in input mosaic file on rank", rank)
    positions = []
    paths = []
    args_dict = []
    with open(input_file, 'r') as mosaic_file:
        reader = csv.reader(mosaic_file)
        for row in reader:
            if row[0] == 'Grid_inputs:':
                glr = float(row[1])
                grl = float(row[2])
                glw = float(row[3])
                ghspace = float(row[4])
            elif row[0][0] != '#':
                positions.append((int(row[0]), int(row[1])))
                paths.append(row[2])
                dict = ""
                for col in row[3:]:
                    dict = dict + col
                    if col != row[-1]:
                        dict = dict + ','
                dict = ast.literal_eval(dict)
                args_temp = argparse.Namespace(**vars(args))
                for key in list(dict.keys()):
                    if key in args:
                        exec("args_temp."+ key + " = " + "str(dict[key])")
                args_dict.append(args_temp)
                del args_temp
                args = prev_args
                
    import pdb
    pdb.set_trace()

    positions = np.array(positions)

    c = define_constants()
    mym.set_global_font_size(args.text_font)
    files = []
    simfo = []
    X = []
    Y = []
    X_vel = []
    Y_vel = []
    sim_files = []
    L = None
    for pit in range(len(paths)):
        fs = get_files(paths[pit], args_dict[pit])
        files.append(fs)

        #print "paths =", paths
        #print "fs =", fs
        #print "args_dict =", args_dict
        sfo = sim_info(paths[pit], fs[-1], args_dict[pit])
        simfo.append(sfo)

        if args_dict[pit].yt_proj == False:
            x, y, x_vel, y_vel, cl = mym.initialise_grid(files[pit][-1], zoom_times=args_dict[pit].zoom_times)
            X.append(x)
            Y.append(y)
            X_vel.append(x_vel)
            Y_vel.append(y_vel)
        else:
            x = np.linspace(sfo['xmin'], sfo['xmax'], sfo['dimension'])
            y = np.linspace(sfo['ymin'], sfo['ymax'], sfo['dimension'])
            x, y  = np.meshgrid(x, y)
            
            annotate_space = (simfo[pit]['xmax'] - simfo[pit]['xmin'])/31.
            x_ind = []
            y_ind = []
            counter = 0
            while counter < 31:
                val = annotate_space*counter + annotate_space/2. + simfo[pit]['xmin']
                x_ind.append(int(val))
                y_ind.append(int(val))
                counter = counter + 1
            x_vel, y_vel = np.meshgrid(x_ind, y_ind)
            if args_dict[pit].projection_orientation != None:
                y_val = 1./np.tan(np.deg2rad(float(args_dict[pit].projection_orientation)))
                if np.isinf(y_val):
                    y_val = 0.0
                L = [1.0, y_val, 0.0]
            else:
                if has_particles == False or len(dd['particle_posx']) == 1:
                    L = [0.0, 1.0, 0.0]
                else:
                    pos_vec = [np.diff(dd['particle_posx'].value)[0], np.diff(dd['particle_posy'].value)[0]]
                    L = [-1*pos_vec[-1], pos_vec[0]]
                    L.append(0.0)
                    if L[0] > 0.0:
                        L = [-1.0*L[0], -1.0*L[1], 0.0]
            print("SET PROJECTION ORIENTATION L=", L)
            L = np.array(L)
            X.append(x)
            Y.append(y)
            X_vel.append(x_vel)
            Y_vel.append(y_vel)
        if rank == 0:
            print("shape of x, y", np.shape(x), np.shape(y))

        if args_dict[pit].yt_proj == False and args_dict[pit].image_center != 0:
            sim_fs = sorted(glob.glob(paths[pit] + 'WIND_hdf5_plt_cnt*'))
        elif args_dict[pit].yt_proj != False and args_dict[pit].image_center != 0:
            sim_fs = files
        else:
            sim_fs = []
        sim_files.append(sim_fs)
    #myf.set_normal(L)
    #print "SET PROJECTION ORIENTATION L=", myf.get_normal()

    # Initialise Grid and build lists
    if args.plot_time != None:
        m_times = [args.plot_time]
    else:
        m_times = mym.generate_frame_times(files[0], args.time_step, presink_frames=args.presink_frames, end_time=args.end_time)
    no_frames = len(m_times)
    m_times = m_times[args.start_frame:]
    sys.stdout.flush()
    CW.Barrier()

    usable_files = []
    usable_sim_files = []
    for pit in range(len(paths)):
        usable_fs = mym.find_files(m_times, files[pit])
        usable_files.append(usable_fs)
        if args_dict[pit].image_center != 0 and args_dict[pit].yt_proj == False:
            usable_sfs = mym.find_files(m_times, sim_files[pit])
            usable_sim_files.append(usable_sfs)
            del sim_files[pit]
        else:
            usable_sim_files.append([])
    sys.stdout.flush()
    CW.Barrier()
    frames = list(range(args.start_frame, no_frames))

    sink_form_time = []
    for pit in range(len(paths)):
        sink_form = mym.find_sink_formation_time(files[pit])
        print("sink_form_time", sink_form_time)
        sink_form_time.append(sink_form)
    del files

    # Define colourbar bounds
    cbar_max = args.colourbar_max
    cbar_min = args.colourbar_min

    if L is None:
        if args.axis == 'xy':
            L = [0.0, 0.0, 1.0]
        else:
            L = [1.0, 0.0, 0.0]
        L = np.array(L)
    if args.axis == 'xy':
        y_int = 1
    else:
        y_int = 2

    sys.stdout.flush()
    CW.Barrier()
    rit = args.working_rank
    for frame_val in range(len(frames)):
        if rank == rit:
            time_val = m_times[frame_val]
            plt.clf()
            columns = np.max(positions[:,0])
            rows = np.max(positions[:,1])

            width = float(columns)*(14.5/3.)
            height = float(rows)*(17./4.)
            fig = plt.figure(figsize=(width, height))
            
            gs_left = gridspec.GridSpec(rows, columns-1)
            gs_right = gridspec.GridSpec(rows, 1)

            gs_left.update(right=glr, wspace=glw, hspace=ghspace)
            gs_right.update(left=grl, hspace=ghspace)
            
            axes_dict = {}
            counter = 1

            for pit in range(len(paths)):
                
                try:
                    title_parts = args_dict[pit].title
                except:
                    title_parts = args_dict[pit]['title']
                title = ''
                for part in title_parts:
                    if part != title_parts[-1]:
                        title = title + part + ' '
                    else:
                        title = title + part
            
                ax_label = 'ax' + str(counter)
                yit = np.where(positions[:,1] == positions[pit][1])[0][0]
                if positions[pit][0] == 1 and positions[pit][1] == 1:
                    if columns > 1:
                        axes_dict.update({ax_label:fig.add_subplot(gs_left[0,0])})
                        #print "ADDED SUBPLOT:", counter, "on rank", rank
                    else:
                        axes_dict.update({ax_label:fig.add_subplot(gs_right[0,0])})
                        #print "ADDED SUBPLOT:", counter, "on rank", rank
                elif positions[pit][0] != columns:
                    if args.share_x and args.share_y:
                        if yit >= len(axes_dict):
                            axes_dict.update({ax_label:fig.add_subplot(gs_left[positions[pit][1]-1,positions[pit][0]-1], sharex=axes_dict['ax1'])})
                            #print "ADDED SUBPLOT:", counter, "on rank", rank
                        else:
                            axes_dict.update({ax_label:fig.add_subplot(gs_left[positions[pit][1]-1,positions[pit][0]-1], sharex=axes_dict['ax1'], sharey=axes_dict[list(axes_dict.keys())[yit]])})
                            #print "ADDED SUBPLOT:", counter, "on rank", rank
                    elif args.share_x:
                        axes_dict.update({ax_label:fig.add_subplot(gs_left[positions[pit][1]-1,positions[pit][0]-1], sharex=axes_dict['ax1'])})
                        #print "ADDED SUBPLOT:", counter, "on rank", rank
                    elif args.share_y and positions[pit][0]!=1:
                        yit = np.where(positions[:,1] == positions[pit][1])[0][0]
                        axes_dict.update({ax_label:fig.add_subplot(gs_left[positions[pit][1]-1,positions[pit][0]-1], sharey=axes_dict[list(axes_dict.keys())[yit]])})
                        #print "ADDED SUBPLOT:", counter, "on rank", rank
                    elif args.share_y:
                        axes_dict.update({ax_label:fig.add_subplot(gs_left[positions[pit][1]-1,positions[pit][0]-1])})
                        #print "ADDED SUBPLOT:", counter, "on rank", rank
                    else:
                        axes_dict.update({ax_label:fig.add_subplot(gs_left[positions[pit][1]-1,positions[pit][0]-1])})
                        #print "ADDED SUBPLOT:", counter, "on rank", rank
                else:
                    if args.share_x and args.share_y:
                        yit = np.where(positions[:,1] == positions[pit][1])[0][0]
                        axes_dict.update({ax_label:fig.add_subplot(gs_right[positions[pit][1]-1,0], sharex=axes_dict['ax1'], sharey=axes_dict[list(axes_dict.keys())[yit]])})
                        #print "ADDED SUBPLOT:", counter, "on rank", rank
                    elif args.share_x:
                        axes_dict.update({ax_label:fig.add_subplot(gs_right[positions[pit][1]-1,0], sharex=axes_dict['ax1'])})
                        #print "ADDED SUBPLOT:", counter, "on rank", rank
                    elif args.share_y:
                        yit = np.where(positions[:,1] == positions[pit][1])[0][0]
                        axes_dict.update({ax_label:fig.add_subplot(gs_right[positions[pit][1]-1,0], sharey=axes_dict[list(axes_dict.keys())[yit]])})
                        #print "ADDED SUBPLOT:", counter, "on rank", rank
                    else:
                        axes_dict.update({ax_label:fig.add_subplot(gs_right[positions[pit][1]-1,0])})
                        #print "ADDED SUBPLOT:", counter, "on rank", rank

                counter = counter + 1
                axes_dict[ax_label].set(adjustable='box', aspect='equal')
                

                if args.yt_proj and args.plot_time==None and os.path.isfile(paths[pit] + "movie_frame_" + ("%06d" % frames[frame_val]) + ".pkl"):
                    pickle_file = paths[pit] + "movie_frame_" + ("%06d" % frames[frame_val]) + ".pkl"
                    print("USING PICKLED FILE:", pickle_file)
                    file = open(pickle_file, 'rb')  # binary mode for pickle
                    #weight_fieldstuff = pickle.load(file)
                    X[pit], Y[pit], image, magx, magy, X_vel[pit], Y_vel[pit], velx, vely, part_info, args_dict[pit], simfo[pit] = pickle.load(file)

                    #file_time = stuff[17]
                    file.close()

                else:
                    time_val = m_times[frame_val]
                    print("FILE =", usable_files[pit][frame_val])
                    has_particles = has_sinks(usable_files[pit][frame_val])
                    if has_particles:
                        part_info = mym.get_particle_data(usable_files[pit][frame_val], args_dict[pit].axis, proj_or=L)
                    else:
                        part_info = {}
                    center_vel = [0.0, 0.0, 0.0]
                    if args.image_center != 0 and has_particles:
                        original_positions = [X[pit], Y[pit], X_vel[pit], Y_vel[pit]]
                        x_pos = np.round(part_info['particle_position'][0][args.image_center - 1]/cl)*cl
                        y_pos = np.round(part_info['particle_position'][1][args.image_center - 1]/cl)*cl
                        pos = np.array([part_info['particle_position'][0][args.image_center - 1], part_info['particle_position'][1][args.image_center - 1]])
                        X[pit] = X[pit] + x_pos
                        Y[pit] = Y[pit] + y_pos
                        X_vel[pit] = X_vel[pit] + x_pos
                        Y_vel[pit] = Y_vel[pit] + y_pos
                        if args.yt_proj == False:
                            sim_file = usable_sim_files[frame_val][:-12] + 'part' + usable_sim_files[frame_val][-5:]
                        else:
                            sim_file = part_file
                        if len(part_info['particle_mass']) == 1:
                            part_ind = 0
                        else:
                            min_dist = 1000.0
                            for part in range(len(part_info['particle_mass'])):
                                f = h5py.File(sim_file, 'r')
                                temp_pos = np.array([f[list(f.keys())[11]][part][13]/c['au'], f[list(f.keys())[11]][part][13+y_int]/c['au']])
                                f.close()
                                dist = np.sqrt(np.abs(np.diff((temp_pos - pos)**2)))[0]
                                if dist < min_dist:
                                    min_dist = dist
                                    part_ind = part
                        f = h5py.File(sim_file, 'r')
                        center_vel = [f[list(f.keys())[11]][part_ind][18], f[list(f.keys())[11]][part_ind][19], f[list(f.keys())[11]][part_ind][20]]
                        f.close()
                    xabel, yabel, xlim, ylim = image_properties(X[pit], Y[pit], args_dict[pit], simfo[pit])
                    if args_dict[pit].axis == 'xy':
                        center_vel=center_vel[:2]
                    else:
                        center_vel=center_vel[::2]
                    
                    if args_dict[pit].ax_lim != None:
                        if has_particles and args_dict[pit].image_center != 0:
                            xlim = [-1*args_dict[pit].ax_lim + part_info['particle_position'][0][args_dict[pit].image_center - 1], args_dict[pit].ax_lim + part_info['particle_position'][0][args_dict[pit].image_center - 1]]
                            ylim = [-1*args_dict[pit].ax_lim + part_info['particle_position'][1][args_dict[pit].image_center - 1], args_dict[pit].ax_lim + part_info['particle_position'][1][args_dict[pit].image_center - 1]]
                        else:
                            xlim = [-1*args_dict[pit].ax_lim, args_dict[pit].ax_lim]
                            ylim = [-1*args_dict[pit].ax_lim, args_dict[pit].ax_lim]

                    if args.yt_proj == False:
                        f = h5py.File(usable_files[pit][frame_val], 'r')
                        image = get_image_arrays(f, simfo[pit]['field'], simfo[pit], args_dict[pit], X[pit], Y[pit])
                        magx = get_image_arrays(f, 'mag'+args.axis[0]+'_'+simfo[pit]['movie_file_type']+'_'+args.axis, simfo[pit], args_dict[pit], X[pit], Y[pit])
                        magy = get_image_arrays(f, 'mag'+args.axis[1]+'_'+simfo[pit]['movie_file_type']+'_'+args.axis, simfo[pit], args_dict[pit], X[pit], Y[pit])
                        x_pos_min = int(np.round(np.min(X[pit]) - simfo[pit]['xmin_full'])/simfo[pit]['cell_length'])
                        y_pos_min = int(np.round(np.min(Y[pit]) - simfo[pit]['xmin_full'])/simfo[pit]['cell_length'])
                        if np.shape(f['vel'+args.axis[0]+'_'+simfo[pit]['movie_file_type']+'_'+args.axis]) == (2048, 2048):
                            velocity_data = [f['vel'+args.axis[0]+'_'+simfo[pit]['movie_file_type']+'_'+args.axis], f['vel'+args.axis[1]+'_'+simfo[pit]['movie_file_type']+'_'+args.axis]]
                        elif args.axis == 'xy':
                            velocity_data = [f['vel'+args.axis[0]+'_'+simfo[pit]['movie_file_type']+'_'+args.axis][:,:,0], f['vel'+args.axis[1]+'_'+simfo[pit]['movie_file_type']+'_'+args.axis][:,:,0]]
                        else:
                            velocity_data = [f['vel'+args.axis[0]+'_'+simfo[pit]['movie_file_type']+'_'+args.axis][:,0,:], f['vel'+args.axis[1]+'_'+simfo[pit]['movie_file_type']+'_'+args.axis][:,0,:]]
                        velx, vely = mym.get_quiver_arrays(y_pos_min, x_pos_min, X[pit], velocity_data[0], velocity_data[1], center_vel=center_vel)
                    else:
                        if args_dict[pit].image_center == 0 or has_particles == False:
                            center_pos = np.array([0.0, 0.0, 0.0])
                        else:
                            dd = f.all_data()
                            center_pos = np.array([dd['particle_posx'][args.image_center-1].in_units('AU'), dd['particle_posy'][args.image_center-1].in_units('AU'), dd['particle_posz'][args.image_center-1].in_units('AU')])
                        x_width = (xlim[1] -xlim[0])
                        y_width = (ylim[1] -ylim[0])
                        thickness = yt.YTArray(args.slice_thickness, 'AU')
                        
                        proj = yt.OffAxisProjectionPlot(f, L, [simfo[pit]['field'], 'cell_mass', 'velz_mw', 'magz_mw', 'Projected_Magnetic_Field_mw', 'Projected_Velocity_mw'], center=(center_pos, 'AU'), width=(x_width, 'AU'), depth=(args.slice_thickness, 'AU'))
                        image = (proj.frb.data[simfo[pit]['field']]/thickness.in_units('cm')).value
                        velx_full = (proj.frb.data[('gas', 'Projected_Velocity_mw')].in_units('g*cm**2/s')/thickness.in_units('cm')).value
                        vely_full = (proj.frb.data[('gas', 'velz_mw')].in_units('g*cm**2/s')/thickness.in_units('cm')).value
                        magx = (proj.frb.data[('gas', 'Projected_Magnetic_Field_mw')].in_units('g*gauss*cm')/thickness.in_units('cm')).value
                        magy = (proj.frb.data[('gas', 'magz_mw')].in_units('g*gauss*cm')/thickness.in_units('cm')).value
                        mass = (proj.frb.data[('gas', 'cell_mass')].in_units('cm*g')/thickness.in_units('cm')).value
                        
                        velx_full = velx_full/mass
                        vely_full = vely_full/mass
                        magx = magx/mass
                        magy = magy/mass
                        del mass

                        velx, vely = mym.get_quiver_arrays(0.0, 0.0, X[pit], velx_full, vely_full, center_vel=center_vel)
                        del velx_full
                        del vely_full

                        if len(frames) == 1:
                            if rank == 0:
                                pickle_file = paths[pit] + "movie_frame_" + ("%06d" % frames[frame_val]) + ".pkl"
                                file = open(pickle_file, 'wb')
                                pickle.dump((X[pit], Y[pit], image, magx, magy, X_vel[pit], Y_vel[pit], velx, vely, xlim, ylim, has_particles, part_info, simfo[pit], time_val,xabel, yabel), file)
                                file.close()
                                print("Created Pickle:", pickle_file, "for  file:", usable_files[pit][frame_val])
                        else:
                            pickle_file = paths[pit] + "movie_frame_" + ("%06d" % frames[frame_val]) + ".pkl"
                            file = open(pickle_file, 'wb')
                            pickle.dump((X[pit], Y[pit], image, magx, magy, X_vel[pit], Y_vel[pit], velx, vely, xlim, ylim, has_particles, part_info, simfo[pit], time_val,xabel, yabel), file)
                            file.close()
                            print("Created Pickle:", pickle_file, "for  file:", usable_files[pit][frame_val])
                    
                    f.close()

                plot = axes_dict[ax_label].pcolormesh(X[pit], Y[pit], image, cmap=plt.cm.gist_heat, norm=LogNorm(vmin=cbar_min, vmax=cbar_max), rasterized=True)
                plt.gca().set_aspect('equal')
                if frame_val > 0 or time_val > -1.0:
                    axes_dict[ax_label].streamplot(X[pit], Y[pit], magx, magy, density=4, linewidth=0.25, arrowstyle='-', minlength=0.5)
                else:
                    axes_dict[ax_label].streamplot(X[pit], Y[pit], magx, magy, density=4, linewidth=0.25, minlength=0.5)

                xlim = args_dict[pit]['xlim']
                ylim = args_dict[pit]['ylim']
                mym.my_own_quiver_function(axes_dict[ax_label], X_vel[pit], Y_vel[pit], velx, vely, plot_velocity_legend=bool(args_dict[pit]['annotate_velocity']), limits=[xlim, ylim], standard_vel=args.standard_vel)
                if args_dict[pit]['has_particles']:
                    if args.annotate_particles_mass == True:
                        mym.annotate_particles(axes_dict[ax_label], part_info['particle_position'], part_info['accretion_rad'], limits=[xlim, ylim], annotate_field=part_info['particle_mass'])
                    else:
                        mym.annotate_particles(axes_dict[ax_label], part_info['particle_position'], part_info['accretion_rad'], limits=[xlim, ylim], annotate_field=None)
                if args.plot_lref == True:
                    r_acc = np.round(part_info['accretion_rad'])
                    axes_dict[ax_label].annotate('$r_{acc}$='+str(r_acc)+'AU', xy=(0.98*simfo[pit]['xmax'], 0.93*simfo[pit]['ymax']), va="center", ha="right", color='w', fontsize=args_dict[pit].text_font)
                if args.annotate_time == "True" and pit == 0:
                    print("ANNONTATING TIME:", str(int(time_val))+'yr')
                    time_text = axes_dict[ax_label].text((xlim[0]+0.01*(xlim[1]-xlim[0])), (ylim[1]-0.03*(ylim[1]-ylim[0])), '$t$='+str(int(time_val))+'yr', va="center", ha="left", color='w', fontsize=args.text_font)
                    time_text.set_path_effects([path_effects.Stroke(linewidth=3, foreground='black'), path_effects.Normal()])
                    #ax.annotate('$t$='+str(int(time_val))+'yr', xy=(xlim[0]+0.01*(xlim[1]-xlim[0]), ylim[1]-0.03*(ylim[1]-ylim[0])), va="center", ha="left", color='w', fontsize=args.text_font)
                title_text = axes_dict[ax_label].text((np.mean(xlim)), (ylim[1]-0.03*(ylim[1]-ylim[0])), title, va="center", ha="center", color='w', fontsize=(args.text_font+2))
                title_text.set_path_effects([path_effects.Stroke(linewidth=3, foreground='black'), path_effects.Normal()])

                if positions[pit][0] == columns:
                    cbar = plt.colorbar(plot, pad=0.0, ax=axes_dict[ax_label])
                    cbar.set_label('Density (gcm$^{-3}$)', rotation=270, labelpad=14, size=args.text_font)
                axes_dict[ax_label].set_xlabel(args_dict[pit]['xabel'], labelpad=-1, fontsize=args.text_font)
                if positions[pit][0] == 1:
                    axes_dict[ax_label].set_ylabel(args_dict[pit]['yabel'], labelpad=-20, fontsize=args.text_font)
                axes_dict[ax_label].set_xlim(xlim)
                axes_dict[ax_label].set_ylim(ylim)
                for line in axes_dict[ax_label].xaxis.get_ticklines():
                    line.set_color('white')
                for line in axes_dict[ax_label].yaxis.get_ticklines():
                    line.set_color('white')

                plt.tick_params(axis='both', which='major', labelsize=16)
                for line in axes_dict[ax_label].xaxis.get_ticklines():
                    line.set_color('white')
                for line in axes_dict[ax_label].yaxis.get_ticklines():
                    line.set_color('white')

                if positions[pit][0] != 1:
                    yticklabels = axes_dict[ax_label].get_yticklabels()
                    plt.setp(yticklabels, visible=False)

                if positions[pit][0] == 1:
                    axes_dict[ax_label].tick_params(axis='y', which='major', labelsize=args.text_font)
                if positions[pit][1] == rows:
                    axes_dict[ax_label].tick_params(axis='x', which='major', labelsize=args.text_font)
                    if positions[pit][0] != 1:
                        xticklabels = axes_dict[ax_label].get_xticklabels()
                        plt.setp(xticklabels[0], visible=False)

                if len(usable_files[pit]) > 1:
                    if args.output_filename == None:
                        import pdb
                        pdb.set_trace()
                        file_name = save_dir + "movie_frame_" + ("%06d" % frames[frame_val])
                    else:
                        file_name = args.output_filename + "_" + str(int(time_val))
                else:
                    if args.output_filename != None:
                        file_name = args.output_filename
                    else:
                        import pdb
                        pdb.set_trace()
                        file_name = save_dir + "time_" + str(args.plot_time)

                plt.savefig(file_name + ".eps", format='eps', bbox_inches='tight')
                #plt.savefig(file_name + ".pdf", format='pdf', bbox_inches='tight')
                
                #plt.savefig(file_name + ".jpg", format='jpeg', bbox_inches='tight')
                call(['convert', '-antialias', '-quality', '100', '-density', '200', '-resize', '100%', '-flatten', file_name+'.eps', file_name+'.jpg'])
                os.remove(file_name + '.eps')

                del image
                del magx
                del magy
                del velx
                del vely
                
                if args.image_center != 0 and has_particles:
                    X[pit], Y[pit], X_vel[pit], Y_vel[pit] = original_positions
            print('Created frame', (frames[frame_val]), 'of', str(frames[-1]), 'on rank', rank, 'at time of', str(time_val), 'to save_dir:', file_name + '.eps')

        rit = rit +1
        if rit == size:
            rit = 0

    print("completed making movie frames on rank", rank)
Example #17
import os
import logging
import itertools
import pickle
import json
import warnings
from glob import glob

import tables

import numpy as np
import mdtraj as md

from mpi4py.MPI import COMM_WORLD as mpi_comm

if mpi_comm.Get_size() > 1:
    RANKSTR = "[Rank %s]" % mpi_comm.Get_rank()
    logging.basicConfig(
        level=logging.DEBUG if mpi_comm.Get_rank() == 0 else logging.INFO,
        format=('%(asctime)s ' + RANKSTR +
                ' %(name)-26s %(levelname)-7s %(message)s'),
        datefmt='%m-%d-%Y %H:%M:%S')
    mpi_mode = True
else:
    logging.basicConfig(
        level=logging.INFO,
        format=('%(asctime)s %(name)-8s %(levelname)-7s %(message)s'),
        datefmt='%m-%d-%Y %H:%M:%S')
    mpi_mode = False

from enspara.apps.reassign import reassign
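
A small usage note (hypothetical logger name, not from the original): with the configuration above, every record is prefixed with the rank whenever the script runs under MPI, e.g. "mpirun -n 2 python script.py":

# Each rank tags its own log lines via the RANKSTR format set above.
log = logging.getLogger("example")
log.info("process %d of %d ready", mpi_comm.Get_rank(), mpi_comm.Get_size())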
Example #18
    def __init__(self):
        self.f = None
        self.P = COMM_WORLD.Get_size()
        self.rank = COMM_WORLD.Get_rank()
Example #19
def get_world_size(comm: MPI.Comm = None) -> int:
    if comm is None:
        comm = _get_comm()
    return comm.Get_size()
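
The `_get_comm` helper is not shown; a minimal sketch, assuming it simply falls back to the global communicator:

# Hypothetical fallback helper assumed by get_world_size above.
from mpi4py import MPI

def _get_comm() -> MPI.Comm:
    # Default to COMM_WORLD when the caller passes no communicator.
    return MPI.COMM_WORLD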
Example #20
def main():

    args = parse_inputs()
    center = int(args.center)
    if args.adaptive_bins == 'False':
        args.adaptive_bins = False
    a = args.semimajor_axis
    max_radius = args.max_r

    file = open(args.file, 'rb')  # binary mode: required for pickle under Python 3
    loaded_fields = pickle.load(file)
    distance = loaded_fields[0]
    cell_mass = loaded_fields[1]
    part_mass = loaded_fields[2]

    rank = CW.Get_rank()
    size = CW.Get_size()

    dist_min = np.min(distance)
    dist_max = np.max(distance)
    if args.adaptive_bins:
        rs = [0]
        rs = rs + list(set(distance[distance <= max_radius]))
        rs = np.sort(rs)
        bin_freq = len(rs) // 500  # integer step for slicing below
        if bin_freq > 0:
            rs = rs[::bin_freq]
        rs = np.array(rs)
        rs = np.append(rs, max_radius)
    else:
        rs = np.linspace(0.0, max_radius, args.no_of_bins)
    bin_size = rs[-1] - rs[-2]
    #print "bin_size =", bin_size/1.49597870751e+13
    #print "max_radius =", max_radius/1.49597870751e+13
    rs = np.append(rs, rs[-1] + bin_size)
    enclosed_mass = np.array(np.zeros(np.shape(distance)))

    rit = 1
    printed = False
    print_cen = False
    for r in range(1, len(rs)):
        if rank == rit:
            enclosed_mass = np.array(np.zeros(np.shape(distance)))
            ind = np.where((distance >= rs[r - 1]) & (distance < rs[r]))[0]
            enclosed_dist = np.where(distance < rs[r - 1])[0]
            if len(enclosed_dist) == 0:
                enclosed_dist = np.where(distance < (dist_min + 1))
            enclosed_mass_val = np.sum(cell_mass[enclosed_dist])
            if center != 0:
                enclosed_mass_val = enclosed_mass_val + part_mass[center - 1]
                if print_cen == False:
                    #print "Centered on particle with mass", part_mass[center-1]/1.98841586e+33
                    print_cen = True
            if center != 0 and rs[r] > a and len(part_mass) > 1:
                if center == 1:
                    enclosed_mass_val = enclosed_mass_val + part_mass[1]
                else:
                    enclosed_mass_val = enclosed_mass_val + part_mass[0]
                if printed == False:
                    #print "Added other particle with mass", part_mass[0]/1.98841586e+33
                    printed = True
            elif center == 0 and rs[r] > a / 2. and len(part_mass) > 0:
                enclosed_mass_val = enclosed_mass_val + np.sum(part_mass)
                if printed == False:
                    #print "Added both particles with mass", np.sum(part_mass)/1.98841586e+33
                    printed = True
            enclosed_mass[ind] = enclosed_mass_val
            #print "enclosed mass =", enclosed_mass_val/1.98841586e+33, ", Radius =", rs[r]/1.49597870751e+13, "on rank", rank
            CW.send(enclosed_mass, dest=0, tag=rank)
        if rank == 0:
            enclosed_mass_add = CW.recv(source=rit, tag=rit)
            enclosed_mass = enclosed_mass + enclosed_mass_add
        rit = rit + 1
        if rit == size:
            rit = 1

    if rank == 0:
        os.remove(args.file)
        file = open(args.save_file, 'wb')
        print("pickle file:", args.save_file)
        pickle.dump(enclosed_mass, file)
        file.close()
Example #21
    th_e = mp.mpf(th_e)
    th_i = mp.mpf(tmp)

    tmp = (1/(mp.besselk(2, 1/th_e) * mp.besselk(2, 1/th_i))) * (((2*(th_e + th_i)**2 + 1)/(th_e + th_i)) * mp.besselk(1, (th_e + th_i)/(th_e*th_i)) + 2*mp.besselk(0, (th_e + th_i)/(th_e*th_i)))

#   print repr(tmp)

    return float(tmp)

def eqn(th_e, A, B, C):
    return ((3.0/8.0)*(m_e/m_i)*ln_Lambda)*(A - (m_e/m_i)*(1.0 + chi)*th_e)*f(th_e, A) - B*th_e*(1.0 + 4.0*th_e) + (1.0/4.0)*B*C

# ----------+---------- start: program execution ----------+----------

rank = COMM_WORLD.Get_rank()
size = COMM_WORLD.Get_size()

print('hello from ' + repr(rank))
sys.stdout.flush()
sys.stderr.flush()

A_grid = np.logspace(-15, 15, 301, endpoint = True)
B_grid = np.logspace(-15, 15, 301, endpoint = True)
C_grid = np.logspace(-6,  1,  71,  endpoint = True)
C_grid = np.insert(C_grid, 0, 0.0)

# ----------+---------- root process ----------+----------
if (rank == 0):
    # start the clock
    start_time = time.time()
Example #22
def main():

    rank = CW.Get_rank()
    size = CW.Get_size()

    args = parse_inputs()

    n_orb = int(args.no_orbits)
    n_systems = int(args.no_systems)
    q_min = 0.05
    my_orb = bo.random_orbits(n_orb=n_orb)
    US_group_vel = 10.
    UCL_group_vel = 4.
    #Madsen, 2002 gives the STANDARD ERROR of the US and UCL velocities to be 1.3 and 1.9km/s
    US_group_std = 1.3 * args.group_velocity_sigma  #From Preibisch et al., 2008
    UCL_group_std = 1.3 * args.group_velocity_sigma
    standard_std = {'F': 1.08, 'G': 0.63, 'K': 1.43, 'M': 2.27}  # 2.0
    astrophysical_std = args.astrophysical_std  #Astrophysical radial velocity uncertainty

    Object = []
    Region = []
    IR_excess = []
    Temp_sptype = []
    Pref_template = []
    Obs_info = []
    all_bayes = [[], []]

    RV_standard_info = {}

    sys.stdout.flush()
    CW.Barrier()

    #Read in RV standard list
    header = 0
    with open('/home/100/rlk100/RV_standard_list.csv', 'r') as f:
        reader = csv.reader(f)
        for row in reader:
            if header != 0:
                RV_standard_info[row[0]] = (float(row[5]), float(row[6]),
                                            float(row[7]))
            else:
                header = 1
        f.close()

    sys.stdout.flush()
    CW.Barrier()

    print("Reading in current spreadsheet", args.input_file)
    header = 0
    reshape_len = -1
    with open(args.input_file, 'r') as f:
        reader = csv.reader(f)
        for row in reader:
            if header != 0:
                if 'U4' in row[0]:
                    row[0] = 'UCAC4' + row[0].split('U4')[-1]
                Object.append(row[0])
                Region.append(row[1])
                IR_excess.append(row[5])
                Pref_template.append(row[15])  #row[18])
                Temp_sptype.append(row[16])  #row[19])
                if len(row) > 17:
                    Obs = np.array(row[17:])
                    Obs = np.delete(Obs, np.where(Obs == ''))
                    if reshape_len == -1:
                        for ob in Obs:
                            reshape_len = reshape_len + 1
                            if '/' in ob and ob != Obs[0]:
                                break
                    #if len(Obs) > 5:
                    #    Obs = np.reshape(Obs, (len(Obs)/5, 5))
                    Obs = np.reshape(Obs,
                                     (len(Obs) // reshape_len, reshape_len))
                    for ind_obs in Obs:
                        if '/' in ind_obs[0]:
                            new_format = '20' + ind_obs[0].split('/')[
                                -1] + '-' + ind_obs[0].split('/')[-2] + '-' + (
                                    "%02d" % int(ind_obs[0].split('/')[-3]))
                            ind_obs[0] = new_format
                else:
                    Obs = np.array([])
                Obs_info.append(Obs)
            if header == 0:
                header = 1
        f.close()
    del header

    sys.stdout.flush()
    CW.Barrier()

    Obj_bayes = np.nan * np.zeros(len(Object))

    #Read in currently calculated Bayes Factors:
    if args.restart_calc != 'False':
        print("Reading in calulated Bayes factors")
        header = 0
        with open(args.bayes_file, 'rU') as f:
            reader = csv.reader(f)
            for row in reader:
                if header != 0:
                    ind = Object.index(row[0])
                    Obj_bayes[ind] = float(row[2])
                    if row[1] == 'US':
                        all_bayes[0].append(float(row[2]))
                    else:
                        all_bayes[1].append(float(row[2]))
                    del ind
                else:
                    header = 1
            f.close()
        del header

    sys.stdout.flush()
    CW.Barrier()

    if args.restart_calc != 'False' and rank == 0:
        print("Creating new bayes file")
        f = open(args.bayes_file, 'w')
        f.write('Object,Region,Bayes_factor\n')
        f.close()

    sys.stdout.flush()
    CW.Barrier()

    inds = list(range(len(Object)))
    skip_inds = np.where(np.array(IR_excess) == 'NN')[0]
    for skit in skip_inds:
        inds.remove(skit)
    skip_inds = np.where(np.array(Pref_template) == '')[0]
    for skit in skip_inds:
        inds.remove(skit)
    del skip_inds
    del IR_excess

    rit = 0
    sys.stdout.flush()
    CW.Barrier()
    for obj in inds:
        Pref_template_name = Pref_template[obj].split('_')[0]
        if np.isnan(Obj_bayes[obj]) and rank == rit:
            print("Doing object:", Object[obj], "on rank:", rank)
            likelihoods = []
            single_likelihoods = []

            #Produces masses within +/- 10% of the mass of the template.
            #!!! Mike suggests a single mass.
            M_1 = (np.random.random(n_systems) *
                   (RV_standard_info[Pref_template_name][1] -
                    RV_standard_info[Pref_template_name][0])
                   ) + RV_standard_info[Pref_template_name][0]

            #Generates mass ratios with minimum mass ratio of q_min (default 0.01?, should this be dependent on the primary mass? Because sometimes low mass ratios could give very low mass companions i.e. BD mass...)
            #!!! Mike suggests 0.05 due to brown dwarf desert.
            q = (np.random.random(n_systems) * (1 - q_min)) + q_min

            #from Primary masses and mass ratios, secondary masses can get calculated
            M_2 = M_1 * q

            #Get dates of the observations of the object
            jds = Obs_info[obj][:, 1].astype(float)

            #get observed data, and add in the error in the standards in quadrature.
            #This relates to the spectrograph stability
            #There is also an astrophysical error due to these objects being rapid rotators etc.
            RV_standard_err = standard_std[Temp_sptype[obj][0]]
            err = np.sqrt(Obs_info[obj][:, 3].astype(float)**2 +
                          RV_standard_err**2 + astrophysical_std**2)
            observed_rv = Obs_info[obj][:, 2].astype(float)

            #IN A LOOP iterate over random orbits:
            for orb in range(n_orb):
                #FIXME: Figure out which velocity to use!
                if Region[obj] == 'US':
                    if args.group_velocity == 'True':
                        v_group = np.random.normal(
                            US_group_vel,
                            np.sqrt(US_group_std**2 + RV_standard_err**2),
                            n_systems)
                    else:
                        v_group = np.random.normal(
                            np.mean(observed_rv),
                            np.sqrt(US_group_std**2 + RV_standard_err**2),
                            n_systems)
                else:
                    if args.group_velocity == 'True':
                        v_group = np.random.normal(
                            UCL_group_vel,
                            np.sqrt(UCL_group_std**2 + RV_standard_err**2),
                            n_systems)
                    else:
                        v_group = np.random.normal(
                            np.mean(observed_rv),
                            np.sqrt(UCL_group_std**2 + RV_standard_err**2),
                            n_systems)

                #generate orbit?
                #!!! Find just one set of orbital parameters at a time, and
                #scale the RVS. OR if you really want you can compute a, i etc
                #yourself and plug these into my_orb, but some RV scaling is still needed.
                rho, theta, normalised_vr = bo.binary_orbit(my_orb,
                                                            jds,
                                                            plot_orbit_no=orb)
                for system in range(n_systems):
                    actual_vr = bo.scale_rv(normalised_vr,
                                            my_orb['P'][orb],
                                            M_1[system],
                                            M_2[system],
                                            my_orb['i'][orb],
                                            group_velocity=v_group[system])

                    this_likelihood = bo.calc_likelihood(
                        actual_vr, observed_rv, err)
                    likelihoods.append(this_likelihood)
                    #THEN CALCULATE PROBABILITY OF BEING A SINGLE STAR
                    single_likelihoods.append(
                        bo.calc_likelihood(v_group[system], observed_rv, err))
                    del actual_vr
                    del this_likelihood
                del v_group
            del M_1
            del q
            del M_2
            del jds
            del RV_standard_err
            del err
            del observed_rv

            #THEN CALCULATE BAYES FACTOR
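            #(ratio of the Monte Carlo estimates of the marginal likelihoods:
            #binary model averaged over the sampled orbits and systems, versus
            #a single star at the sampled group velocity)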
            bayes_factor = np.mean(likelihoods) / np.mean(single_likelihoods)
            print(("Bayes Factor: {0:5.2f} for ".format(bayes_factor) +
                   Object[obj]), "on rank", rank, "with SpT", Temp_sptype[obj])
            del likelihoods
            del single_likelihoods
            #0.0 flags US, 1.0 flags UCL in the first element of send_data
            region_flag = 0.0 if Region[obj] == 'US' else 1.0
            send_data = [region_flag, float(obj), bayes_factor, Temp_sptype[obj]]
            #print("Sending data:", send_data, "from rank:", rank)
            if rank == 0:
                bayes_update = send_data
            else:
                CW.send(send_data, dest=0, tag=rank)
            del send_data
            if rank == 0:
                all_bayes[int(bayes_update[0])].append(bayes_update[2])
                Obj_bayes[int(bayes_update[1])] = bayes_update[2]
                print("Updated Bayes factors retrieved from rank 0 for object",
                      Object[int(bayes_update[1])])
                with open(args.bayes_file, 'a') as f:
                    f.write('{},{},{},{}\n'.format(
                        Object[int(bayes_update[1])],
                        Region[int(bayes_update[1])],
                        bayes_update[2], bayes_update[3]))
                del bayes_update

        rit = rit + 1
        if rit == size:
            sys.stdout.flush()
            CW.Barrier()
            rit = 0
            if rank == 0:

                print("UPDATING CALCULATED BAYES VALUES")
                for orit in range(1, size):
                    bayes_update = CW.recv(source=orit, tag=orit)
                    all_bayes[int(bayes_update[0])].append(bayes_update[2])
                    Obj_bayes[int(bayes_update[1])] = bayes_update[2]
                    print("Updated Bayes factors retrieved from rank", orit,
                          "for object", Object[int(bayes_update[1])])
                    with open(args.bayes_file, 'a') as f:
                        f.write('{},{},{},{}\n'.format(
                            Object[int(bayes_update[1])],
                            Region[int(bayes_update[1])],
                            bayes_update[2], bayes_update[3]))
                    del bayes_update
            sys.stdout.flush()
            CW.Barrier()

    sys.stdout.flush()
    CW.Barrier()
    if rank == 0:

        print("UPDATING CALCULATED BAYES VALUES")
        for orit in range(1, size):
            bayes_update = CW.recv(source=orit, tag=orit)
            all_bayes[int(bayes_update[0])].append(bayes_update[2])
            Obj_bayes[int(bayes_update[1])] = bayes_update[2]
            print("Updated Bayes factors retrieved from rank", orit,
                  "for object", Object[int(bayes_update[1])])
            with open(args.bayes_file, 'a') as f:
                f.write('{},{},{},{}\n'.format(
                    Object[int(bayes_update[1])],
                    Region[int(bayes_update[1])],
                    bayes_update[2], bayes_update[3]))
            del bayes_update
    #Barrier is collective, so every rank must reach it (not only rank 0).
    sys.stdout.flush()
    CW.Barrier()
    print("Finished Calculating bayes factors!")
Beispiel #23
0
def main():
    args = parse_args()
    assert (not args.pretrained_model_path
            or args.pretrained_model_path.endswith(".ckpt"))
    os.makedirs(args.save_dir, exist_ok=True)
    save_args(args, args.save_dir)
    set_seed(args.seed + COMM_WORLD.Get_rank() * 100)
    nprocs = COMM_WORLD.Get_size()

    # Initialize model and agent policy
    aurora = Aurora(
        args.seed + COMM_WORLD.Get_rank() * 100,
        args.save_dir,
        int(args.val_freq / nprocs),
        args.pretrained_model_path,
        tensorboard_log=args.tensorboard_log,
    )
    # Load the training and validation traces.
    training_traces = []
    val_traces = []
    if args.curriculum == "udr":
        config_file = args.config_file
        if args.train_trace_file:
            with open(args.train_trace_file, "r") as f:
                for line in f:
                    line = line.strip()
                    training_traces.append(Trace.load_from_file(line))

        if args.validation and args.val_trace_file:
            with open(args.val_trace_file, "r") as f:
                for line in f:
                    line = line.strip()
                    if args.dataset == "pantheon":
                        queue = 100  # dummy value
                        val_traces.append(
                            Trace.load_from_pantheon_file(line,
                                                          queue=queue,
                                                          loss=0))
                    elif args.dataset == "synthetic":
                        val_traces.append(Trace.load_from_file(line))
                    else:
                        raise ValueError(args.dataset)
        train_scheduler = UDRTrainScheduler(
            config_file,
            training_traces,
            percent=args.real_trace_prob,
        )
    elif args.curriculum == "cl1":
        config_file = args.config_files[0]
        train_scheduler = CL1TrainScheduler(args.config_files, aurora)
    elif args.curriculum == "cl2":
        config_file = args.config_file
        train_scheduler = CL2TrainScheduler(config_file, aurora, args.baseline)
    else:
        raise NotImplementedError(args.curriculum)

    aurora.train(
        config_file,
        args.total_timesteps,
        train_scheduler,
        tb_log_name=args.exp_name,
        validation_traces=val_traces,
    )
Beispiel #24
0
def main():

    args = parse_inputs()
    center = int(args.center)
    if args.adaptive_bins == 'False':
        args.adaptive_bins = False
    a = args.semimajor_axis
    max_radius = args.max_r

    with open(args.file, 'rb') as f:  #pickle requires binary mode
        loaded_fields = pickle.load(f)
    distance = loaded_fields[0]
    y = loaded_fields[1]

    rank = CW.Get_rank()
    size = CW.Get_size()

    dist_min = np.min(distance)
    dist_max = np.max(distance)
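    #Adaptive bins follow the (sorted, de-duplicated) cell distances
    #themselves, thinned by a factor of two, so every bin contains cells;
    #otherwise fall back to fixed-width linear bins out to max_radius.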
    if args.adaptive_bins:
        rs = [0]
        rs = rs + list(set(distance[distance <= max_radius]))
        rs = np.sort(rs)
        rs = rs[::2]
        rs = np.array(rs)
        rs = np.append(rs, max_radius)
    else:
        rs = np.linspace(0.0, max_radius, args.no_of_bins)
    bin_size = rs[-1] - rs[-2]
    rs = np.append(rs, rs[-1] + bin_size)
    gradient = np.zeros(np.shape(distance))

    rit = 1
    printed = False
    print_cen = False
    for r in range(len(rs)):
        if rank == rit:
            grad_add = np.zeros(np.shape(distance))
            if r - 1 < 0:
                r_0 = rs[r]
            else:
                r_0 = rs[r - 1]
            r_1 = rs[r]
            if r + 1 == len(rs):
                r_2 = rs[r]
            else:
                r_2 = rs[r + 1]
            if r + 2 == len(rs) + 1:
                r_3 = rs[r]
            elif r + 2 == len(rs):
                r_3 = rs[r + 1]
            else:
                r_3 = rs[r + 2]
            shell_0_1 = np.where((distance >= r_0) & (distance < r_1))[0]
            shell_1_2 = np.where((distance >= r_1) & (distance < r_2))[0]
            shell_2_3 = np.where((distance >= r_2) & (distance < r_3))[0]
            if len(shell_0_1) == 0:
                y_0_1 = 0.0
            else:
                y_0_1 = np.mean(y[shell_0_1])
            if len(shell_1_2) == 0:
                y_1_2 = 0.0
            else:
                y_1_2 = np.mean(y[shell_1_2])
            if len(shell_2_3) == 0:
                y_2_3 = 0.0
            else:
                y_2_3 = np.mean(y[shell_2_3])
            grad_val = ((y_1_2 - y_0_1) / (r_1 - r_0) + (y_2_3 - y_1_2) /
                        (r_2 - r_1)) / 2.
            print("Gradient =", grad_val, "at Distance =", r_1, "on rank",
                  rank)
            grad_add[shell_1_2] = grad_val
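            #NOTE: enclosed_dist, cell_mass, part_mass, enclosed_mass and ind
            #are assumed to be defined earlier in the full script; their
            #initialisation is not shown in this excerpt.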
            if len(enclosed_dist) == 0:
                enclosed_dist = np.where(distance < (dist_min + 1))[0]
            enclosed_mass_val = np.sum(cell_mass[enclosed_dist])
            if center != 0:
                enclosed_mass_val = enclosed_mass_val + part_mass[center - 1]
                if not print_cen:
                    #print("Centered on particle with mass", part_mass[center - 1] / 1.98841586e+33)
                    print_cen = True
            if center != 0 and rs[r] > a and len(part_mass) > 1:
                if center == 1:
                    enclosed_mass_val = enclosed_mass_val + part_mass[1]
                else:
                    enclosed_mass_val = enclosed_mass_val + part_mass[0]
                if not printed:
                    #print("Added other particle with mass", part_mass[0] / 1.98841586e+33)
                    printed = True
            elif center == 0 and rs[r] > a / 2. and len(part_mass) > 0:
                enclosed_mass_val = enclosed_mass_val + np.sum(part_mass)
                if not printed:
                    #print("Added both particles with mass", np.sum(part_mass) / 1.98841586e+33)
                    printed = True
            enclosed_mass[ind] = enclosed_mass_val
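            #1.98841586e+33 is the solar mass in g; 1.49597870751e+13 is 1 au in cm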
            print("enclosed mass =", enclosed_mass_val / 1.98841586e+33,
                  ", Radius =", rs[r] / 1.49597870751e+13, "on rank", rank)
            CW.send(enclosed_mass, dest=0, tag=rank)
        if rank == 0:
            enclosed_mass_add = CW.recv(source=rit, tag=rit)
            enclosed_mass = enclosed_mass + enclosed_mass_add
        rit = rit + 1
        if rit == size:
            rit = 1

    if rank == 0:
        os.remove(args.file)
        print("pickle file:", args.save_file)
        with open(args.save_file, 'wb') as f:  #pickle requires binary mode
            pickle.dump(enclosed_mass, f)
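
For reference, the shell-averaged gradient that the loop above spreads across ranks can be computed serially in a few lines; a sketch assuming distance, y and the bin edges rs as built in the example (it uses numpy.gradient rather than the explicit two-sided average):

import numpy as np

def shell_means(distance, y, rs):
    # Mean of y inside each shell [rs[k], rs[k+1]); empty shells give 0.
    means = np.zeros(len(rs) - 1)
    for k in range(len(rs) - 1):
        in_shell = (distance >= rs[k]) & (distance < rs[k + 1])
        if in_shell.any():
            means[k] = y[in_shell].mean()
    return means

def radial_gradient(distance, y, rs):
    # Centred finite differences of the shell means at the shell midpoints.
    mids = 0.5 * (rs[:-1] + rs[1:])
    return np.gradient(shell_means(distance, y, rs), mids)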
Beispiel #25
0
import matplotlib.patches
import collections.abc
import sys
from scipy import stats
from mpi4py.MPI import COMM_WORLD as CW


def flatten(x):
    if isinstance(x, collections.abc.Iterable):  #collections.Iterable was removed in Python 3.10
        return [a for i in x for a in flatten(i)]
    else:
        return [x]
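

# e.g. flatten([1, [2, (3, 4)], 5]) returns [1, 2, 3, 4, 5]. Note that
# strings are iterable, so a bare str sends flatten into unbounded
# recursion; guard with an isinstance(x, str) check if that matters.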


rank = CW.Get_rank()
size = CW.Get_size()

two_col_width = 7.20472  #inches
single_col_width = 3.50394  #inches
page_height = 10.62472  #inches
font_size = 10

sys.stdout.flush()
CW.Barrier()

pickle_file = sys.argv[1]
true_birth_con_pickle = sys.argv[2]
plot_gradient = False
read_pickle = sys.argv[3] == 'True'  #bool() of any non-empty string is True, even 'False'
baseline_yr = float(sys.argv[4])
#plot_key = sys.argv[2]
Beispiel #26
0
import logging
import itertools
import pickle
import json
from glob import glob

import numpy as np
import mdtraj as md

try:
    # this mpi will get overridden by the enspara mpi module in a few lines.
    from mpi4py.MPI import COMM_WORLD as mpi

    # this happens now and here because otherwise these changes to logging
    # don't propagate to enspara submodules.
    if mpi.Get_size() > 1:
        RANKSTR = "[Rank %s]" % mpi.Get_rank()
        logging.basicConfig(
            level=logging.DEBUG if mpi.Get_rank() == 0 else logging.INFO,
            format=('%(asctime)s ' + RANKSTR +
                    ' %(name)-26s %(levelname)-7s %(message)s'),
            datefmt='%m-%d-%Y %H:%M:%S')
        mpi_mode = True
    else:
        logging.basicConfig(
            level=logging.INFO,
            format=('%(asctime)s %(name)-8s %(levelname)-7s %(message)s'),
            datefmt='%m-%d-%Y %H:%M:%S')
        mpi_mode = False

except ModuleNotFoundError: