Example #1
    def array(self, N=0, filename=None, component=None, root=0):
        """Dump data to numpy format on root processor."""
        assert (N == 0 or N == 1)
        is_root = comm.Get_rank() == root
        size = self.get_total_number_probes() if is_root else len(self)
        comp = self.value_size() if component is None else 1
        z = zeros((size, comp))

        # Retrieve all values
        if len(self) > 0:
            for k in range(comp):
                if is_root:
                    ids = self.get_probe_ids()
                    z[ids, k] = self.get_probes_component_and_snapshot(k, N)
                else:
                    z[:, k] = self.get_probes_component_and_snapshot(k, N)

        # Collect on root
        recvfrom = comm.gather(len(self), root=root)
        if is_root:
            for j, k in enumerate(recvfrom):
                if comm.Get_rank() != j:
                    ids = comm.recv(source=j, tag=101)
                    z0 = comm.recv(source=j, tag=102)
                    z[ids, :] = z0[:, :]
        else:
            ids = self.get_probe_ids()
            comm.send(ids, dest=root, tag=101)
            comm.send(z, dest=root, tag=102)

        if is_root:
            if filename:
                save(filename + "_statistics", z)
            return squeeze(z)
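A hypothetical call sketch for the method above, assuming it lives on a fenicstools-style Probes collection (`probes` and the filename are illustrative):

# On the root rank this returns the assembled (size, comp) array, squeezed;
# non-root ranks implicitly return None.
z = probes.array(N=0, filename='probes')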
Example #2
def main():
    args = parse_args()
    assert args.pretrained_model_path is None or args.pretrained_model_path.endswith(
        ".ckpt")
    os.makedirs(args.save_dir, exist_ok=True)
    save_args(args)
    set_seed(args.seed + COMM_WORLD.Get_rank() * 100)
    nprocs = COMM_WORLD.Get_size()

    # Initialize model and agent policy
    aurora = Aurora(args.seed + COMM_WORLD.Get_rank() * 100, args.save_dir,
                    int(7200 / nprocs), args.pretrained_model_path,
                    tensorboard_log=args.tensorboard_log)
    # training_traces, validation_traces,
    training_traces = []
    val_traces = []
    if args.train_trace_file:
        with open(args.train_trace_file, 'r') as f:
            for line in f:
                line = line.strip()
                if args.dataset == 'pantheon':
                    queue = 100  # dummy value
                    # if "ethernet" in line:
                    #     queue = 500
                    # elif "cellular" in line:
                    #     queue = 50
                    # else:
                    #     queue = 100
                    training_traces.append(Trace.load_from_pantheon_file(
                        line, queue=queue, loss=0))
                elif args.dataset == 'synthetic':
                    training_traces.append(Trace.load_from_file(line))
                else:
                    raise ValueError('unknown dataset: ' + args.dataset)

    if args.val_trace_file:
        with open(args.val_trace_file, 'r') as f:
            for line in f:
                line = line.strip()
                if args.dataset == 'pantheon':
                    queue = 100  # dummy value
                    # if "ethernet" in line:
                    #     queue = 500
                    # elif "cellular" in line:
                    #     queue = 50
                    # else:
                    #     queue = 100
                    val_traces.append(Trace.load_from_pantheon_file(
                        line, queue=queue, loss=0))
                elif args.dataset == 'synthetic':
                    val_traces.append(Trace.load_from_file(line))
                else:
                    raise ValueError('unknown dataset: ' + args.dataset)
    print(args.randomization_range_file)

    aurora.train(args.randomization_range_file,
                 args.total_timesteps, tot_trace_cnt=args.total_trace_count,
                 tb_log_name=args.exp_name, validation_flag=args.validation,
                 training_traces=training_traces,
                 validation_traces=val_traces)
Example #3
    def array(self, N=None, filename=None, component=None, root=0):
        """Dump data to numpy format on root processor for all or one snapshot."""
        is_root = comm.Get_rank() == root
        size = self.get_total_number_probes() if is_root else len(self)
        comp = self.value_size() if component is None else 1
        if N is not None:
            z = zeros((size, comp))
        else:
            z = zeros((size, comp, self.number_of_evaluations()))

        # Get all values
        if len(self) > 0:
            if N is not None:
                for k in range(comp):
                    if is_root:
                        ids = self.get_probe_ids()
                        z[ids, k] = self.get_probes_component_and_snapshot(k, N)
                    else:
                        z[:, k] = self.get_probes_component_and_snapshot(k, N)
            else:
                for i, (index, probe) in enumerate(self):
                    j = index if is_root else i
                    for k in range(self.value_size()):
                        z[j, k, :] = probe.get_probe_sub(k)

        # Collect values on root
        recvfrom = comm.gather(len(self), root=root)
        if is_root:
            for j, k in enumerate(recvfrom):
                if comm.Get_rank() != j:
                    ids = comm.recv(source=j, tag=101)
                    z0 = comm.recv(source=j, tag=102)
                    z[ids, :] = z0[:, :]
        else:
            ids = self.get_probe_ids()
            comm.send(ids, dest=root, tag=101)
            comm.send(z, dest=root, tag=102)

        if is_root:
            if filename:
                if N is not None:
                    save(filename + "_snapshot_" + str(N), z)
                else:
                    save(filename + "_all", z)
            return squeeze(z)
Example #4
def get_cpu_raw(cpu_data, k):
    # Make sure overlapped data is accurate as well.
    xr = space.get_space_info()['x_range']
    if comm.Get_rank() == 0:
        pad_back = cpu_data[-k:, :, :]
    else:
        pad_back = cpu_data[xr[0] - k:xr[0], :, :]

    if comm.Get_rank() == comm.Get_size() - 1:
        pad_front = cpu_data[:k, :, :]
    else:
        pad_front = cpu_data[xr[1]:xr[1] + k, :, :]

    return np.concatenate((pad_back, cpu_data[xr[0]:xr[1], :, :], pad_front),
                          axis=0)
Example #5
def kirchhoff_integral(front, back):
    rank = multi_process.Get_rank()

    source_count = front.source.source_count
    num = process_number

    # Split the source indices among the num - 1 worker ranks.
    p0 = source_count % (num - 1)
    p1 = source_count // (num - 1)
    parameters = list()

    if p0 == 0:
        for i in range(num - 1):
            parameters.append(range(p1 * i, p1 * (i + 1)))
    else:
        for i in range(num - 1):
            if i + 1 != num - 1:
                parameters.append(range(p1 * i, p1 * (i + 1)))
            else:
                # The last worker also takes the p0 leftover sources.
                parameters.append(range(p1 * (num - 2), p1 * (num - 1) + p0))

    if rank == 0:
        # Tell each worker which slice of the source list it owns
        # (C int dtype to match mpi.INT on both ends).
        for i in range(num - 1):
            multi_process.Send([np.array(i, dtype='i'), mpi.INT],
                               dest=i + 1, tag=i + 1)

        # Collect the computed wavefronts from every worker.
        for i in range(num - 1):
            isize = len(parameters[i])
            wavefronts = np.zeros(
                (isize, int(2 * back.size[1] + 1), int(2 * back.size[0] + 1)),
                dtype=np.complex128)

            multi_process.Recv([wavefronts, mpi.COMPLEX],
                               source=i + 1,
                               tag=100 * i + 1)

            for k in range(isize):
                back.wavefront.append(wavefronts[k, :, :])

        back.intensity = np.sum(np.abs(np.array(back.wavefront))**2, 0)

    else:
        # Receive this worker's slice index (C int dtype to match mpi.INT).
        index = np.array(0, dtype='i')
        multi_process.Recv([index, mpi.INT], source=0, tag=rank)

        results = [
            _propagate._point(i, front, back) for i in parameters[int(index)]
        ]

        multi_process.Send([np.array(results), mpi.COMPLEX],
                           dest=0,
                           tag=100 * int(index) + 1)
Example #6
def debug(*s):
    import sys
    from mpi4py.MPI import COMM_WORLD
    print('[rank:{}]'.format(COMM_WORLD.Get_rank()),
          *s,
          file=sys.stderr,
          flush=True)
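A minimal usage sketch for the helper above; the launch command and script name are illustrative:

# e.g. run as: mpiexec -n 2 python debug_demo.py
from mpi4py.MPI import COMM_WORLD

debug('world size =', COMM_WORLD.Get_size())  # each rank tags its own stderr line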
Example #7
def _init_gpu(comm):
    """ Chooses a gpu and creates a context on it. """
    # Find out how many GPUs are available to us on this node.
    driver.init()
    num_gpus = driver.Device.count()

    # Figure out the names of the other hosts.
    rank = comm.Get_rank()  # Find out which process I am.
    name = MPI.Get_processor_name()  # The name of my node.
    hosts = comm.allgather(name)  # Get the names of all the other hosts

    # Find out which GPU to take (by precedence).
    gpu_id = hosts[0:rank].count(name)
    if gpu_id >= num_gpus:
        raise TypeError('No GPU available.')

    # Create a context on the appropriate device.
    for k in range(num_gpus):
        try:
            device = driver.Device((gpu_id + k) % num_gpus)
            context = device.make_context()
        except:
            continue
        else:
            #             print "On %s: process %d taking gpu %d of %d.\n" % \
            #                 (name, rank, gpu_id+k, num_gpus)
            break
    else:
        # The loop never hit `break`: no context could be created on any device.
        raise TypeError('Failed to create a context on any GPU.')

    return device, context  # Return device and context.
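A minimal usage sketch for _init_gpu, assuming `driver` is pycuda.driver and that the caller is responsible for releasing the context:

from mpi4py import MPI

device, context = _init_gpu(MPI.COMM_WORLD)
try:
    pass  # allocate GPU arrays, launch kernels, etc.
finally:
    context.pop()  # release this rank's CUDA context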
Example #8
def calc(psi, nt, C):
    i = slice(halo, psi.size - halo)

    for _ in range(nt):
        ###
        #psi[:halo] = psi[i][-halo:]
        #psi[-halo:] = psi[i][:halo]

        rank = mpi.Get_rank()
        size = mpi.Get_size()

        right = (rank + 1) % size
        left = (rank - 1 + size) % size

        psi_without_halo = psi[i]
        psi_with_halo = psi

        mpi.send(psi_without_halo[-halo:], dest=right)
        psi_with_halo[:halo] = mpi.recv(source=left)
        mpi.send(psi_without_halo[:halo], dest=left)
        psi_with_halo[-halo:] = mpi.recv(source=right)
        ###

        psi[i] = upwind(psi, i, C)

    return psi
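The blocking send/recv pairs above can deadlock once messages exceed MPI's internal buffering; a sketch of the same halo exchange using mpi4py's combined sendrecv (variable names as in the function above), which avoids that ordering hazard:

psi_with_halo[:halo] = mpi.sendrecv(
    psi_without_halo[-halo:], dest=right, source=left)
psi_with_halo[-halo:] = mpi.sendrecv(
    psi_without_halo[:halo], dest=left, source=right)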
Example #9
def simulate(name, check_success_only=False):
    """ Read simulation from input file, simulate, and write out results. """

    # Reset the environment variables pointing to the temporary directory.
    tempfile.tempdir = '/tmp'

    # Create the reporter function.
    write_status = lambda msg: open(name + '.status', 'a').write(msg)
    if comm.Get_rank() == 0:
        # write_status('EXEC initializing\n')
        def rep(err):
            write_status('%e\n' % err)
    else:  # No reporting needed for non-root nodes.

        def rep(err):
            pass

    # Get input parameters.
    params = get_parameters(name)

    # Define operations needed for the lumped bicg operation.
    b, x, ops, post_cond = maxwell_ops_lumped.ops(params)

    # Solve!
    start_time = time.time()
    rep.stime = start_time
    x, err, success = bicg.solve_symm_lumped(b, x=x,
                                             max_iters=params['max_iters'],
                                             reporter=rep,
                                             err_thresh=params['err_thresh'],
                                             **ops)

    if check_success_only:  # Don't write output, just see if we got a success.
        return success

    # Gather results onto root's host memory.
    result = {'E': [E.get() for E in x],
              'err': err,
              'success': success}

    # Write results to output file.
    if comm.Get_rank() == 0:
        result['E'] = post_cond(result['E'])  # Apply postconditioner.
        write_results(name, result)

    return success
Example #10
def simulate(N, D, S, G, dt):
    x0, v0, m = initial_cond(N, D)
    pool = Pool()
    if COMM_WORLD.Get_rank() == 0:
        for s in range(S):
            x1, v1 = timestep(x0, v0, G, m, dt, pool)
            x0, v0 = x1, v1
    else:
        pool.wait()
Example #11
def main(argv=None):

    args = process_command_line(argv)

    # note that in MPI mode, lengths will be global, whereas data will
    # be local (i.e. only this node's data).
    lengths, data = load_trjs_or_features(args)

    kwargs = {}
    if args.cluster_iterations is not None:
        kwargs['kmedoids_updates'] = int(args.cluster_iterations)

    clustering = args.Clusterer(metric=args.cluster_distance,
                                n_clusters=args.cluster_number,
                                cluster_radius=args.cluster_radius,
                                mpi_mode=mpi_mode,
                                **kwargs)

    clustering.fit(data)

    logger.info("Clustered %s frames into %s clusters in %s seconds.",
                sum(lengths), len(clustering.centers_), clustering.runtime_)

    result = clustering.result_
    if mpi_mode:
        local_ctr_inds, local_dists, local_assigs = \
            result.center_indices, result.distances, result.assignments

        with timed("Reassembled dist and assign arrays in %.2f sec",
                   logging.info):
            all_dists = mpi.ops.assemble_striped_ragged_array(
                local_dists, lengths)
            all_assigs = mpi.ops.assemble_striped_ragged_array(
                local_assigs, lengths)
            ctr_inds = mpi.ops.convert_local_indices(local_ctr_inds, lengths)

        result = ClusterResult(center_indices=ctr_inds,
                               distances=all_dists,
                               assignments=all_assigs,
                               centers=result.centers)
    result = result.partition(lengths)

    if mpi_comm.Get_rank() == 0:
        write_centers_indices(args.center_indices,
                              [(t, f * args.subsample)
                               for t, f in result.center_indices])
        write_centers(result, args)
        write_assignments_and_distances_with_reassign(result, args)
    mpi_comm.barrier()

    logger.info("Success! Data can be found in %s.",
                os.path.dirname(args.distances))

    return 0
Example #12
def lens(front, mirror, mode):

    rank = multi_process.Get_rank()

    if rank == 0:
        result = _propagate._lens(front, mirror, mode)
    else:
        result = None

    recv_data = multi_process.bcast(result, root=0)
    mirror.lens = recv_data
Example #13
    def test_partition(self):
        """ Make sure the x_ranges span the entire space without any gaps. """
        shapes = ((200, 30, 10), (33, 10, 10), (130, 5, 5), (111, 2, 2))
        for shape in shapes:
            space.initialize_space(shape)
            x = comm.gather(space.get_space_info()['x_range'])
            if comm.Get_rank() == 0:
                self.assertEqual(x[0][0], 0)
                self.assertEqual(x[-1][-1], space.get_space_info()['shape'][0])
                for k in range(len(x) - 1):
                    self.assertEqual(x[k][1], x[k + 1][0])
Example #14
    def test_recover(self):
        """ Make sure we can store and retrieve information from the GPU. """
        for case in self.cases:
            space.initialize_space(case['shape'])
            data = np.random.randn(*case['shape']).astype(case['dtype'])
            cpu_data = np.empty_like(data)
            comm.Allreduce(data, cpu_data)
            g = Grid(cpu_data)
            gpu_data = g.get()
            if comm.Get_rank() == 0:
                self.assertTrue((cpu_data == gpu_data).all())

            # Test with-overlap cases as well.
            for k in range(1, 3):
                g = Grid(cpu_data, x_overlap=k)
                gpu_data = g.get()
                if comm.Get_rank() == 0:
                    self.assertTrue((cpu_data == gpu_data).all())

                cpu_raw = get_cpu_raw(cpu_data, k)
                self.assertTrue((cpu_raw == g._get_raw()).all())
Example #15
def source_spread(source, back):

    rank = multi_process.Get_rank()

    if rank == 0:
        result = _propagate._source_spread(source, back)
    else:
        result = None

    # Broadcast
    recv_data = multi_process.bcast(result, root=0)
    back.wavefront = recv_data[0]
    back.intensity = recv_data[1]
Example #16
    def __init__(self, shape, device, context):
        """ Constructor for the Space class. 

        Input variables
        shape -- Three-element tuple of positive integers defining the size of
            the space in the x-, y-, and z-directions.

        """

        # Make sure shape has exactly three elements.
        if len(shape) != 3:
            raise TypeError('Shape must have exactly three elements.')

        # Make sure they are all integers.
        if any([type(s) is not int for s in shape]):
            raise TypeError('Shape must have only integer elements.')

        # Make sure all elements are positive.
        if any([s < 1 for s in shape]):
            raise TypeError('Shape must have only positive elements.')

#         # Make sure stencil is a single, non-negative integer.
#         if (type(stencil) is not int) or (stencil < 0):
#             raise TypeError('Stencil must be a non-negative scalar integer.')

        # Initialize the space.
        self.shape = shape

        # Get MPI information.
        rank = comm.Get_rank()
        size = comm.Get_size()

        # Nodes to pass forward and backward (along x) to.
        self.mpi_adj = {'forw': (rank + 1) % size, 'back': (rank - 1) % size}

        # Grid is too small to be partitioned.
        if (size > self.shape[0]):
            raise TypeError('Shape is too short along x to be partitioned.')

        # Create the context on the appropriate GPU.
        # self.device, self.context = self._init_gpu(comm)
        self.device = device
        self.context = context

        # Partition the space.
        # Each space is responsible for field[x_range[0]:x_range[1],:,:].
        get_x_range = lambda r: (int(self.shape[0] * (float(r) / size)), \
                                int(self.shape[0] * (float(r+1) / size)))
        self.x_range = get_x_range(rank)

        self.all_x_ranges = [get_x_range(r) for r in range(size)]
Example #17
    def get(self):
        """ Redefined so that we don't get overlap data. """
        # Get our section of the grid (excluding overlap).
        if self._xlap == 0:
            data = self.data.get()
        else:
            data = self.data.get()[self._xlap:-self._xlap,:,:]
        
#         return np.concatenate(comm.allgather(data), axis=0) # Super-simple.

        result = comm.gather(data)  # Gather all pieces to root.
        if comm.Get_rank() == 0:
            # Root node glues everything together.
            return np.concatenate(result, axis=0) 
        else: 
            return None
Example #18
def _build_cat_distributed(comm, name, path):
    # Control flow explanation:
    # * `build_err` starts out as `None`
    # * Rank 1 to N wait for a broadcast from rank 0 to receive the new value
    #   for `build_err`
    # * Rank 0 splits off from the others and executes the build.
    #   * If it builds correctly it finishes the collective `build_err`
    #     broadcast with the initial value `None`: all nodes continue.
    #   * If it errors, it finishes the collective broadcast with the caught err
    #
    # All MPI ranks either continue or raise the same err. (prevents stalling)
    build_err = None
    if not comm.Get_rank():
        try:
            _build_cat_local(name, path)
        except Exception as e:
            build_err = e
    build_err = comm.bcast(build_err, root=0)
    if build_err:
        raise build_err
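A self-contained sketch of the same broadcast-the-exception pattern, assuming only mpi4py; risky_build is a hypothetical stand-in for _build_cat_local:

from mpi4py.MPI import COMM_WORLD as comm

def risky_build():
    return 42  # hypothetical stand-in for the real build step

build_err, result = None, None
if comm.Get_rank() == 0:
    try:
        result = risky_build()
    except Exception as e:
        build_err = e
build_err = comm.bcast(build_err, root=0)
if build_err:
    raise build_err  # every rank raises the same error instead of stalling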
Example #19
def process_dir(indir, outdir):
    main_text_files = glob.glob("{0}/main/*.txt".format(indir))
    rank = world.Get_rank()
    size = world.Get_size()
    main_text_files_2 = []
    for m in main_text_files:
        tilename = find_tilename(m)
        out_main = "{0}/main/{1}.fits".format(outdir, tilename)
        out_epoch = "{0}/epoch/{1}.fits".format(outdir, tilename)
        if not (os.path.exists(out_main) and os.path.exists(out_epoch)):
            main_text_files_2.append(m)
    main_text_files = main_text_files_2
    print "{0} files left to do".format(len(main_text_files))

    for i, main_text_file in enumerate(main_text_files):
        if i % size != rank:
            continue
        print(rank, main_text_file)
        tilename = find_tilename(main_text_file)
        epoch_text_file = "{0}/epoch/{1}.epoch.txt".format(indir, tilename)
        out_main = "{0}/main/{1}.fits".format(outdir, tilename)
        out_epoch = "{0}/epoch/{1}.fits".format(outdir, tilename)
        if os.path.exists(out_main) and os.path.exists(out_epoch):
            continue
        try:
            process_text(main_text_file,
                         epoch_text_file,
                         out_main,
                         out_epoch,
                         "r",
                         blind=False,
                         quiet=False,
                         report=report)
        except:
            print "{} did not work".format(out_main)
        if report:
            return
Example #20
def main(nt, nx, dt, C, x_min, x_max):
    dx = (x_max - x_min) / nx

    ###
    size = mpi.Get_size()
    rank = mpi.Get_rank()

    # for nx=5 and size=3: 2+2+1 is better than 1+1+3
    import math
    nx_max = math.ceil(nx / size)
    nx = nx_max if (rank + 1) * nx_max <= nx else nx - rank * nx_max
    assert nx > 0

    x_min += dx * nx_max * rank
    x_max = min(x_max, x_min + dx * nx_max)
    #print(rank, '/', size, ':', nx, x_min, x_max)
    ###

    x = np.linspace(x_min - halo * dx,
                    x_max + halo * dx,
                    num=nx + 2 * halo,
                    endpoint=False)
    psi = calc(psi_0(x), nt, C)
    plot(x[halo:-halo], psi[halo:-halo], psi_0, nt, v=C / dt * dx)
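A quick worked check of the partitioning arithmetic above, using the nx=5, size=3 case from the comment:

import math

nx, size = 5, 3
nx_max = math.ceil(nx / size)  # 2 cells per rank, rounded up
split = [nx_max if (r + 1) * nx_max <= nx else nx - r * nx_max
         for r in range(size)]
assert split == [2, 2, 1]  # the preferred 2+2+1 split, not 1+1+3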
Example #21
def main():

    rank = CW.Get_rank()
    size = CW.Get_size()

    args = parse_inputs()

    n_orb = int(args.no_orbits)
    n_systems = int(args.no_systems)
    q_min = 0.05
    my_orb = bo.random_orbits(n_orb=n_orb)
    US_group_vel = 10.
    UCL_group_vel = 4.
    #Madsen, 2002 gives the STANDARD ERROR of the US and UCL velocities to be 1.3 and 1.9km/s
    US_group_std = 1.3 * args.group_velocity_sigma  #From Preibisch et al., 2008
    UCL_group_std = 1.3 * args.group_velocity_sigma
    standard_std = {'F': 1.08, 'G': 0.63, 'K': 1.43, 'M': 2.27}  # 2.0
    astrophysical_std = args.astrophysical_std  #Astrophysical radial velocity uncertainty

    Object = []
    Region = []
    IR_excess = []
    Temp_sptype = []
    Pref_template = []
    Obs_info = []
    all_bayes = [[], []]

    RV_standard_info = {}

    sys.stdout.flush()
    CW.Barrier()

    #Read in RV standard list
    header = 0
    with open('/home/100/rlk100/RV_standard_list.csv', 'r') as f:
        reader = csv.reader(f)
        for row in reader:
            if header != 0:
                RV_standard_info[row[0]] = (float(row[5]), float(row[6]),
                                            float(row[7]))
            else:
                header = 1
        f.close()

    sys.stdout.flush()
    CW.Barrier()

    print("Reading in current spreadsheet", args.input_file)
    header = 0
    reshape_len = -1
    with open(args.input_file, 'r') as f:
        reader = csv.reader(f)
        for row in reader:
            if header != 0:
                if 'U4' in row[0]:
                    row[0] = 'UCAC4' + row[0].split('U4')[-1]
                Object.append(row[0])
                Region.append(row[1])
                IR_excess.append(row[5])
                Pref_template.append(row[15])  #row[18])
                Temp_sptype.append(row[16])  #row[19])
                if len(row) > 17:
                    Obs = np.array(row[17:])
                    Obs = np.delete(Obs, np.where(Obs == ''))
                    if reshape_len == -1:
                        for ob in Obs:
                            reshape_len = reshape_len + 1
                            if '/' in ob and ob != Obs[0]:
                                break
                    #if len(Obs) > 5:
                    #    Obs = np.reshape(Obs, (len(Obs)/5, 5))
                    Obs = np.reshape(Obs,
                                     (len(Obs) // reshape_len, reshape_len))
                    for ind_obs in Obs:
                        if '/' in ind_obs[0]:
                            new_format = '20' + ind_obs[0].split('/')[
                                -1] + '-' + ind_obs[0].split('/')[-2] + '-' + (
                                    "%02d" % int(ind_obs[0].split('/')[-3]))
                            ind_obs[0] = new_format
                else:
                    Obs = np.array([])
                Obs_info.append(Obs)
            if header == 0:
                header = 1
        f.close()
    del header

    sys.stdout.flush()
    CW.Barrier()

    Obj_bayes = np.nan * np.zeros(len(Object))

    #Read in currently calculated Bayes Factors:
    if args.restart_calc != 'False':
        print("Reading in calulated Bayes factors")
        header = 0
        with open(args.bayes_file, 'r') as f:
            reader = csv.reader(f)
            for row in reader:
                if header != 0:
                    ind = Object.index(row[0])
                    Obj_bayes[ind] = float(row[2])
                    if row[1] == 'US':
                        all_bayes[0].append(float(row[2]))
                    else:
                        all_bayes[1].append(float(row[2]))
                    del ind
                else:
                    header = 1
            f.close()
        del header

    sys.stdout.flush()
    CW.Barrier()

    if args.restart_calc != 'False' and rank == 0:
        print("Creating new bayes file")
        f = open(args.bayes_file, 'w')
        f.write('Object,Region,Bayes_factor\n')
        f.close()

    sys.stdout.flush()
    CW.Barrier()

    inds = list(range(len(Object)))
    skip_inds = np.where(np.array(IR_excess) == 'NN')[0]
    for skit in skip_inds:
        inds.remove(skit)
    skip_inds = np.where(np.array(Pref_template) == '')[0]
    for skit in skip_inds:
        inds.remove(skit)
    del skip_inds
    del IR_excess

    rit = 0
    sys.stdout.flush()
    CW.Barrier()
    for obj in inds:
        Pref_template_name = Pref_template[obj].split('_')[0]
        if np.isnan(Obj_bayes[obj]) and rank == rit:
            print("Doing object:", Object[obj], "on rank:", rank)
            likelihoods = []
            single_likelihoods = []

            #Produces masses within +/- 10% of the mass of the template.
            #!!! Mike suggests a single mass.
            M_1 = (np.random.random(n_systems) *
                   (RV_standard_info[Pref_template_name][1] -
                    RV_standard_info[Pref_template_name][0])
                   ) + RV_standard_info[Pref_template_name][0]

            #Generates mass ratios with minimum mass ratio of q_min (default 0.01?, should this be dependent on the primary mass? Because sometimes low mass ratios could give very low mass companions i.e. BD mass...)
            #!!! Mike suggests 0.05 due to brown dwarf desert.
            q = (np.random.random(n_systems) * (1 - q_min)) + q_min

            #from Primary masses and mass ratios, secondary masses can get calculated
            M_2 = M_1 * q

            #Get dates of the observations of the object
            jds = Obs_info[obj][:, 1].astype(float)

            #get observed data, and add in the error in the standards in quadrature.
            #This relates to the spectrograph stability
            #There is also an astrophysical error due to these objects being rapid rotators etc.
            RV_standard_err = standard_std[Temp_sptype[obj][0]]
            err = np.sqrt(Obs_info[obj][:, 3].astype(float)**2 +
                          RV_standard_err**2 + astrophysical_std**2)
            observed_rv = Obs_info[obj][:, 2].astype(float)

            #IN A LOOP iterate over random orbits:
            for orb in range(n_orb):
                #FIXME: Figure out which velocity to use!
                if Region[obj] == 'US':
                    if args.group_velocity == 'True':
                        v_group = np.random.normal(
                            US_group_vel,
                            np.sqrt(US_group_std**2 + RV_standard_err**2),
                            n_systems)
                    else:
                        v_group = np.random.normal(
                            np.mean(observed_rv),
                            np.sqrt(US_group_std**2 + RV_standard_err**2),
                            n_systems)
                else:
                    if args.group_velocity == 'True':
                        v_group = np.random.normal(
                            UCL_group_vel,
                            np.sqrt(UCL_group_std**2 + RV_standard_err**2),
                            n_systems)
                    else:
                        v_group = np.random.normal(
                            np.mean(observed_rv),
                            np.sqrt(UCL_group_std**2 + RV_standard_err**2),
                            n_systems)

                #generate orbit?
                #!!! Find just one set of orbital parameters at a time, and
                #scale the RVs. OR if you really want you can compute a, i etc
                #yourself and plug these into my_orb, but some RV scaling is still needed.
                rho, theta, normalised_vr = bo.binary_orbit(my_orb,
                                                            jds,
                                                            plot_orbit_no=orb)
                for system in range(n_systems):
                    actual_vr = bo.scale_rv(normalised_vr,
                                            my_orb['P'][orb],
                                            M_1[system],
                                            M_2[system],
                                            my_orb['i'][orb],
                                            group_velocity=v_group[system])

                    this_likelihood = bo.calc_likelihood(
                        actual_vr, observed_rv, err)
                    likelihoods.append(this_likelihood)
                    #THEN CALCULATE PROBABILITY OF BEING A SINGLE STAR
                    single_likelihoods.append(
                        bo.calc_likelihood(v_group[system], observed_rv, err))
                    del actual_vr
                    del this_likelihood
                del v_group
            del M_1
            del q
            del M_2
            del jds
            del RV_standard_err
            del err
            del observed_rv

            #THEN CALCULATE BAYES FACTOR
            bayes_factor = np.mean(likelihoods) / np.mean(single_likelihoods)
            print(("Bayes Factor: {0:5.2f} for ".format(bayes_factor) +
                   Object[obj]), "on rank", rank, "with SpT", Temp_sptype[obj])
            del likelihoods
            del single_likelihoods
            if Region[obj] == 'US':
                send_data = [0.0, float(obj), bayes_factor, Temp_sptype[obj]]
                #print "Sending data:", send_data, "from rank:", rank
                if rank == 0:
                    bayes_update = send_data
                else:
                    CW.send(send_data, dest=0, tag=rank)
            else:
                send_data = [1.0, float(obj), bayes_factor, Temp_sptype[obj]]
                #print "Sending data:", send_data, "from rank:", rank
                if rank == 0:
                    bayes_update = send_data
                else:
                    CW.send(send_data, dest=0, tag=rank)
            del send_data
            if rank == 0:
                all_bayes[int(bayes_update[0])].append(bayes_update[2])
                Obj_bayes[int(bayes_update[1])] = bayes_update[2]
                print("Updated Bayes factors retrieved from rank 0 for object",
                      Object[int(bayes_update[1])])
                f = open(args.bayes_file, 'a')
                write_string = Object[int(bayes_update[1])] + ',' + Region[int(
                    bayes_update[1])] + ',' + str(bayes_update[2]) + ',' + str(
                        bayes_update[3]) + '\n'
                f.write(write_string)
                f.close()
                del bayes_update
                del write_string

        rit = rit + 1
        if rit == size:
            sys.stdout.flush()
            CW.Barrier()
            rit = 0
            if rank == 0:

                print("UPDATING CALCULATED BAYES VALUES")
                for orit in range(1, size):
                    bayes_update = CW.recv(source=orit, tag=orit)
                    all_bayes[int(bayes_update[0])].append(bayes_update[2])
                    Obj_bayes[int(bayes_update[1])] = bayes_update[2]
                    print("Updated Bayes factors retrieved from rank", orit,
                          "for object", Object[int(bayes_update[1])])
                    f = open(args.bayes_file, 'a')
                    write_string = Object[int(
                        bayes_update[1])] + ',' + Region[int(
                            bayes_update[1])] + ',' + str(
                                bayes_update[2]) + ',' + str(
                                    bayes_update[3]) + '\n'
                    f.write(write_string)
                    f.close()
                    del bayes_update
                    del write_string
            sys.stdout.flush()
            CW.Barrier()

    sys.stdout.flush()
    CW.Barrier()
    if rank == 0:

        print("UPDATING CALCULATED BAYES VALUES")
        for orit in range(1, size):
            bayes_update = CW.recv(source=orit, tag=orit)
            all_bayes[int(bayes_update[0])].append(bayes_update[2])
            Obj_bayes[int(bayes_update[1])] = bayes_update[2]
            print("Updated Bayes factors retrieved from rank", orit,
                  "for object", Object[int(bayes_update[1])])
            f = open(args.bayes_file, 'a')
            write_string = Object[int(bayes_update[1])] + ',' + Region[int(
                bayes_update[1])] + ',' + str(bayes_update[2]) + ',' + str(
                    bayes_update[3]) + '\n'
            f.write(write_string)
            f.close()
            del bayes_update
            del write_string
        sys.stdout.flush()
        CW.Barrier()
    print("Finished Calculating bayes factors!")
Example #22
import pickle
import matplotlib.patches
import collections.abc
import sys
from scipy import stats
from mpi4py.MPI import COMM_WORLD as CW


def flatten(x):
    if isinstance(x, collections.abc.Iterable):
        return [a for i in x for a in flatten(i)]
    else:
        return [x]


rank = CW.Get_rank()
size = CW.Get_size()

two_col_width = 7.20472  #inches
single_col_width = 3.50394  #inches
page_height = 10.62472
font_size = 10

sys.stdout.flush()
CW.Barrier()

pickle_file = sys.argv[1]
true_birth_con_pickle = sys.argv[2]
plot_gradient = False
read_pickle = sys.argv[3] == 'True'  # bool() of any non-empty string is always True
baseline_yr = float(sys.argv[4])
Example #23
def main():
    args = parse_args()
    assert (not args.pretrained_model_path
            or args.pretrained_model_path.endswith(".ckpt"))
    os.makedirs(args.save_dir, exist_ok=True)
    save_args(args, args.save_dir)
    set_seed(args.seed + COMM_WORLD.Get_rank() * 100)
    nprocs = COMM_WORLD.Get_size()

    # Initialize model and agent policy
    aurora = Aurora(
        args.seed + COMM_WORLD.Get_rank() * 100,
        args.save_dir,
        int(args.val_freq / nprocs),
        args.pretrained_model_path,
        tensorboard_log=args.tensorboard_log,
    )
    # training_traces, validation_traces,
    training_traces = []
    val_traces = []
    if args.curriculum == "udr":
        config_file = args.config_file
        if args.train_trace_file:
            with open(args.train_trace_file, "r") as f:
                for line in f:
                    line = line.strip()
                    training_traces.append(Trace.load_from_file(line))

        if args.validation and args.val_trace_file:
            with open(args.val_trace_file, "r") as f:
                for line in f:
                    line = line.strip()
                    if args.dataset == "pantheon":
                        queue = 100  # dummy value
                        val_traces.append(
                            Trace.load_from_pantheon_file(line,
                                                          queue=queue,
                                                          loss=0))
                    elif args.dataset == "synthetic":
                        val_traces.append(Trace.load_from_file(line))
                    else:
                        raise ValueError("unknown dataset: " + args.dataset)
        train_scheduler = UDRTrainScheduler(
            config_file,
            training_traces,
            percent=args.real_trace_prob,
        )
    elif args.curriculum == "cl1":
        config_file = args.config_files[0]
        train_scheduler = CL1TrainScheduler(args.config_files, aurora)
    elif args.curriculum == "cl2":
        config_file = args.config_file
        train_scheduler = CL2TrainScheduler(config_file, aurora, args.baseline)
    else:
        raise NotImplementedError

    aurora.train(
        config_file,
        args.total_timesteps,
        train_scheduler,
        tb_log_name=args.exp_name,
        validation_traces=val_traces,
    )
Example #24

from mpi4py.MPI import COMM_WORLD as COMM
from LFPy import NetworkPopulation, NetworkCell
# class NetworkCell parameters
cellParameters = dict(
    morphology='BallAndStick.hoc',
    templatefile='BallAndStickTemplate.hoc',
    templatename='BallAndStickTemplate',
    templateargs=None,
    passive=False,
    delete_sections=False,
)
# class NetworkPopulation parameters
populationParameters = dict(
    Cell=NetworkCell,
    cell_args=cellParameters,
    pop_args=dict(radius=100, loc=0., scale=20.),
    rotation_args=dict(x=0, y=0),
)
# create population instance
population = NetworkPopulation(first_gid=0, name='E', **populationParameters)
for cell in population.cells:
    print('RANK {}; pop {}; gid {}; cell {}'.format(COMM.Get_rank(),
                                                    population.name, cell.gid,
                                                    cell))
Example #25
    n_common_b = np.sum(bulge_good & disc_good & bulge_better)
    n_common_d = np.sum(bulge_good & disc_good & disc_better)
    n_only_b = np.sum(bulge_good & (~disc_good))
    n_only_d = np.sum(disc_good & (~bulge_good))  
    n_check = n_only_b + n_only_d +  n_common_b + n_common_d    
    
    cat_final.write(bord_filename)
    print('%s  n_total=%d n_common=%d n_common_b=%d n_common_d=%d n_only_b=%d n_only_d=%d n_check=%d'
          % (bord_filename, n_total, n_common, n_common_b, n_common_d,
             n_only_b, n_only_d, n_check))

    return disc_ids, bulge_ids


import sys
if '--mpi' in sys.argv:
    from mpi4py.MPI import COMM_WORLD as world
    rank = world.Get_rank()
    size = world.Get_size()
else:
    rank = 0
    size = 1

def main():

    bulge_path = 'bulge/main/DES*'
    disc_path = 'disc/main/DES*'

    print "Running merge script."

    bulge_files = glob.glob(bulge_path)
    disc_files = glob.glob(disc_path)
Example #26
def main():

    args = parse_inputs()
    if args.adaptive_bins == 'False':
        args.adaptive_bins = False
    else:
        args.adaptive_bins = True

    file = open(args.file, 'rb')  # pickle requires binary mode in Python 3
    loaded_fields = pickle.load(file)
    distance = loaded_fields[0]
    y = loaded_fields[1]
    bin_data = loaded_fields[2]

    rank = CW.Get_rank()
    size = CW.Get_size()

    dist_min = np.min(distance)
    dist_max = np.max(distance)
    if args.adaptive_bins:
        rs = [0] + list(set(distance))
        rs = np.sort(rs)
        #rs = rs[::2]
        rs = np.array(rs)
        rs = np.append(rs, (dist_max + (rs[-1] - rs[-2])))
    else:
        rs = np.linspace(dist_min, dist_max, args.no_of_bins)
        rs = np.append(rs, (dist_max + (rs[-1] - rs[-2])))
    bin_size = rs[-1] - rs[-2]
    rs = np.append(rs, rs[-1] + bin_size)
    gradient = np.array(np.zeros(np.shape(distance)))
    #print "RS:", rs

    rit = 1
    printed = False
    print_cen = False
    for r in range(len(rs)):
        if rank == rit:
            grad_add = np.array(np.zeros(np.shape(distance)))
            if r - 1 < 0:
                r_0 = rs[r]
            else:
                r_0 = rs[r - 1]
            r_1 = rs[r]
            if r + 1 == len(rs):
                r_2 = rs[r]
            else:
                r_2 = rs[r + 1]
            if r + 2 == len(rs) + 1:
                r_3 = rs[r]
            elif r + 2 == len(rs):
                r_3 = rs[r + 1]
            else:
                r_3 = rs[r + 2]
            mid_01 = (r_1 + r_0) / 2.
            mid_23 = (r_3 + r_2) / 2.
            shell_01 = np.where((distance >= r_0) & (distance < r_1))[0]
            shell_12 = np.where((distance >= r_1) & (distance < r_2))[0]
            shell_23 = np.where((distance >= r_2) & (distance < r_3))[0]
            if len(shell_01) == 0:
                print("FOUND EMPTY SHELL")
                y_01 = 0.0
            else:
                y_01 = np.mean(y[shell_01])
            if len(shell_23) == 0:
                y_23 = 0.0
                print("FOUND EMPTY SHELL")
            else:
                y_23 = np.mean(y[shell_23])
            grad_val = (y_23 - y_01) / (2. * (mid_23 - mid_01))
            #if rank == 1:
            #print "r_0, r_1, r_2, r_3:", r_0, r_1, r_2, r_3
            #print "mid_01, mid_12, mid_23:", mid_01, mid_12, mid_23
            #print "y_01, y_12, y_23:", y_01, y_12, y_23, "on rank", rank
            #print "grad_1, grad_2, average", grad_1, grad_2, grad_val, "on rank", rank
            #print "Gradient =", grad_val, "at Distance =", np.mean([mid_01, mid_23]), "on rank", rank
            grad_add[shell_12] = grad_val
            #grad_add[shell] = grad_val
            CW.send(grad_add, dest=0, tag=rank)
        if rank == 0:
            grad_add = CW.recv(source=rit, tag=rit)
            gradient = gradient + grad_add
        rit = rit + 1
        if rit == size:
            rit = 1

    if rank == 0:
        os.remove(args.file)
        file = open(args.save_file, 'wb')  # binary mode for pickle
        print("pickle file:", args.save_file)
        pickle.dump(gradient, file)
        file.close()
Example #27
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.collections import LineCollection
import numpy as np
import matplotlib.tri as mtri
from matplotlib.tri import tricontour
from matplotlib.tri import TriContourSet
from mpi4py.MPI import COMM_WORLD as comm
from common.io import remove_safe

rank = comm.Get_rank()
size = comm.Get_size()


__all__ = ["plot_edges", "plot_faces", "plot_contour", "plot_quiver",
           "zero_level_set", "plot_fancy", "plot_any_field"]


class Figure:
    def __init__(self, title=None, show=True, aspect_equal=True,
                 save=None, base_fig=None, xlabel="x", ylabel="y",
                 colorbar=True, clabel=None, subplots=False,
                 tight_layout=False, ticks=True):
        self.title = title
        self.show = show
        self.aspect_equal = aspect_equal
        self.save = save
        self.base_fig = base_fig
        self.colorbar = colorbar
        self.subplots = subplots
        self.colorbar_ax = None
Example #28
import logging
import pickle
import time
import resource
import psutil
import warnings

import multiprocessing as mp

from glob import glob

import mdtraj as md

from sklearn.utils import check_random_state

from mpi4py.MPI import COMM_WORLD as COMM
RANKSTR = "[Rank %s]" % COMM.Get_rank()

logging.basicConfig(
    level=logging.DEBUG if COMM.Get_rank() == 0 else logging.INFO,
    format=('%(asctime)s ' + RANKSTR +
            ' %(name)-26s %(levelname)-7s %(message)s'),
    datefmt='%m-%d-%Y %H:%M:%S')

from enspara.mpi import MPI_RANK, MPI_SIZE
from enspara import mpi

from enspara.cluster.util import load_frames, partition_indices
from enspara.cluster.kcenters import kcenters_mpi
from enspara.cluster.kmedoids import _kmedoids_pam_update

from enspara.apps.util import readable_dir
Example #29
def test_rank(sut):
    rank = sut()
    assert rank == COMM_WORLD.Get_rank()
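The fragment above presumably relies on a pytest fixture named sut; a hypothetical sketch of one that would make the test pass:

import pytest
from mpi4py.MPI import COMM_WORLD

@pytest.fixture
def sut():
    # System under test: a callable that reports this process's MPI rank.
    return COMM_WORLD.Get_rank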
Example #30
    initial_prop_components)

# -----------------------------------------------------------------------------------------------------------------------

# In ``pmc.py`` the following line defines the sequential single process sampler:
# sampler = pypmc.sampler.importance_sampling.ImportanceSampler(log_target, initial_proposal)
#
# We now use the parallel MPISampler instead:
SequentialIS = pypmc.sampler.importance_sampling.ImportanceSampler
parallel_sampler = pypmc.tools.parallel_sampler.MPISampler(
    SequentialIS, target=log_target, proposal=initial_proposal)

# Draw 10,000 samples adapting the proposal every 1,000 samples:

# make sure that every process has a different random number generator seed
if comm.Get_rank() == 0:
    seed = np.random.randint(1e5)
else:
    seed = None
seed = comm.bcast(seed)
np.random.seed(seed + comm.Get_rank())

generating_components = []
for i in range(10):
    # With the invocation "mpirun -n 10 python pmc_mpi.py", there are
    # 10 processes which means in order to draw 1,000 samples
    # ``parallel_sampler.run(1000//comm.Get_size())`` makes each process draw
    # 100 samples.
    # Hereby the generating proposal component for each sample in each process
    # is returned by ``parallel_sampler.run``.
    # In the master process, ``parallel_sampler.run`` is a list containing the