# Example 1
    def test_available(self):
        """Exercise target availability: simulate targets and tiles, then
        build the target->fiber and fiber->target availability maps."""
        workdir = test_subdir_create("targets_test_available")

        # Simulated input catalogs, one file per target class.  Each class is
        # simulated with a target-ID offset so that IDs never collide.
        catalogs = [
            (os.path.join(workdir, "mtl.fits"), TARGET_TYPE_SCIENCE),
            (os.path.join(workdir, "standards.fits"), TARGET_TYPE_STANDARD),
            (os.path.join(workdir, "sky.fits"), TARGET_TYPE_SKY),
            (os.path.join(workdir, "suppsky.fits"), TARGET_TYPE_SUPPSKY),
        ]
        offset = 0
        for path, target_type in catalogs:
            offset += sim_targets(path, target_type, offset)

        # Load every simulated catalog into a single Targets container.
        tgs = Targets()
        for path, _ in catalogs:
            load_target_file(tgs, path)
        print(tgs)

        # Exercise read access and in-place mutation of a single target.
        target_ids = tgs.ids()
        first = tgs.get(target_ids[0])
        first.ra += 1.0e-5
        first.dec += 1.0e-5
        first.subpriority = 0.99

        # Hierarchical triangle mesh lookup over the target positions.
        tree = TargetTree(tgs, 0.01)

        # Targets available to each fiber, per tile.
        hw = load_hardware()
        footprint = os.path.join(workdir, "footprint.fits")
        sim_tiles(footprint)
        tiles = load_tiles(tiles_file=footprint)
        tgsavail = TargetsAvailable(hw, tgs, tiles, tree)

        # The tree is only needed to build the availability map.
        del tree

        # Inverse map: fibers (across all tiles) available to each target.
        favail = LocationsAvailable(tgsavail)
# Example 2
def main():
    """Run repeated fiber-assignment realizations in parallel with MPI.

    Each MPI rank processes a subset of the realizations.  For every
    realization the science-target subpriorities are re-randomized
    (reproducibly, seeded by the realization index), fiber assignment is
    run, and a per-(target, realization) boolean array records which
    science targets were assigned.  The per-rank arrays are OR-reduced
    to the root rank.
    """
    log = Logger.get()

    mpi_procs = MPI.COMM_WORLD.size
    mpi_rank = MPI.COMM_WORLD.rank

    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--survey_log",
        type=str,
        required=False,
        help="Eventually we would pass in a file containing the log"
        " of when each fiber assignment was run and for which tiles, "
        "along with the options that were used.")

    parser.add_argument(
        "--sky",
        type=str,
        required=False,
        help="Input file with sky or supp_sky targets.  "
        "These target files are assumed to be constant and not "
        "tracked by the MTL ledger.")

    parser.add_argument(
        "--mtl",
        type=str,
        required=True,
        help="The MTL ledger.  This is still a work in progress and"
        " I am not sure what the interface will be, but given the "
        "fiber assignment dates in the survey log, we should be able"
        " to get the MTL state at that time.  For now, this option"
        " is just one or more target files.")

    parser.add_argument("--footprint",
                        type=str,
                        required=False,
                        default=None,
                        help="Optional FITS file defining the footprint.  If"
                        " not specified, the default footprint from desimodel"
                        " is used.")

    parser.add_argument("--tiles",
                        type=str,
                        required=False,
                        default=None,
                        help="Optional text file containing a subset of the"
                        " tile IDs to use in the footprint, one ID per line."
                        " Default uses all tiles in the footprint.")

    parser.add_argument("--out",
                        type=str,
                        required=False,
                        default=None,
                        help="Output directory.")

    parser.add_argument("--realizations",
                        type=int,
                        required=False,
                        default=10,
                        help="Number of realizations.")

    args = parser.parse_args()

    # Default output directory is the current directory.
    if args.out is None:
        args.out = "."

    # Read the optional tile-ID subset, one integer per line.
    tileselect = None
    if args.tiles is not None:
        tileselect = list()
        with open(args.tiles, "r") as f:
            for line in f:
                # Try to convert the first column to an integer; skip
                # header, blank, or non-numeric lines.  IndexError covers
                # blank lines where line.split() is empty.
                try:
                    tileselect.append(int(line.split()[0]))
                except (ValueError, IndexError):
                    pass
    tiles = load_tiles(
        tiles_file=args.footprint,
        select=tileselect,
    )

    # Load the MTL (science) targets first, so that tgs.ids() below
    # returns only science target IDs.
    tgs = Targets()
    load_target_file(tgs, args.mtl)

    # Science target IDs and a reverse lookup from ID to array index.
    tg_science = tgs.ids()
    tg_science2indx = {y: x for x, y in enumerate(tg_science)}

    # Now append the sky / supp_sky targets, if a file was given.
    # (--sky is a single path; previously a None value was replaced with
    # an empty list and passed to load_target_file, which expects a path.)
    if args.sky is not None:
        load_target_file(tgs, args.sky)

    # Divide realizations among the MPI processes.
    n_realization = args.realizations
    realizations = np.arange(n_realization, dtype=np.int32)
    my_realizations = np.array_split(realizations, mpi_procs)[mpi_rank]

    # Flat boolean array indexed by (science target, realization):
    # element [idx * n_realization + realization] is True when that
    # target was assigned in that realization.
    tgarray = np.zeros(len(tg_science) * n_realization, dtype='bool')

    # Spatial lookup tree over all target positions.
    tree = TargetTree(tgs)

    hw = load_hardware()

    for realization in my_realizations:
        # Seed from the realization index so results are reproducible
        # regardless of which process works on which realization.
        np.random.seed(realization)

        # Randomize science target subpriority for this realization.
        new_subpriority = np.random.random_sample(size=len(tg_science))
        for indx, tgid in enumerate(tg_science):
            tg = tgs.get(tgid)
            tg.subpriority = new_subpriority[indx]

        # Compute available targets / locations and the assignment object.
        tgsavail = TargetsAvailable(hw, tgs, tiles, tree)
        favail = LocationsAvailable(tgsavail)
        asgn = Assignment(tgs, tgsavail, favail)

        # Replay the survey log for each time fiber assignment was run.
        # For now this is a single event covering all tiles at once.
        for assign_event in range(1):
            # In the future, load MTL updates for each target here, and
            # pass the assignment run date to load_hardware().
            hw = load_hardware()

            # Run assignment for this event.
            run(asgn)

            # Record which science targets were assigned on any tile.
            for tile_id in tiles.id:
                adata = asgn.tile_location_target(tile_id)
                for loc, tgid in adata.items():
                    try:
                        idx = tg_science2indx[tgid]
                        tgarray[idx * n_realization + realization] = True
                    except KeyError:
                        # Not a science target.
                        pass

    # OR-reduce the per-rank arrays to the root process.  The receive
    # buffer must be allocated on the root rank (previously it was left
    # as None, which makes Reduce fail on root and len(tgall) raise).
    tgall = np.zeros_like(tgarray) if mpi_rank == 0 else None
    MPI.COMM_WORLD.Reduce(tgarray, tgall, op=MPI.BOR, root=0)

    # Write it out (placeholder: report the reduced array size).
    if mpi_rank == 0:
        print(len(tgall))
# Example 3
# Load Target data.
# NOTE(review): mtlfile and random_generator are defined earlier in the file
# (outside this chunk) — verify before reuse.
tgs = Targets()

# First load the MTL file and compute the target IDs.
load_target_file(tgs, mtlfile, random_generator=random_generator)

# --------------------------------------------------------------------------------------------------
# TARGET IDs IN PARALLEL
#
# Target IDs for each tracer in parallel.
# The total number of targets is split almost evenly between all processes. Each MPI task takes a
# portion of the total targets and extract the corresponding target IDs for each type of tracer.
# --------------------------------------------------------------------------------------------------

# Total number of targets.
ntargets = len(tgs.ids())

# Dictionary with DESI bitmask values for LRGs, ELGs and QSOs.
# NOTE(review): 65537 = 2^16 + 2^0, 131074 = 2^17 + 2^1, 262148 = 2^18 + 2^2 —
# presumably DESI_TARGET bit combinations; verify against the desitarget mask
# definitions.  Downstream equality comparison against these values matches
# exact bit patterns only, not a bitwise-AND test.
desi_bitmask = {'lrg': 65537, 'elg': 131074, 'qso': 262148}

# Targets that each MPI task will process (except for the last one).
# NOTE(review): rank and size are MPI communicator values defined outside this
# chunk — confirm.  The last rank picks up the remainder of the division.
targets_per_process = ntargets // size

# Extraction of target IDs for each tracer of each process.
if rank == size - 1:
    initial_index = rank * targets_per_process
    lrg_targets_ids_local = np.array([tid for tid in tgs.ids()[initial_index:] if \
                                      (tgs.get(tid).desi_target == desi_bitmask['lrg'])],
                                     dtype=np.int64)
    elg_targets_ids_local = np.array([tid for tid in tgs.ids()[initial_index:] if \
                                      (tgs.get(tid).desi_target == desi_bitmask['elg'])],