Example #1
def fiber_area_arcsec2(x, y):
    '''
    Returns area of fibers at (x,y) in arcsec^2
    '''
    import numpy as np
    from desimodel.io import load_desiparams, load_platescale
    params = load_desiparams()
    fiber_dia = params['fibers']['diameter_um']
    x = np.asarray(x)
    y = np.asarray(y)
    r = np.sqrt(x**2 + y**2)

    #- Platescales in um/arcsec
    ps = load_platescale()
    radial_scale = np.interp(r, ps['radius'], ps['radial_platescale'])
    az_scale = np.interp(r, ps['radius'], ps['az_platescale'])

    #- radial and azimuthal fiber radii in arcsec
    rr = 0.5 * fiber_dia / radial_scale
    raz = 0.5 * fiber_dia / az_scale
    fiber_area = (np.pi * rr * raz)
    return fiber_area
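A minimal usage sketch for the function above, assuming numpy and the desimodel data files (the DESIMODEL environment variable) are available; the focal-plane positions are purely illustrative:

import numpy as np

# illustrative focal-plane positions in mm
x_mm = np.array([0.0, 100.0, 300.0])
y_mm = np.array([0.0, 50.0, -200.0])

# fiber area in arcsec^2 at each position; the angular size of a fiber
# shrinks toward the edge of the focal plane where the platescale
# (um/arcsec) is larger
print(fiber_area_arcsec2(x_mm, y_mm))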
Example #2
def psf_to_fiber_flux_correction(fibermap, exposure_seeing_fwhm=1.1):
    """
    Multiplicative factor to apply to the psf flux of a fiber
    to obtain the fiber flux, given the current exposure seeing.
    The fiber flux is the flux one would collect for this object in a fiber of 1.5 arcsec diameter,
    for a seeing of 1 arcsec FWHM (same definition as for the Legacy Surveys).

    Args:
      fibermap: fibermap of frame, astropy.table.Table
      exposure_seeing_fwhm: seeing FWHM in arcsec

    Returns: 1D numpy array with the correction factor to apply to PSF-calibrated fluxes, valid for any source.
    """

    log = get_logger()

    for k in ["FIBER_X", "FIBER_Y"]:
        if k not in fibermap.dtype.names:
            log.warning(
                "no column '{}' in fibermap, cannot do the flat_to_psf correction, returning 1"
                .format(k))
            return np.ones(len(fibermap))

    # compute the seeing and plate scale correction

    fa = FastFiberAcceptance()
    x_mm = fibermap["FIBER_X"]
    y_mm = fibermap["FIBER_Y"]
    bad = np.isnan(x_mm) | np.isnan(y_mm)
    x_mm[bad] = 0.
    y_mm[bad] = 0.

    if "DELTA_X" in fibermap.dtype.names:
        dx_mm = fibermap["DELTA_X"]  # mm
    else:
        log.warning("no column 'DELTA_X' in fibermap, assume = zero")
        dx_mm = np.zeros(len(fibermap))

    if "DELTA_Y" in fibermap.dtype.names:
        dy_mm = fibermap["DELTA_Y"]  # mm
    else:
        log.warning("no column 'DELTA_Y' in fibermap, assume = zero")
        dy_mm = np.zeros(len(fibermap))

    bad = np.isnan(dx_mm) | np.isnan(dy_mm)
    dx_mm[bad] = 0.
    dy_mm[bad] = 0.

    ps = load_platescale()
    isotropic_platescale = np.interp(x_mm**2 + y_mm**2, ps['radius']**2,
                                     np.sqrt(ps['radial_platescale'] *
                                             ps['az_platescale']))  # um/arcsec
    # we could include here a wavelength dependence on seeing
    sigmas_um = exposure_seeing_fwhm / 2.35 * isotropic_platescale  # um
    offsets_um = np.sqrt(dx_mm**2 + dy_mm**2) * 1000.  # um
    nfibers = len(fibermap)

    if "MORPHTYPE" in fibermap.dtype.names:
        point_sources = (fibermap["MORPHTYPE"] == "PSF")
    else:
        log.warning(
            "no column 'MORPHTYPE' in fibermap, assume all point sources.")
        point_sources = np.repeat(True, len(fibermap))

    extended_sources = ~point_sources

    if "SHAPE_R" in fibermap.dtype.names:
        half_light_radius_arcsec = fibermap["SHAPE_R"]
    else:
        log.warning("no column 'SHAPE_R' in fibermap, assume = zero")
        half_light_radius_arcsec = np.zeros(len(fibermap))

    # saturate half_light_radius_arcsec at 2 arcsec;
    # larger values would have extrapolated fiberfrac,
    # when in fact the ratios of fiberfrac for different seeings
    # or fiber angular sizes are similar
    max_radius = 2.0
    half_light_radius_arcsec[
        half_light_radius_arcsec > max_radius] = max_radius

    # for the current seeing, fiber plate scale, and fiber size ...
    current_fiber_frac_point_source = fa.value("POINT", sigmas_um, offsets_um)
    current_fiber_frac = current_fiber_frac_point_source.copy()
    # for the moment use result for an exponential disk profile
    current_fiber_frac[extended_sources] = fa.value(
        "DISK", sigmas_um[extended_sources], offsets_um[extended_sources],
        half_light_radius_arcsec[extended_sources])

    # for "nominal" fiber size of 1.5 arcsec, and seeing of 1.
    nominal_isotropic_platescale = 107 / 1.5  # um/arcsec
    sigmas_um = 1.0 / 2.35 * nominal_isotropic_platescale * np.ones(
        nfibers)  # um
    offsets_um = np.zeros(nfibers)  # um , no offset

    nominal_fiber_frac_point_source = fa.value("POINT", sigmas_um, offsets_um)
    nominal_fiber_frac = nominal_fiber_frac_point_source.copy()
    nominal_fiber_frac[extended_sources] = fa.value(
        "DISK", sigmas_um[extended_sources], offsets_um[extended_sources],
        half_light_radius_arcsec[extended_sources])

    # legacy survey fiber frac
    #selection = (fibermap["MORPHTYPE"]=="PSF")&(fibermap["FLUX_R"]>0)
    #imaging_fiber_frac_for_point_source = np.sum(fibermap["FIBERFLUX_R"][selection]*fibermap["FLUX_R"][selection])/np.sum(fibermap["FLUX_R"][selection]**2)
    #imaging_fiber_frac = imaging_fiber_frac_for_point_source*np.ones(nfibers) # default is value for point sources
    #selection = (fibermap["FLUX_R"]>1)
    #imaging_fiber_frac[selection] = fibermap["FIBERFLUX_R"][selection]/fibermap["FLUX_R"][selection]
    #to_saturate = (imaging_fiber_frac[selection]>imaging_fiber_frac_for_point_source)
    #if np.sum(to_saturate)>0 :
    #    imaging_fiber_frac[selection][to_saturate] = imaging_fiber_frac_for_point_source # max is point source value
    """
    uncalibrated flux     ~= current_fiber_frac * total_flux
    psf calibrated flux   ~= current_fiber_frac * total_flux / current_fiber_frac_point_source
    fiber flux            = nominal_fiber_frac * total_flux

    the multiplicative factor to apply to the current psf calibrated flux is:
    correction_current = (fiber flux)/(psf calibrated flux) = nominal_fiber_frac / current_fiber_frac * current_fiber_frac_point_source

    multiply by normalization between the fast fiber acceptance computation (using moffat with beta=3.5) and the one
    done for the imaging surveys assuming a Gaussian seeing of sigma=1/2.35 arcsec and a fiber of 1.5 arcsec diameter

    """

    # compute normalization between the fast fiber acceptance computation and the one
    # done for the imaging surveys, assuming a Gaussian seeing of sigma=1/2.35 arcsec and a fiber of 1.5 arcsec diameter
    scale = 0.789 / np.mean(nominal_fiber_frac_point_source)
    nominal_fiber_frac *= scale

    corr = current_fiber_frac_point_source
    ok = (current_fiber_frac > 0.01)
    corr[ok] *= (nominal_fiber_frac[ok] / current_fiber_frac[ok])
    corr[~ok] *= 0.

    return corr
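A minimal usage sketch, assuming the function above and its dependencies (FastFiberAcceptance, load_platescale, get_logger) are importable and the DESI model data are installed; the fibermap values are purely illustrative:

import numpy as np
from astropy.table import Table

# illustrative fibermap with the columns the function reads
fibermap = Table()
fibermap["FIBER_X"] = np.array([0.0, 120.0])      # mm
fibermap["FIBER_Y"] = np.array([0.0, -80.0])      # mm
fibermap["DELTA_X"] = np.array([0.001, -0.002])   # mm, positioning offsets
fibermap["DELTA_Y"] = np.array([0.000, 0.001])    # mm
fibermap["MORPHTYPE"] = np.array(["PSF", "REX"])  # point source, extended source
fibermap["SHAPE_R"] = np.array([0.0, 0.8])        # arcsec, half-light radius

# multiply PSF-calibrated fluxes by this factor to estimate fiber fluxes
corr = psf_to_fiber_flux_correction(fibermap, exposure_seeing_fwhm=1.1)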
Example #3
def flat_to_psf_flux_correction(fibermap, exposure_seeing_fwhm=1.1):
    """
    Multiplicative factor to apply to the flat-fielded spectroscopic flux of a fiber
    to calibrate the spectrum of a point source, given the current exposure seeing.

    Args:
      fibermap: fibermap of frame, astropy.table.Table
      exposure_seeing_fwhm: seeing FWHM in arcsec

    Returns: 1D numpy array with correction factor to apply to flat-fielded fluxes, valid for point sources.
    """

    log = get_logger()

    for k in ["FIBER_X", "FIBER_Y"]:
        if k not in fibermap.dtype.names:
            log.warning(
                "no column '{}' in fibermap, cannot do the flat_to_psf correction, returning 1"
                .format(k))
            return np.ones(len(fibermap))

    #- Compute point source flux correction and fiber flux correction
    fa = FastFiberAcceptance()
    x_mm = fibermap["FIBER_X"]
    y_mm = fibermap["FIBER_Y"]
    bad = np.isnan(x_mm) | np.isnan(y_mm)
    x_mm[bad] = 0.
    y_mm[bad] = 0.

    if "DELTA_X" in fibermap.dtype.names:
        dx_mm = fibermap["DELTA_X"]  # mm
    else:
        log.warning("no column 'DELTA_X' in fibermap, assume DELTA_X=0")
        dx_mm = np.zeros(len(fibermap))

    if "DELTA_Y" in fibermap.dtype.names:
        dy_mm = fibermap["DELTA_Y"]  # mm
    else:
        log.warning("no column 'DELTA_Y' in fibermap, assume DELTA_Y=0")
        dy_mm = np.zeros(len(fibermap))

    bad = np.isnan(dx_mm) | np.isnan(dy_mm)
    dx_mm[bad] = 0.
    dy_mm[bad] = 0.

    ps = load_platescale()
    isotropic_platescale = np.interp(x_mm**2 + y_mm**2, ps['radius']**2,
                                     np.sqrt(ps['radial_platescale'] *
                                             ps['az_platescale']))  # um/arcsec
    sigmas_um = exposure_seeing_fwhm / 2.35 * isotropic_platescale  # um
    offsets_um = np.sqrt(dx_mm**2 + dy_mm**2) * 1000.  # um

    fiber_frac = fa.value("POINT", sigmas_um, offsets_um)
    # at large r,
    #  isotropic_platescale is larger
    #  fiber angular size is smaller
    #  fiber flat is smaller
    #  fiber flat correction is larger
    #  have to divide by isotropic_platescale^2
    ok = (fiber_frac > 0.01)
    point_source_correction = np.zeros(x_mm.shape)
    point_source_correction[
        ok] = 1. / fiber_frac[ok] / isotropic_platescale[ok]**2

    # normalize to one because this is a relative correction here
    point_source_correction[ok] /= np.mean(point_source_correction[ok])

    return point_source_correction
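A quick sanity check under the same assumptions as the previous sketch: the returned correction is relative, i.e. normalized to a mean of 1 over fibers whose fiber fraction exceeds 0.01, and set to 0 elsewhere:

import numpy as np
from astropy.table import Table

# illustrative fibermap with only the positional columns the function reads
fibermap = Table()
fibermap["FIBER_X"] = np.array([0.0, 200.0, 400.0])  # mm
fibermap["FIBER_Y"] = np.zeros(3)                    # mm
fibermap["DELTA_X"] = np.zeros(3)                    # mm
fibermap["DELTA_Y"] = np.zeros(3)                    # mm

corr = flat_to_psf_flux_correction(fibermap, exposure_seeing_fwhm=1.1)
ok = corr > 0
assert np.isclose(np.mean(corr[ok]), 1.0)  # relative correction: mean of valid entries is 1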
Example #4
##
_bitdefs = load_mask_bits("sv1")

desi_mask = BitMask('sv1_desi_mask', _bitdefs)
bgs_mask = BitMask('sv1_bgs_mask', _bitdefs)

types = bgs_mask.names()
bits = [bgs_mask.bitnum(x) for x in types]

##  L428 of https://github.com/desihub/desimodel/blob/master/py/desimodel/focalplane/geometry.py
params = load_desiparams()
fiber_dia = params['fibers']['diameter_um']

#- Platescales in um/arcsec
ps = load_platescale()

##  Add in GAMA labels.
_fits = fits.open(
    '/global/cscratch1/sd/mjwilson/BGS/SV-ASSIGN/truth/legacy/ls-GAMA-south.fits'
)
gama = Table(_fits[1].data)

for tile in utiles:
    ##  Scrape from the tile picker site.
    ##  cmd = 'wget http://www.astro.utah.edu/~u6022465/SV/tiles/SV_BGS/fits_files/tile-{:06}.fits -O /global/cscratch1/sd/mjwilson/BGS/SV-ASSIGN/fiberassign/tile-{:06}.fits'.format(tile, tile)
    ##  os.system(cmd)
    try:
        print('Solving for Tile {}.'.format(tile))

        fname = '/global/cscratch1/sd/mjwilson/BGS/SV-ASSIGN/mtls/svmtl_{:06}.fits'.format(
Example #5
def load_hardware(focalplane=None, rundate=None):
    """Create a hardware class representing properties of the telescope.

    Args:
        focalplane (tuple):  Override the focalplane model.  If not None, this
            should be a tuple of the same data types returned by
            desimodel.io.load_focalplane()
        rundate (str):  ISO 8601 format time stamp as a string in the
            format YYYY-MM-DDTHH:MM:SS.  If None, uses current time.

    Returns:
        (Hardware):  The hardware object.

    """
    log = Logger.get()

    # The timestamp for this run.
    runtime = None
    if rundate is None:
        runtime = datetime.utcnow()
    else:
        runtime = datetime.strptime(rundate, "%Y-%m-%dT%H:%M:%S")

    # Get the focalplane information
    fp = None
    exclude = None
    state = None
    tmstr = "UNKNOWN"
    if focalplane is None:
        fp, exclude, state, tmstr = dmio.load_focalplane(runtime)
    else:
        fp, exclude, state = focalplane

    # Get the plate scale
    platescale = dmio.load_platescale()

    # We are going to do a quadratic interpolation to the platescale on a fine grid,
    # and then use that for *linear* interpolation inside the compiled code.  The
    # default platescale data is on a one mm grid spacing.  We also do the same
    # interpolation of the arclength S(R).

    fine_radius = np.linspace(platescale["radius"][0],
                              platescale["radius"][-1],
                              num=10000,
                              dtype=np.float64)
    fn = interp1d(platescale["radius"], platescale["theta"], kind="quadratic")
    fine_theta = fn(fine_radius).astype(np.float64)
    fn = interp1d(platescale["radius"],
                  platescale["arclength"],
                  kind="quadratic")
    fine_arc = fn(fine_radius).astype(np.float64)

    # We are only going to keep rows for LOCATIONs that are assigned to a
    # science or sky monitor positioner.

    log.info("Loaded focalplane for time stamp {}".format(runtime))

    pos_rows = np.where(fp["DEVICE_TYPE"].astype(str) == "POS")[0]
    etc_rows = np.where(fp["DEVICE_TYPE"].astype(str) == "ETC")[0]
    keep_rows = np.unique(np.concatenate((pos_rows, etc_rows)))

    nloc = len(keep_rows)
    log.debug(
        "  focalplane table keeping {} rows for POS and ETC devices".format(
            nloc))

    device_type = np.full(nloc, "OOPSBUG", dtype="a8")
    device_type[:] = fp["DEVICE_TYPE"][keep_rows]

    locations = np.copy(fp["LOCATION"][keep_rows])

    # Map location to row in the table

    loc_to_fp = dict()
    for rw, loc in enumerate(fp["LOCATION"]):
        loc_to_fp[loc] = rw

    # FIXME:  Here we assume that the 32bit STATE column has the same bit
    # definitions as what is used by fiberassign (defined in hardware.h):
    # If this is not true, then re-map those values here inside the "state"
    # table loaded above.

    # Map location to row of the state table

    loc_to_state = dict()
    for rw, loc in enumerate(state["LOCATION"]):
        loc_to_state[loc] = rw

    # Convert the exclusion polygons into shapes.

    excl = dict()

    for nm, shp in exclude.items():
        excl[nm] = dict()
        for obj in shp.keys():
            cr = list()
            for crc in shp[obj]["circles"]:
                cr.append(Circle(crc[0], crc[1]))
            sg = list()
            for sgm in shp[obj]["segments"]:
                sg.append(Segments(sgm))
            fshp = Shape((0.0, 0.0), cr, sg)
            excl[nm][obj] = fshp

    # For each positioner, select the exclusion polygons.

    positioners = dict()

    for loc in locations:
        exclname = state["EXCLUSION"][loc_to_state[loc]]
        positioners[loc] = dict()
        positioners[loc]["theta"] = Shape(excl[exclname]["theta"])
        positioners[loc]["phi"] = Shape(excl[exclname]["phi"])
        if "gfa" in excl[exclname]:
            positioners[loc]["gfa"] = Shape(excl[exclname]["gfa"])
        else:
            positioners[loc]["gfa"] = Shape()
        if "petal" in excl[exclname]:
            positioners[loc]["petal"] = Shape(excl[exclname]["petal"])
        else:
            positioners[loc]["petal"] = Shape()

    hw = Hardware(
        tmstr,
        locations,
        fp["PETAL"][keep_rows],
        fp["DEVICE"][keep_rows],
        fp["SLITBLOCK"][keep_rows],
        fp["BLOCKFIBER"][keep_rows],
        fp["FIBER"][keep_rows],
        device_type,
        fp["OFFSET_X"][keep_rows],
        fp["OFFSET_Y"][keep_rows],
        np.array([state["STATE"][loc_to_state[x]] for x in locations]),
        np.array([fp["OFFSET_T"][loc_to_fp[x]] for x in locations]),
        np.array([fp["MIN_T"][loc_to_fp[x]] for x in locations]),
        np.array([fp["MAX_T"][loc_to_fp[x]] for x in locations]),
        np.array([fp["LENGTH_R1"][loc_to_fp[x]] for x in locations]),
        np.array([fp["OFFSET_P"][loc_to_fp[x]] for x in locations]),
        np.array([fp["MIN_P"][loc_to_fp[x]] for x in locations]),
        np.array([fp["MAX_P"][loc_to_fp[x]] for x in locations]),
        np.array([fp["LENGTH_R2"][loc_to_fp[x]] for x in locations]),
        fine_radius,
        fine_theta,
        fine_arc,
        [positioners[x]["theta"] for x in locations],
        [positioners[x]["phi"] for x in locations],
        [positioners[x]["gfa"] for x in locations],
        [positioners[x]["petal"] for x in locations],
    )
    return hw
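A minimal usage sketch, assuming a configured fiberassign/desimodel environment with focalplane data available; the run date is illustrative:

# load the hardware model for a given run date
hw = load_hardware(rundate="2020-03-15T00:00:00")

# or bypass desimodel.io.load_focalplane() by passing a (fp, exclude, state)
# tuple directly, matching the unpacking in the code above
# hw = load_hardware(focalplane=(fp, exclude, state))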
Example #6
def load_hardware(focalplane=None, rundate=None):
    """Create a hardware class representing properties of the telescope.

    Args:
        focalplane (tuple):  Override the focalplane model.  If not None, this
            should be a tuple of the same data types returned by
            desimodel.io.load_focalplane()
        rundate (str):  ISO 8601 format time stamp as a string in the
            format YYYY-MM-DDTHH:MM:SS±zz:zz (UTC is assumed if the
            timezone offset is omitted).  If None, uses current time.

    Returns:
        (Hardware):  The hardware object.

    """
    log = Logger.get()

    # The timestamp for this run.
    runtime = None
    if rundate is None:
        runtime = datetime.now(tz=timezone.utc)
    else:
        try:
            runtime = datetime.strptime(rundate, "%Y-%m-%dT%H:%M:%S%z")
        except ValueError:
            runtime = datetime.strptime(rundate, "%Y-%m-%dT%H:%M:%S")
            msg = "Requested run date '{}' is not timezone-aware.  Assuming UTC.".format(
                runtime)
            log.warning(msg)
            runtime = runtime.replace(tzinfo=timezone.utc)
    runtimestr = None
    try:
        runtimestr = runtime.isoformat(timespec="seconds")
    except TypeError:
        runtimestr = runtime.isoformat()

    # Get the focalplane information
    fp = None
    exclude = None
    state = None
    create_time = "UNKNOWN"
    if focalplane is None:
        fp, exclude, state, create_time = dmio.load_focalplane(runtime)
    else:
        fp, exclude, state = focalplane

    # Get the plate scale
    platescale = dmio.load_platescale()

    # We are going to do a quadratic interpolation to the platescale on a fine grid,
    # and then use that for *linear* interpolation inside the compiled code.  The
    # default platescale data is on a one mm grid spacing.  We also do the same
    # interpolation of the arclength S(R).

    fine_radius = np.linspace(platescale["radius"][0],
                              platescale["radius"][-1],
                              num=10000,
                              dtype=np.float64)
    fn = interp1d(platescale["radius"], platescale["theta"], kind="quadratic")
    fine_theta = fn(fine_radius).astype(np.float64)
    fn = interp1d(platescale["radius"],
                  platescale["arclength"],
                  kind="quadratic")
    fine_arc = fn(fine_radius).astype(np.float64)

    # We are only going to keep rows for LOCATIONs that are assigned to a
    # science or sky monitor positioner.

    log.info("Loaded focalplane for time stamp {}".format(runtime))

    pos_rows = np.where(fp["DEVICE_TYPE"].astype(str) == "POS")[0]
    etc_rows = np.where(fp["DEVICE_TYPE"].astype(str) == "ETC")[0]
    keep_rows = np.unique(np.concatenate((pos_rows, etc_rows)))

    nloc = len(keep_rows)
    log.debug(
        "  focalplane table keeping {} rows for POS and ETC devices".format(
            nloc))

    device_type = np.full(nloc, "OOPSBUG", dtype="a8")
    device_type[:] = fp["DEVICE_TYPE"][keep_rows]

    locations = np.copy(fp["LOCATION"][keep_rows])

    # Map location to row in the table

    loc_to_fp = dict()
    for rw, loc in enumerate(fp["LOCATION"]):
        loc_to_fp[loc] = rw

    # FIXME:  Here we assume that the 32bit STATE column has the same bit
    # definitions as what is used by fiberassign (defined in hardware.h):
    # If this is not true, then re-map those values here inside the "state"
    # table loaded above.

    # Map location to row of the state table

    loc_to_state = dict()
    for rw, loc in enumerate(state["LOCATION"]):
        loc_to_state[loc] = rw

    # Convert the exclusion polygons into shapes.

    excl = dict()

    for nm, shp in exclude.items():
        excl[nm] = dict()
        for obj in shp.keys():
            cr = list()
            for crc in shp[obj]["circles"]:
                cr.append(Circle(crc[0], crc[1]))
            sg = list()
            for sgm in shp[obj]["segments"]:
                sg.append(Segments(sgm))
            fshp = Shape((0.0, 0.0), cr, sg)
            excl[nm][obj] = fshp

    # For each positioner, select the exclusion polygons.

    positioners = dict()

    for loc in locations:
        exclname = state["EXCLUSION"][loc_to_state[loc]]
        positioners[loc] = dict()
        positioners[loc]["theta"] = Shape(excl[exclname]["theta"])
        positioners[loc]["phi"] = Shape(excl[exclname]["phi"])
        if "gfa" in excl[exclname]:
            positioners[loc]["gfa"] = Shape(excl[exclname]["gfa"])
        else:
            positioners[loc]["gfa"] = Shape()
        if "petal" in excl[exclname]:
            positioners[loc]["petal"] = Shape(excl[exclname]["petal"])
        else:
            positioners[loc]["petal"] = Shape()

    hw = None
    if "MIN_P" in state.colnames:
        # This is a new-format focalplane model (after desimodel PR #143)
        hw = Hardware(
            runtimestr,
            locations,
            fp["PETAL"][keep_rows],
            fp["DEVICE"][keep_rows],
            fp["SLITBLOCK"][keep_rows],
            fp["BLOCKFIBER"][keep_rows],
            fp["FIBER"][keep_rows],
            device_type,
            fp["OFFSET_X"][keep_rows],
            fp["OFFSET_Y"][keep_rows],
            np.array([state["STATE"][loc_to_state[x]] for x in locations]),
            np.array([fp["OFFSET_T"][loc_to_fp[x]] for x in locations]),
            np.array([state["MIN_T"][loc_to_state[x]] for x in locations]),
            np.array([state["MAX_T"][loc_to_state[x]] for x in locations]),
            np.array([state["POS_T"][loc_to_state[x]] for x in locations]),
            np.array([fp["LENGTH_R1"][loc_to_fp[x]] for x in locations]),
            np.array([fp["OFFSET_P"][loc_to_fp[x]] for x in locations]),
            np.array([state["MIN_P"][loc_to_state[x]] for x in locations]),
            np.array([state["MAX_P"][loc_to_state[x]] for x in locations]),
            np.array([state["POS_P"][loc_to_state[x]] for x in locations]),
            np.array([fp["LENGTH_R2"][loc_to_fp[x]] for x in locations]),
            fine_radius,
            fine_theta,
            fine_arc,
            [positioners[x]["theta"] for x in locations],
            [positioners[x]["phi"] for x in locations],
            [positioners[x]["gfa"] for x in locations],
            [positioners[x]["petal"] for x in locations],
        )
    else:
        # This is an old-format focalplane model (prior to desimodel PR #143).  For
        # stuck positioners, we want to specify a default POS_T / POS_P to use.
        # These old models did not include any information about that, so we use
        # the minimum Theta value and either the maximum Phi value or 180 degrees,
        # whichever is smaller.
        fake_pos_p = np.zeros(len(locations), dtype=np.float64)
        fake_pos_t = np.zeros(len(locations), dtype=np.float64)
        for ilid, lid in enumerate(locations):
            pt = fp["MIN_T"][loc_to_fp[lid]] + fp["OFFSET_T"][loc_to_fp[lid]]
            pp = fp["MAX_P"][loc_to_fp[lid]] + fp["OFFSET_P"][loc_to_fp[lid]]
            if pp > 180.0:
                pp = 180.0
            fake_pos_p[ilid] = pp
            fake_pos_t[ilid] = pt
        hw = Hardware(
            runtimestr,
            locations,
            fp["PETAL"][keep_rows],
            fp["DEVICE"][keep_rows],
            fp["SLITBLOCK"][keep_rows],
            fp["BLOCKFIBER"][keep_rows],
            fp["FIBER"][keep_rows],
            device_type,
            fp["OFFSET_X"][keep_rows],
            fp["OFFSET_Y"][keep_rows],
            np.array([state["STATE"][loc_to_state[x]] for x in locations]),
            np.array([fp["OFFSET_T"][loc_to_fp[x]] for x in locations]),
            np.array([fp["MIN_T"][loc_to_fp[x]] for x in locations]),
            np.array([fp["MAX_T"][loc_to_fp[x]] for x in locations]),
            fake_pos_t,
            np.array([fp["LENGTH_R1"][loc_to_fp[x]] for x in locations]),
            np.array([fp["OFFSET_P"][loc_to_fp[x]] for x in locations]),
            np.array([fp["MIN_P"][loc_to_fp[x]] for x in locations]),
            np.array([fp["MAX_P"][loc_to_fp[x]] for x in locations]),
            fake_pos_p,
            np.array([fp["LENGTH_R2"][loc_to_fp[x]] for x in locations]),
            fine_radius,
            fine_theta,
            fine_arc,
            [positioners[x]["theta"] for x in locations],
            [positioners[x]["phi"] for x in locations],
            [positioners[x]["gfa"] for x in locations],
            [positioners[x]["petal"] for x in locations],
        )
    return hw
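The main behavioral change relative to Example #5 is the timezone-aware handling of rundate. The standalone sketch below (a hypothetical helper, standard library only) mirrors that parsing logic, including the fallback that assumes UTC for naive timestamps:

from datetime import datetime, timezone

def parse_rundate(rundate):
    """Parse an ISO 8601 rundate as above, assuming UTC when no offset is given."""
    try:
        # timezone-aware string, e.g. "2020-03-15T00:00:00+00:00"
        return datetime.strptime(rundate, "%Y-%m-%dT%H:%M:%S%z")
    except ValueError:
        # naive string, e.g. "2020-03-15T00:00:00": assume UTC
        naive = datetime.strptime(rundate, "%Y-%m-%dT%H:%M:%S")
        return naive.replace(tzinfo=timezone.utc)

# both print 2020-03-15T00:00:00+00:00
print(parse_rundate("2020-03-15T00:00:00+00:00").isoformat(timespec="seconds"))
print(parse_rundate("2020-03-15T00:00:00").isoformat(timespec="seconds"))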