Example #1
def gfa2fp(petal_loc, xgfa, ygfa):
    """
    Transforms from GFA pixel coordinates to focal plane mm

    Args:
        petal_loc (int): Petal location 0-9
        xgfa, ygfa: GFA pixel coordinates, (0,0) is corner pixel center

    Returns CS5 xfp, yfp in mm
    """
    global _gfa_transforms
    if _gfa_transforms is None:
        metrology = io.load_metrology()
        _gfa_transforms = fit_gfa2fp(metrology)

    log = get_logger()
    if petal_loc not in _gfa_transforms:
        message = 'PETAL_LOC {} GFA metrology missing'.format(petal_loc)
        log.error(message)
        raise ValueError(message)

    xfp, yfp = _gfa_transforms[petal_loc].apply(xgfa, ygfa)

    return xfp, yfp
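A minimal usage sketch (the pixel values are hypothetical; assumes the desimeter metrology needed to fit the per-petal transforms is available):

import numpy as np
# convert a hypothetical GFA pixel on petal 3 to CS5 focal plane mm
xfp, yfp = gfa2fp(3, np.array([1024.0]), np.array([516.0]))
print(xfp, yfp)  # focal plane coordinates in mm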
Example #2
File: zb.py  Project: dstndstn/desimeter
    def fit(self, spots, metrology=None, update_spots=False):
        """TODO: document"""
        log = get_logger()
        if metrology is not None:
            self.metrology = metrology
        else:
            filename = resource_filename('desimeter', "data/fp-metrology.csv")
            if not os.path.isfile(filename):
                log.error("cannot find {}".format(filename))
                raise IOError("cannot find {}".format(filename))
            log.info("reading fiducials metrology in {}".format(filename))
            self.metrology = Table.read(filename, format="csv")

        #- Trim spots to just fiducial spots (not positioners, not unmatched spots)
        ii = (spots['LOCATION'] > 0) & (spots['PINHOLE_ID'] > 0)
        fidspots = spots[ii]

        #- trim metrology to just the ones that have spots
        fidspots_pinloc = fidspots['LOCATION'] * 10 + fidspots['PINHOLE_ID']
        metro_pinloc = self.metrology['LOCATION'] * 10 + self.metrology[
            'PINHOLE_ID']
        jj = np.in1d(metro_pinloc, fidspots_pinloc)
        metrology = self.metrology[jj]

        #- Sort so that they match each other
        fidspots.sort(keys=('LOCATION', 'PINHOLE_ID'))
        metrology.sort(keys=('LOCATION', 'PINHOLE_ID'))
        assert np.all(fidspots['LOCATION'] == metrology['LOCATION'])
        assert np.all(fidspots['PINHOLE_ID'] == metrology['PINHOLE_ID'])

        #- Get reduced coordinates
        rxpix, rypix = _reduce_xyfvc(fidspots['XPIX'], fidspots['YPIX'])
        rxfp, ryfp = _reduce_xyfp(metrology['X_FP'], metrology['Y_FP'])

        #- Perform fit
        scale, rotation, offset_x, offset_y, zbpolids, zbcoeffs = \
            fit_scale_rotation_offset(rxpix, rypix, rxfp, ryfp, fitzb=True)

        self.scale = scale
        self.rotation = rotation
        self.offset_x = offset_x
        self.offset_y = offset_y
        self.zbpolids = zbpolids
        self.zbcoeffs = zbcoeffs

        #- Goodness of fit
        xfp_fidmeas, yfp_fidmeas = self.fvc2fp(fidspots['XPIX'],
                                               fidspots['YPIX'])
        dx = (metrology['X_FP'] - xfp_fidmeas)
        dy = (metrology['Y_FP'] - yfp_fidmeas)
        dr = np.sqrt(dx**2 + dy**2)
        log.info(
            'Mean, median, RMS distance = {:.1f}, {:.1f}, {:.1f} um'.format(
                1000 * np.mean(dr), 1000 * np.median(dr),
                1000 * np.sqrt(np.mean(dr**2))))

        if update_spots:
            xfp_meas, yfp_meas = self.fvc2fp(spots['XPIX'], spots['YPIX'])
            spots["X_FP"] = xfp_meas
            spots["Y_FP"] = yfp_meas

            #- the metrology table is in a different order than the original
            #- spots table, which is also a superset of the fiducial spots
            #- matched to the metrology, so find the sorting of the metrology
            #- that will match the order that they appear in the spots table
            iifid = (spots['LOCATION'] > 0) & (spots['PINHOLE_ID'] > 0)
            fidspots_pinloc = (spots['LOCATION'] * 10 +
                               spots['PINHOLE_ID'])[iifid]
            metro_pinloc = metrology['LOCATION'] * 10 + metrology['PINHOLE_ID']

            ii = np.argsort(np.argsort(fidspots_pinloc))
            jj = np.argsort(metro_pinloc)
            kk = jj[ii]

            #- Check that we got that dizzying array of argsorts right
            assert np.all(
                spots['LOCATION'][iifid] == metrology['LOCATION'][kk])
            assert np.all(
                spots['PINHOLE_ID'][iifid] == metrology['PINHOLE_ID'][kk])

            #- Update the spots table with metrology columns
            #- TODO: use masked arrays in addition to default=0
            spots["X_FP_METRO"] = np.zeros(len(spots))
            spots["Y_FP_METRO"] = np.zeros(len(spots))
            spots["Z_FP_METRO"] = np.zeros(len(spots))
            spots["X_FP_METRO"][iifid] = metrology['X_FP'][kk]
            spots["Y_FP_METRO"][iifid] = metrology['Y_FP'][kk]
            spots["Z_FP_METRO"][iifid] = metrology['Z_FP'][kk]
Example #3
    def fit(self, table ) :
        """TODO: document"""
        log = get_logger()

        #- identify ADC setups
        adc12=(np.array(table['ADC1'])+1000*np.array(table['ADC2']))
        uadc12 = np.unique(adc12)
        nconfig = len(uadc12)
        
        self.adc1     = np.zeros(nconfig,dtype=float)
        self.adc2     = np.zeros(nconfig,dtype=float)
        self.scale    = np.zeros(nconfig,dtype=float)
        self.rotation = np.zeros(nconfig,dtype=float)
        self.offset_x = np.zeros(nconfig,dtype=float)
        self.offset_y = np.zeros(nconfig,dtype=float)
        self.zbpolids = None
        self.zbcoeffs = list()
        
        for config, adc12v in enumerate(uadc12) :
            selection = (adc12==adc12v)
            self.adc1[config]=table['ADC1'][selection][0]
            self.adc2[config]=table['ADC2'][selection][0]
            print("Fitting ADC1={} ADC2={}".format(self.adc1[config],self.adc2[config]))
            
            #- Get reduced coordinates
            rxtan, rytan = _reduce_xytan(table['X_TAN'][selection], table['Y_TAN'][selection])
            rxfp, ryfp = _reduce_xyfp(table['X_FP'][selection], table['Y_FP'][selection])
            
            

            #################################################################
            ## CHOICE OF POLYNOMIALS IS HERE
            ## 
            polids = np.array([2, 5, 6, 9, 20, 27, 28, 29, 30],dtype=int)
            #################################################################
            #- Perform fit
            if 1 : # fitting scale+rotation+offset together with the ZB terms works a bit better
                scale, rotation, offset_x, offset_y, zbpolids, zbcoeffs = fit_scale_rotation_offset(rxtan, rytan, rxfp, ryfp, fitzb=True, polids=polids)
                self.scale[config] = scale
                self.rotation[config] = rotation
                self.offset_x[config] = offset_x
                self.offset_y[config] = offset_y
            else :
                zbpolids, zbcoeffs, dx, dy =  fitZhaoBurge(rxtan, rytan, rxfp, ryfp, polids=polids)
            
                self.scale[config] = 1
                self.rotation[config] = 0.
                self.offset_x[config] = 0.
                self.offset_y[config] = 0.
                
            if self.zbpolids is None :
                self.zbpolids = zbpolids
            else :
                assert(np.all(self.zbpolids==zbpolids))
            self.zbcoeffs.append(zbcoeffs)

            #- Goodness of fit
            #xfp_fit, yfp_fit = self.tan2fp(table['X_TAN'][selection], table['Y_TAN'][selection])
            #dx = (table['X_FP'][selection] - xfp_fit)
            #dy = (table['Y_FP'][selection] - yfp_fit)
            #dr = np.sqrt(dx**2 + dy**2)
            #log.info('Mean, median, RMS distance = {:.1f}, {:.1f}, {:.1f} um'.format(
            #    1000*np.mean(dr), 1000*np.median(dr), 1000*np.sqrt(np.mean(dr**2))))

        self.zbcoeffs = np.vstack(self.zbcoeffs)
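A sketch of driving this per-ADC-configuration fit, assuming a table with ADC1, ADC2, X_TAN, Y_TAN, X_FP, Y_FP columns ("corr" is a hypothetical instance of this class):

corr.fit(table)
# one row of parameters per unique (ADC1, ADC2) configuration
print(corr.adc1.shape)      # (nconfig,)
print(corr.zbcoeffs.shape)  # (nconfig, number of Zhao-Burge polynomials)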
Example #4
def detectspots(fvcimage,
                min_counts_per_pixel=500,
                min_counts_per_spot=5000,
                nsig=7,
                psf_sigma=1.):
    """
    Detect spots in a fiber view image and measure their centroids and flux
    Args:
        fvcimage : 2D numpy array

    Optional:
        min_counts_per_pixel : float; the max of this value (in counts/pixel)
            and nsig*rms is used as the threshold for the first detection of peaks
        min_counts_per_spot : float; discard spots whose fitted counts are below this
        nsig : float; number of sigmas above the rms for the detection threshold
        psf_sigma : float; sigma in pixels of the Gaussian convolution kernel

    Returns astropy.Table with spots; columns are XPIX, YPIX, XERR, YERR, COUNTS
    """

    log = get_logger()

    n0 = fvcimage.shape[0]
    n1 = fvcimage.shape[1]

    # find peaks = local maximum above min_counts_per_pixel
    log.info("gaussian convolve with sigma = {:2.1f} pixels".format(psf_sigma))
    convolved_image = gaussian_convolve(fvcimage, psf_sigma)

    # measure pedestal and rms
    # look the values of a random subsample of the image
    nrand = 20000
    ii0 = (np.random.uniform(size=nrand) * n0).astype(int)
    ii1 = (np.random.uniform(size=nrand) * n1).astype(int)
    vals = convolved_image[ii0, ii1].ravel()
    mval = np.median(vals)
    #- normalized median absolute deviation as robust version of RMS
    #- see https://en.wikipedia.org/wiki/Median_absolute_deviation
    convolved_image_rms = 1.4826 * np.median(np.abs(vals - mval))
    ok = np.abs(vals - mval) < 4 * convolved_image_rms
    mval = np.mean(vals[ok])
    convolved_image_rms = np.std(vals[ok])
    log.info("convolved image pedestal={:4.2f} rms={:4.2f}".format(
        mval, convolved_image_rms))
    # remove mean; if fvcimage was integer input, this will cast to float64
    fvcimage = fvcimage - mval
    convolved_image -= mval

    #import matplotlib.pyplot as plt
    #plt.hist(vals[ok]-mval,bins=100)
    #plt.show()

    if min_counts_per_pixel is None:
        threshold = nsig * convolved_image_rms
    else:
        threshold = max(min_counts_per_pixel, nsig * convolved_image_rms)

    peaks = np.zeros((n0, n1))
    peaks[1:-1,
          1:-1] = ((convolved_image[1:-1, 1:-1] > convolved_image[:-2, 1:-1]) *
                   (convolved_image[1:-1, 1:-1] > convolved_image[2:, 1:-1]) *
                   (convolved_image[1:-1, 1:-1] > convolved_image[1:-1, :-2]) *
                   (convolved_image[1:-1, 1:-1] > convolved_image[1:-1, 2:]) *
                   (convolved_image[1:-1, 1:-1] > threshold))
    # loop on peaks
    peakindices = np.where(peaks.ravel() > 0)[0]
    npeak = len(peakindices)
    if npeak == 0:
        log.error("no spot found")
        raise RuntimeError("no spot found")
    else:
        log.info("found {} peaks".format(npeak))
        if npeak > 10000:
            log.error("this is far too many, is the room/dome light on??")
            raise RuntimeError("too many spots")
    xpix = np.zeros(npeak)
    ypix = np.zeros(npeak)
    xerr = np.zeros(npeak)
    yerr = np.zeros(npeak)
    counts = np.zeros(npeak)
    hw = 3

    xoffset = 0.  # would need offsets of 1 to match with POS file. not sure what is the best choice here.
    yoffset = 0.
    if xoffset != 0 or yoffset != 0:
        log.warning(
            "Applying offsets x += {} and y += {} (to match with others, like the POS files)"
            .format(xoffset, yoffset))
    if xoffset == 0 and yoffset == 0:
        log.debug(
            "Here center of first pixel has coord=(0,0); so we expect offsets of 1 with respect to coordinates in .pos files."
        )

    # compute noise in original image
    vals = fvcimage[ii0, ii1].ravel()
    mval = np.median(vals)
    fvcimage_rms = 1.4826 * np.median(np.abs(vals - mval))
    ok = np.abs(vals - mval) < 4 * fvcimage_rms
    mval = np.mean(vals[ok])
    fvcimage_rms = np.std(vals[ok])
    log.info("fvc image pedestal={:4.2f} rms={:4.2f}".format(
        mval, fvcimage_rms))

    for j, index in enumerate(peakindices):
        i0 = index // n1
        i1 = index % n1
        if 1:  #try :
            x, y, ex, ey, c = fitcentroid(fvcimage[i0 - hw:i0 + hw + 1,
                                                   i1 - hw:i1 + hw + 1],
                                          noise=fvcimage_rms)
            xpix[
                j] = x + i1 + xoffset  #  x is along axis=1 in python , also adding offset (definition of pixel coordinates)
            ypix[
                j] = y + i0 + yoffset  #  y is along axis=0 in python , also adding offset (definition of pixel coordinates)
            xerr[j] = ex
            yerr[j] = ey
            counts[j] = c
        #except Exception as e:
        #    log.error("failed to fit a centroid {}".format(e))

        log.debug("{} x={} y={} counts={}".format(j, xpix[j], ypix[j],
                                                  counts[j]))

    if min_counts_per_spot > 0:
        good = (counts >= min_counts_per_spot)
        xpix = xpix[good]
        ypix = ypix[good]
        xerr = xerr[good]
        yerr = yerr[good]
        counts = counts[good]

    for _ in range(100):
        # iterate, removing duplicates
        xy = np.array([xpix, ypix]).T
        tree = KDTree(xy)
        distances, indices = tree.query(xy, k=2)  # nearest neighbor in addition to self
        distances = distances[:, 1]  # discard same
        indices = indices[:, 1]  # discard same
        bad = np.where(distances < psf_sigma)[0]  # at least 1 duplicate
        if bad.size > 0:
            bad = bad[0]
            index_to_remove = indices[bad]
            log.warning(
                "remove one duplicated detection of spot at x,y={:5.3f},{:5.3f} and {:5.3f},{:5.3f} at distance={}"
                .format(xpix[bad], ypix[bad], xpix[index_to_remove],
                        ypix[index_to_remove], distances[bad]))
            if counts[bad] < counts[index_to_remove]:
                index_to_remove = bad  # remove the faintest
            xpix = np.delete(xpix, index_to_remove)
            ypix = np.delete(ypix, index_to_remove)
            xerr = np.delete(xerr, index_to_remove)
            yerr = np.delete(yerr, index_to_remove)
            counts = np.delete(counts, index_to_remove)
        else:
            break  #exit

    table = Table([xpix, ypix, xerr, yerr, counts],
                  names=("XPIX", "YPIX", "XERR", "YERR", "COUNTS"))
    return table
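A minimal usage sketch, assuming an FVC exposure on disk (the file name is hypothetical):

from astropy.io import fits
fvcimage = fits.getdata("fvc-00012345.fits")
spots = detectspots(fvcimage, min_counts_per_pixel=500, nsig=7)
print(len(spots), spots.colnames)  # XPIX, YPIX, XERR, YERR, COUNTS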
Example #5
def tan2radec(x_tan,
              y_tan,
              tel_ra,
              tel_dec,
              mjd,
              lst_deg,
              hexrot_deg,
              precession=True,
              aberration=True,
              polar_misalignment=True,
              use_astropy=False):
    """
    Convert ICRS coordinates to tangent plane coordinates

    Args:
        xtan: float or 1D np.array with tangent plane coordinates
        ytan: float or 1D np.array with tangent plane coordinates
        tel_ra: float, in degrees, telescope pointing RA
        tel_dec: float, in degrees, telescope pointing Dec
        mjd: float, Modified Julian Date of observation, in days
        lst_deg: float, local sidereal time, in degrees
        hexrot_deg: float, hexapod rotation angle, in degrees
    
    Returns RA,Dec in ICRS system (hopefully) 
  
    Optional arguments:
        aberration: boolean; compute aberration if True
        polar_misalignment: boolean; compute polar misalignment if True
        use_astropy: boolean; use astropy coordinates for precession and aberration if True
    """

    log = get_logger()

    # undo hexapod rotation
    chex = cosd(hexrot_deg)
    shex = sind(hexrot_deg)
    x = chex * x_tan + shex * y_tan
    y = -shex * x_tan + chex * y_tan

    # need to apply precession ... etc to telescope pointing to interpret the x,y
    if precession:
        tel_ra, tel_dec = apply_precession_from_icrs(tel_ra, tel_dec, mjd,
                                                     use_astropy)
    if aberration:
        tel_ra, tel_dec = apply_aberration(tel_ra, tel_dec, mjd, use_astropy)

    tel_ha = lst_deg - tel_ra

    if polar_misalignment:
        polar_misalignment_matrix = compute_polar_misalignment_rotation_matrix(
            me_arcsec=ME_ARCSEC, ma_arcsec=MA_ARCSEC)
        tel_ha, tel_dec = getLONLAT(
            polar_misalignment_matrix.dot(getXYZ(tel_ha, tel_dec)))

    # we need to apply refraction for the telescope pointing to interpret the x,y
    tel_alt, tel_az = hadec2altaz(tel_ha, tel_dec)
    # apply refraction
    refracted_tel_alt = apply_refraction(tel_alt)
    # back to ha,dec
    refracted_tel_ha, refracted_tel_dec = altaz2hadec(refracted_tel_alt,
                                                      tel_az)
    # now convert x,y to ha,dec
    refracted_ha, refracted_dec = xy2hadec(x, y, refracted_tel_ha,
                                           refracted_tel_dec)

    # alt,az
    alt, az = hadec2altaz(refracted_ha, refracted_dec)

    # undo refraction
    alt = undo_refraction(alt)

    # back to ha,dec
    ha, dec = altaz2hadec(alt, az)

    # now polar mis-alignment
    if polar_misalignment:
        # inverse matrix
        polar_misalignment_matrix = compute_polar_misalignment_rotation_matrix(
            me_arcsec=-ME_ARCSEC, ma_arcsec=-MA_ARCSEC)
        ha, dec = getLONLAT(polar_misalignment_matrix.dot(getXYZ(ha, dec)))

    # ra
    ra = lst_deg - ha

    if aberration:
        ra, dec = undo_aberration(ra, dec, mjd, use_astropy)

    if precession:
        ra, dec = undo_precession_from_icrs(ra, dec, mjd, use_astropy)

    return ra, dec
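A minimal call sketch (all pointing values are made-up placeholders):

import numpy as np
ra, dec = tan2radec(np.array([0.001]), np.array([-0.002]),
                    tel_ra=150.0, tel_dec=30.0, mjd=58850.0,
                    lst_deg=155.0, hexrot_deg=0.5)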
Example #6
def radec2tan(ra,
              dec,
              tel_ra,
              tel_dec,
              mjd,
              lst_deg,
              hexrot_deg,
              precession=True,
              aberration=True,
              polar_misalignment=True,
              use_astropy=False):
    """
    Convert ICRS coordinates to tangent plane coordinates

    Args:
        ra: float or 1D np.array with RA in degrees
        dec: float or 1D np.array with Dec in degrees
        tel_ra: float, in degrees, telescope pointing RA
        tel_dec: float, in degrees, telescope pointing Dec
        mjd: float, Modified Julian Date of observation, in days
        lst_deg: float, local sidereal time, in degrees
        hexrot_deg: float, hexapod rotation angle, in degrees
    
    Returns x_tan, y_tan, the tangent plane coordinates:
        x_tan = sin(theta)*cos(phi) : float or np.array (same shape as input ra, dec)
        y_tan = sin(theta)*sin(phi) : float or np.array (same shape as input ra, dec)
        
        where theta,phi are polar coordinates. theta=0 for along the telescope
        pointing. phi=0 for a star with the same Dec as the telescope
        pointing but a larger HA (or smaller RA).
  
    Optional arguments:
        precession: boolean; compute precession if True
        aberration: boolean; compute aberration if True
        polar_misalignment: boolean; compute polar misalignment if True
        use_astropy: boolean; use astropy coordinates for precession and aberration if True
    """
    log = get_logger()

    if precession:
        ra, dec = apply_precession_from_icrs(ra, dec, mjd, use_astropy)
        tel_ra, tel_dec = apply_precession_from_icrs(tel_ra, tel_dec, mjd,
                                                     use_astropy)

    if aberration:
        ra, dec = apply_aberration(ra, dec, mjd, use_astropy)
        tel_ra, tel_dec = apply_aberration(tel_ra, tel_dec, mjd, use_astropy)

    # ha,dec
    ha = lst_deg - ra
    tel_ha = lst_deg - tel_ra

    if polar_misalignment:
        # rotate
        polar_misalignment_matrix = compute_polar_misalignment_rotation_matrix(
            me_arcsec=ME_ARCSEC, ma_arcsec=MA_ARCSEC)
        ha, dec = getLONLAT(polar_misalignment_matrix.dot(getXYZ(ha, dec)))
        tel_ha, tel_dec = getLONLAT(
            polar_misalignment_matrix.dot(getXYZ(tel_ha, tel_dec)))

    # alt,az
    alt, az = hadec2altaz(ha, dec)
    tel_alt, tel_az = hadec2altaz(tel_ha, tel_dec)

    # apply refraction
    alt = apply_refraction(alt)
    tel_alt = apply_refraction(tel_alt)

    # convert back to ha,dec
    ha, dec = altaz2hadec(alt, az)
    tel_ha, tel_dec = altaz2hadec(tel_alt, tel_az)

    # tangent plane
    x, y = hadec2xy(ha, dec, tel_ha, tel_dec)

    # hexapod rotation
    chex = cosd(hexrot_deg)
    shex = sind(hexrot_deg)

    return chex * x - shex * y, +shex * x + chex * y
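A hedged round-trip sketch pairing radec2tan with tan2radec (example #5); the pointing numbers are arbitrary:

import numpy as np
args = dict(tel_ra=150.0, tel_dec=30.0, mjd=58850.0, lst_deg=155.0, hexrot_deg=0.5)
x, y = radec2tan(np.array([150.1]), np.array([30.05]), **args)
ra, dec = tan2radec(x, y, **args)
# ra, dec should recover the input ICRS coordinates to numerical precision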
Example #7
def match_arbitrary_translation_dilatation(x1,y1,x2,y2) :
    """
    Match two catalogs in different coordinate systems, 1 and 2, related by a translation,
    a dilatation, and possibly a "small" rotation.
    The orientation of triangles is used for the match, so the rotation has to be small.
    Inspired by http://articles.adsabs.harvard.edu/pdf/1986AJ.....91.1244G
    
    Args:
        x1 : float numpy array of coordinates along first axis of cartesian coordinate system 1
        y1 : float numpy array of coordinates along second axis of cartesian coordinate system 1
        x2 : float numpy array of coordinates along first axis of cartesian coordinate system 2
        y2 : float numpy array of coordinates along second axis of cartesian coordinate system 2
    
    returns:
        indices_2 : integer numpy array. If ii is an index array for entries in the first catalog,
                            indices_2[ii] is the index array of best matching entries in the second catalog
                            (one should compare x1[ii] with x2[indices_2[ii]]);
                            negative values for unmatched entries.
        distances : distances between pairs of matched triangles; can be used to discard bad matches.

    """

    log = get_logger()
    
    # compute all possible triangles in both data sets
    # txyz are properties of the shape and orientation of the triangles
    log.debug("compute triangles")
    tk1,txyz1 = compute_triangles_with_fixed_orientation(x1,y1)
    tk2,txyz2 = compute_triangles_with_fixed_orientation(x2,y2)
    
    log.debug("match triangles")
    # match with kdtree triangles with same shape and orientation
    tree2=KDTree(txyz2)
    triangle_distances,triangle_indices_2 = tree2.query(txyz1,k=1)
    
    # now that we have matched triangles, we need to match back the catalog entries
    ranked_pairs = np.argsort(triangle_distances)
    
    indices_2 = -1*np.ones(x1.size,dtype=int)
    distances = np.zeros(x1.size)
    
    all_matched = False
    log.debug("match catalogs using pairs of triangles")
    for p in ranked_pairs :

        k1=tk1[p] # indices (in x1,y1) of vertices of this triangle (size=3)
        k2=tk2[triangle_indices_2[p]] # indices (in x2,y2) of vertices of the matched triangle
        
        # check unmatched or equal
        if np.any((indices_2[k1]>=0)&(indices_2[k1]!=k2)) :
            log.warning("skip {} <=> {}".format(k1,k2))
            continue
        indices_2[k1]=k2
        distances[k1]=triangle_distances[p]
        all_matched = (np.sum(indices_2>=0)==x1.size)
        if all_matched :
            log.debug("all matched")
            break

    # check duplicates
    for i2 in np.unique(indices_2[indices_2>=0]) :
        ii=(indices_2==i2)
        if np.sum(ii) > 1 :
            log.warning("{} duplicates for i2={}".format(np.sum(ii),i2))
            indices_2[ii]=-1
    
    return indices_2 , distances
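A self-contained sketch with synthetic catalogs related by a pure translation and dilatation (the scale and offsets are arbitrary test values):

import numpy as np
rng = np.random.default_rng(0)
x1 = rng.uniform(0, 100, size=10)
y1 = rng.uniform(0, 100, size=10)
x2 = 1.2 * x1 + 5.0   # same points, dilated and translated
y2 = 1.2 * y1 - 3.0
indices_2, distances = match_arbitrary_translation_dilatation(x1, y1, x2, y2)
matched = indices_2 >= 0
assert np.all(indices_2[matched] == np.arange(10)[matched])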
Example #8
    def fit_tancorr(self,catalog,mjd=None,hexrot_deg=None,lst=None) :

        log = get_logger()
        
        x_gfa  = catalog["xcentroid"]
        y_gfa  = catalog["ycentroid"]
        ra_gaia  = catalog["ra_gaia"]
        dec_gaia = catalog["dec_gaia"]
        
        # mjd, hexrot_deg, lst could have been set before
        if mjd is not None :
            self.mjd = mjd
            log.info("Use argument MJD={}".format(self.mjd))
        elif "mjd_obs" in catalog.keys():
            self.mjd    = np.mean(catalog["mjd_obs"])
            log.info("Use 'mjd_obs' in catalog, MJD={}".format(self.mjd))
        elif self.mjd is None :
            log.error("mjd is None")
            raise RuntimeError("mjd is None")
        else :
            log.info("Use MJD={}".format(self.mjd))
        
        if hexrot_deg is not None :
            self.hexrot_deg = hexrot_deg
        elif self.hexrot_deg is None :
            log.error("hexrot_deg is None")
            raise RuntimeError("hexrot_deg is None")

        if lst is not None :
            self.lst = lst
        elif self.lst is None :
            log.warning("Compute LST from MJD={}".format(self.mjd))
            self.lst = mjd2lst(self.mjd)
        log.info("Use LST={}".format(self.lst))
            
        # first transformation: gfa2fp
        x_fp,y_fp = self.all_gfa2fp(x_gfa,y_gfa,petal_loc=catalog["petal_loc"])
        
        # keep only petal data for which we have the metrology        
        selection = (x_fp!=0)
        x_gfa    = x_gfa[selection]
        y_gfa    = y_gfa[selection]
        x_fp     = x_fp[selection]
        y_fp     = y_fp[selection]
        ra_gaia  = ra_gaia[selection]
        dec_gaia = dec_gaia[selection]

        # transform focal plane to tangent plane
        x_tan_meas,y_tan_meas = fp2tan(x_fp,y_fp,self.adc1,self.adc2)

        correction = TanCorr()

        for loop in range(3) : # loop because change of pointing induces a rotation of the field
            
            # we transform GAIA coordinates to the tangent plane
            x_tan_gaia,y_tan_gaia = radec2tan(ra_gaia,dec_gaia,self.ra,self.dec,mjd=self.mjd,lst_deg=self.lst,hexrot_deg = self.hexrot_deg, precession = self.precession, aberration = self.aberration, polar_misalignment = self.polar_misalignment)
        
            # now that we have both sets of coordinates, we fit a transformation from one to the other
            correction.fit(x_tan_meas,y_tan_meas,x_tan_gaia,y_tan_gaia)

            # opposite sign for the telescope offset because I have converted GAIA RA Dec to tangent plane ...
            self.dec -= correction.ddec
            self.ra  += correction.dha/np.cos(self.dec*np.pi/180.) # HA = LST-RA

        
        # report fit quality
        log.info("RMS coord. residual = {:3.2f} arcsec".format(correction.rms_arcsec))
        log.info("Rotation angle (field rot ZP) ={:4.3f} deg".format(correction.rot_deg))
        log.info("Pointing correction dHA={:3.2f} arcsec, dDec={:3.2f} arcsec".format(correction.dha*3600.,correction.ddec*3600.))
        log.info("Scales sxx={:5.4f} syy={:5.4f} sxy={:5.4f}".format(correction.sxx,correction.syy,correction.sxy))

        # now I just copy the correction parameters in this class
        self.sxx = correction.sxx
        self.syy = correction.syy
        self.sxy = correction.sxy
        self.fieldrot_zp_deg = correction.rot_deg
        self.nstars = correction.nstars
        self.rms_arcsec = correction.rms_arcsec

        # I now derive the field rotation
        self.fieldrot_deg = self.compute_fieldrot()
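A sketch of the expected driving pattern, assuming a field-model-like object "fm" that already holds ra, dec, adc1, adc2 and the precession/aberration/polar_misalignment flags (names per this listing; the setup itself is hypothetical):

fm.ra, fm.dec = 150.0, 30.0   # initial telescope pointing guess
fm.fit_tancorr(catalog, mjd=58850.0, hexrot_deg=0.5)
print(fm.fieldrot_deg, fm.rms_arcsec)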
Example #9
    def fit(self,
            spots,
            metrology=None,
            update_spots=False,
            zbfit=True,
            fixed_scale=False,
            fixed_rotation=False):
        """
        TODO: document
        """

        log = get_logger()
        if metrology is not None:
            self.metrology = metrology
        else:
            self.metrology = load_metrology()

        #- Trim spots to just fiducial spots (not positioners, not unmatched spots)
        ii = (spots['LOCATION'] >= 0) & (spots['PINHOLE_ID'] > 0)
        fidspots = spots[ii]

        #- trim metrology to just the ones that have spots
        fidspots_pinloc = fidspots['LOCATION'] * 10 + fidspots['PINHOLE_ID']
        metro_pinloc = self.metrology['LOCATION'] * 10 + self.metrology[
            'PINHOLE_ID']
        jj = np.in1d(metro_pinloc, fidspots_pinloc)
        metrology = self.metrology[jj]

        #- Sort so that they match each other
        fidspots.sort(keys=('LOCATION', 'PINHOLE_ID'))
        metrology.sort(keys=('LOCATION', 'PINHOLE_ID'))
        assert np.all(fidspots['LOCATION'] == metrology['LOCATION'])
        assert np.all(fidspots['PINHOLE_ID'] == metrology['PINHOLE_ID'])

        #- Get reduced coordinates
        rxpix, rypix = self._reduce_xyfvc(fidspots['XPIX'], fidspots['YPIX'])
        rxfp, ryfp = self._reduce_xyfp(metrology['X_FP'], metrology['Y_FP'])

        if fixed_rotation:
            fixed_rotation_value = self.rotation
            log.info(
                "Use fixed rotation = {:5.4f}".format(fixed_rotation_value))
        else:
            fixed_rotation_value = None

        if fixed_scale:
            fixed_scale_value = self.scale
            log.info("Use fixed scale = {:5.4f}".format(fixed_scale_value))
        else:
            fixed_scale_value = None

        res = fit_scale_rotation_offset(rxpix,
                                        rypix,
                                        rxfp,
                                        ryfp,
                                        fitzb=zbfit,
                                        zbpolids=self.zbpolids,
                                        zbcoeffs=self.zbcoeffs,
                                        fixed_scale=fixed_scale_value,
                                        fixed_rotation=fixed_rotation_value)
        self.scale = res[0]
        self.rotation = res[1]
        self.offset_x = res[2]
        self.offset_y = res[3]
        if zbfit:
            self.zbpolids = res[4]
            self.zbcoeffs = res[5]

        #- Goodness of fit
        xfp_fidmeas, yfp_fidmeas = self.fvc2fp(fidspots['XPIX'],
                                               fidspots['YPIX'])
        dx = (metrology['X_FP'] - xfp_fidmeas)
        dy = (metrology['Y_FP'] - yfp_fidmeas)
        dr = np.sqrt(dx**2 + dy**2)
        log.info(
            'Mean, median, RMS distance = {:.1f}, {:.1f}, {:.1f} um'.format(
                1000 * np.mean(dr), 1000 * np.median(dr),
                1000 * np.sqrt(np.mean(dr**2))))

        if update_spots:
            xfp_meas, yfp_meas = self.fvc2fp(spots['XPIX'], spots['YPIX'])
            spots["X_FP"] = xfp_meas
            spots["Y_FP"] = yfp_meas

            #- the metrology table is in a different order than the original
            #- spots table, which is also a superset of the fiducial spots
            #- matched to the metrology, so find the sorting of the metrology
            #- that will match the order that they appear in the spots table
            iifid = (spots['LOCATION'] > 0) & (spots['PINHOLE_ID'] > 0)
            fidspots_pinloc = (spots['LOCATION'] * 10 +
                               spots['PINHOLE_ID'])[iifid]
            metro_pinloc = metrology['LOCATION'] * 10 + metrology['PINHOLE_ID']

            ii = np.argsort(np.argsort(fidspots_pinloc))
            jj = np.argsort(metro_pinloc)
            kk = jj[ii]

            #- Check that we got that dizzying array of argsorts right
            assert np.all(
                spots['LOCATION'][iifid] == metrology['LOCATION'][kk])
            assert np.all(
                spots['PINHOLE_ID'][iifid] == metrology['PINHOLE_ID'][kk])

            #- Update the spots table with metrology columns
            #- TODO: use masked arrays in addition to default=0
            spots["X_FP_METRO"] = np.zeros(len(spots))
            spots["Y_FP_METRO"] = np.zeros(len(spots))
            spots["Z_FP_METRO"] = np.zeros(len(spots))
            spots["X_FP_METRO"][iifid] = metrology['X_FP'][kk]
            spots["Y_FP_METRO"][iifid] = metrology['Y_FP'][kk]
            spots["Z_FP_METRO"][iifid] = metrology['Z_FP'][kk]
Example #10
def findfiducials(spots, input_transform=None, pinhole_max_separation_mm=1.5):

    global metrology_pinholes_table
    global metrology_fiducials_table
    log = get_logger()

    log.debug(
        "load input transformation we will use to go from FP to FVC pixels")
    if input_transform is None:
        input_transform = fvc2fp_filename()

    log.info("loading input tranform from {}".format(input_transform))
    input_tx = FVC2FP.read_jsonfile(input_transform)

    xpix = np.array([2000.])
    ypix = np.array([0.])
    xfp1, yfp1 = input_tx.fvc2fp(xpix, ypix)
    xfp2, yfp2 = input_tx.fvc2fp(xpix + 1, ypix)
    pixel2fp = np.hypot(xfp2 - xfp1, yfp2 - yfp1)[0]  # mm
    pinhole_max_separation_pixels = pinhole_max_separation_mm / pixel2fp
    log.info(
        "with pixel2fp = {:4.3f} mm, pinhole max separation = {:4.3f} pixels ".
        format(pixel2fp, pinhole_max_separation_pixels))

    if metrology_pinholes_table is None:
        metrology_table = load_metrology()

        log.debug("keep only the pinholes")
        metrology_pinholes_table = metrology_table[:][
            (metrology_table["DEVICE_TYPE"] == "FIF") |
            (metrology_table["DEVICE_TYPE"] == "GIF")]

        # use input transform to convert X_FP,Y_FP to XPIX,YPIX
        xpix, ypix = input_tx.fp2fvc(metrology_pinholes_table["X_FP"],
                                     metrology_pinholes_table["Y_FP"])
        metrology_pinholes_table["XPIX"] = xpix
        metrology_pinholes_table["YPIX"] = ypix

        log.debug("define fiducial location as the most central dot")
        central_pinholes = []
        for loc in np.unique(metrology_pinholes_table["LOCATION"]):
            ii = np.where(metrology_pinholes_table["LOCATION"] == loc)[0]
            mx = np.mean(metrology_pinholes_table["XPIX"][ii])
            my = np.mean(metrology_pinholes_table["YPIX"][ii])
            k = np.argmin((metrology_pinholes_table["XPIX"][ii] - mx)**2 +
                          (metrology_pinholes_table["YPIX"][ii] - my)**2)
            central_pinholes.append(ii[k])
        metrology_fiducials_table = metrology_pinholes_table[:][
            central_pinholes]

    # find fiducials candidates
    log.info("select spots with at least two close neighbors (in pixel units)")
    nspots = spots["XPIX"].size
    xy = np.array([spots["XPIX"], spots["YPIX"]]).T
    tree = KDTree(xy)

    measured_spots_distances, measured_spots_indices = tree.query(
        xy, k=4, distance_upper_bound=pinhole_max_separation_pixels)
    number_of_neighbors = np.sum(
        measured_spots_distances < pinhole_max_separation_pixels, axis=1)
    fiducials_candidates_indices = np.where(
        number_of_neighbors >= 4)[0]  # including self, so at least 3 neighboring pinholes
    log.debug("number of fiducials=", fiducials_candidates_indices.size)

    # match candidates to fiducials from metrology
    log.info(
        "first match {} fiducials candidates to metrology ({}) with iterative fit"
        .format(fiducials_candidates_indices.size,
                len(metrology_fiducials_table)))
    x1 = spots["XPIX"][fiducials_candidates_indices]
    y1 = spots["YPIX"][fiducials_candidates_indices]
    x2 = metrology_fiducials_table["XPIX"]
    y2 = metrology_fiducials_table["YPIX"]

    nloop = 20
    saved_median_distance = 0
    for loop in range(nloop):
        indices_2, distances = match_same_system(x1, y1, x2, y2)
        mdist = np.median(distances[indices_2 >= 0])
        if loop < nloop - 1:
            maxdistance = max(10, 3. * 1.4 * mdist)
        else:  # final iteration
            maxdistance = 10  # pixel
        selection = np.where((indices_2 >= 0) & (distances < maxdistance))[0]
        log.info("iter #{} median_dist={} max_dist={} matches={}".format(
            loop, mdist, maxdistance, selection.size))
        corr21 = SimpleCorr()
        corr21.fit(x2[indices_2[selection]], y2[indices_2[selection]],
                   x1[selection], y1[selection])
        x2, y2 = corr21.apply(x2, y2)
        if np.abs(saved_median_distance - mdist) < 0.0001:
            break  # no more improvement
        saved_median_distance = mdist

    # use same coord system match (note we now match the other way around)
    indices_1, distances = match_same_system(x2, y2, x1, y1)
    maxdistance = 10.  # FVC pixels
    selection = np.where((indices_1 >= 0) & (distances < maxdistance))[0]
    fiducials_candidates_indices = fiducials_candidates_indices[
        indices_1[selection]]
    matching_known_fiducials_indices = selection

    log.debug(
        "mean distance = {:4.2f} pixels for {} matched and {} known fiducials".
        format(np.mean(distances[distances < maxdistance]),
               fiducials_candidates_indices.size,
               metrology_fiducials_table["XPIX"].size))

    log.debug("now matching pinholes ...")

    nspots = spots["XPIX"].size
    for k in ['LOCATION', 'PETAL_LOC', 'DEVICE_LOC', 'PINHOLE_ID']:
        if k not in spots.dtype.names:
            spots.add_column(Column(np.zeros(nspots, dtype=int)), name=k)
    spots["LOCATION"][:] = -1
    spots["PETAL_LOC"][:] = -1
    spots["DEVICE_LOC"][:] = -1
    spots["PINHOLE_ID"][:] = 0

    for index1, index2 in zip(fiducials_candidates_indices,
                              matching_known_fiducials_indices):
        location = metrology_fiducials_table["LOCATION"][index2]

        # get indices of all pinholes for this matched fiducial
        # note we now use the full pinholes metrology table
        pi1 = measured_spots_indices[index1][
            measured_spots_distances[index1] < pinhole_max_separation_pixels]
        pi2 = np.where(metrology_pinholes_table["LOCATION"] == location)[0]

        x1 = spots["XPIX"][pi1]
        y1 = spots["YPIX"][pi1]

        x2 = metrology_pinholes_table["XPIX"][pi2]
        y2 = metrology_pinholes_table["YPIX"][pi2]

        indices_2, distances = match_arbitrary_translation_dilatation(
            x1, y1, x2, y2)

        metrology_pinhole_ids = metrology_pinholes_table["PINHOLE_ID"][pi2]
        pinhole_ids = np.zeros(x1.size, dtype=int)
        matched = (indices_2 >= 0)
        pinhole_ids[matched] = metrology_pinhole_ids[indices_2[matched]]

        spots["LOCATION"][pi1[matched]] = location
        spots["PINHOLE_ID"][pi1[matched]] = pinhole_ids[matched]

        if np.sum(pinhole_ids == 0) > 0:
            log.warning(
                "only matched pinholes {} for {} detected at LOCATION {} xpix~{} ypix~{}"
                .format(pinhole_ids[pinhole_ids > 0], x1.size, location,
                        int(np.mean(x1)), int(np.mean(y1))))

        # check duplicates
        if np.unique(
                pinhole_ids[pinhole_ids > 0]).size != np.sum(pinhole_ids > 0):
            xfp = np.mean(metrology_pinholes_table[pi2]["X_FP"])
            yfp = np.mean(metrology_pinholes_table[pi2]["Y_FP"])
            log.warning(
                "duplicate(s) pinhole ids in {} at LOCATION={} xpix~{} ypix~{} xfp~{} yfp~{}"
                .format(pinhole_ids, location, int(np.mean(x1)),
                        int(np.mean(y1)), int(xfp), int(yfp)))
            bc = np.bincount(pinhole_ids[pinhole_ids > 0])
            duplicates = np.where(bc > 1)[0]
            for duplicate in duplicates:
                log.warning(
                    "Unmatch ambiguous pinhole id = {}".format(duplicate))
                selection = (spots["LOCATION"]
                             == location) & (spots["PINHOLE_ID"] == duplicate)
                spots["PINHOLE_ID"][selection] = 0

    ii = (spots["LOCATION"] >= 0)
    spots["PETAL_LOC"][ii] = spots["LOCATION"][ii] // 1000
    spots["DEVICE_LOC"][ii] = spots["LOCATION"][ii] % 1000

    n_matched_pinholes = np.sum(spots["PINHOLE_ID"] > 0)
    n_matched_fiducials = np.sum(spots["PINHOLE_ID"] == 4)
    log.info("matched {} pinholes from {} fiducials".format(
        n_matched_pinholes, n_matched_fiducials))

    return spots
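A hedged end-to-end sketch chaining detectspots (example #4) into findfiducials; the exposure file name is hypothetical and the default transform comes from fvc2fp_filename():

from astropy.io import fits
fvcimage = fits.getdata("fvc-00012345.fits")
spots = detectspots(fvcimage)
spots = findfiducials(spots, pinhole_max_separation_mm=1.5)
fid = spots[spots["PINHOLE_ID"] > 0]  # spots matched to fiducial pinholes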
Example #11
    def fit(self, spots, degree=3, metrology=None, update_spots=False):

        log = get_logger()
        self.degree = degree
        if metrology is not None:
            self.metrology_table = metrology
        else:
            filename = resource_filename('desimeter', "data/fp-metrology.csv")
            if not os.path.isfile(filename):
                log.error("cannot find {}".format(filename))
                raise IOError("cannot find {}".format(filename))
            log.info("reading fiducials metrology in {}".format(filename))
            self.metrology_table = Table.read(filename, format="csv")

        selection = np.where((spots["LOCATION"] > 0))[0]
        if len(selection) < 4:
            log.error(
                "Only {} fiducials were matched, I cannot fit a transform".
                format(len(selection)))
            raise RuntimeError(
                "Only {} fiducials were matched, I cannot fit a transform".
                format(len(selection)))

        xpix = spots["XPIX"][selection]
        ypix = spots["YPIX"][selection]
        xerr = spots["XERR"][selection]
        yerr = spots["YERR"][selection]

        # now match

        spots_identifier = np.array(spots["LOCATION"][selection] * 100 +
                                    spots["PINHOLE_ID"][selection])
        metro_identifier = np.array(self.metrology_table["LOCATION"] * 100 +
                                    self.metrology_table["PINHOLE_ID"])

        tmpid = {f: i
                 for i, f in enumerate(metro_identifier)
                 }  # dictionary of indices

        spots_indices = []
        metro_indices = []

        for i, f in enumerate(spots_identifier):
            if f in tmpid:
                spots_indices.append(i)
                metro_indices.append(tmpid[f])
            else:
                log.warning(
                    "cannot find metrology for LOCATION={} PINHOLE_ID={}".
                    format(int(f // 100), int(f % 100)))

        xfp = self.metrology_table["X_FP"][metro_indices]
        yfp = self.metrology_table["Y_FP"][metro_indices]
        xpix = xpix[spots_indices]
        ypix = ypix[spots_indices]
        xerr = xerr[spots_indices]
        yerr = yerr[spots_indices]
        selection = selection[spots_indices]
        log.warning(
            "Trivial transformation fit with polynomials (pretty bad residuals)"
        )

        #---
        # solve linear system for FVC [pix] -> FP [mm]

        rx, ry = self._fvc_reduced_coords(xpix, ypix)
        self.Px, self.Py = _polyfit2d(rx, ry, xfp, yfp, self.degree)

        log.info('FVC -> FP: {} parameters'.format(len(self.Px)))
        log.debug("Px={}".format(self.Px))
        log.debug("Py={}".format(self.Py))

        #---
        # Solve inverse linear system for FP [mm] -> FVC [pix]
        # Use higher degree and denser sampling of transform

        rpix = np.linspace(0, 6000, num=25)
        xpix, ypix = np.meshgrid(rpix, rpix)
        xpix = xpix.ravel()
        ypix = ypix.ravel()
        xfp, yfp = self.fvc2fp(xpix, ypix)

        rx, ry = self._fp_reduced_coords(xfp, yfp)
        log.debug('FP minmax(rx) = {:.3f}, {:.3f}'.format(
            np.min(rx), np.max(rx)))
        log.debug('FP minmax(ry) = {:.3f}, {:.3f}'.format(
            np.min(ry), np.max(ry)))
        self.Qx, self.Qy = _polyfit2d(rx, ry, xpix, ypix, self.degree + 5)
        log.info('FP -> FVC: {} parameters'.format(len(self.Qx)))

        #---
        # check goodness of fit metrics

        xfp = self.metrology_table["X_FP"][metro_indices]
        yfp = self.metrology_table["Y_FP"][metro_indices]
        xfp_meas, yfp_meas = self.fvc2fp(spots["XPIX"], spots["YPIX"])

        dist = np.sqrt((xfp_meas[selection] - xfp)**2 +
                       (yfp_meas[selection] - yfp)**2)
        mdist = np.mean(dist)
        log.info("Mean and median distance = {:.1f}, {:.1f} um".format(
            1000 * np.mean(dist), 1000 * np.median(dist)))

        if update_spots:
            spots["X_FP"] = xfp_meas
            spots["Y_FP"] = yfp_meas
            spots["X_FP_METRO"] = np.zeros(xfp_meas.size)
            spots["Y_FP_METRO"] = np.zeros(xfp_meas.size)
            spots["X_FP_METRO"][selection] = xfp
            spots["Y_FP_METRO"][selection] = yfp
Example #12
import numpy as np
import astropy.io.ascii
from scipy import optimize
import scipy.linalg
from desimeter.match_positioners import match
import scipy.spatial
from astropy.stats import mad_std

# accommodate both use in online system and in datasystems environment
# use online system logging when available
try:
    import DOSLib.logger as log
    log.warning = log.warn
except Exception:
    from desimeter.log import get_logger
    log = get_logger()

# =================================================================
# from Sergey : correction using local polynomial fit
# =================================================================


def getpoly(x, y, ndeg=2):
    """
    Get the 2D polynomial design matrix
    """
    N = len(x)
    polys = np.zeros((((ndeg + 1) * (ndeg + 2)) // 2 - 1, N * 2))  # []
    cnt = 0
    for deg in range(1, ndeg + 1):
        for j in range(deg + 1):
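The listing is cut off here. For illustration only, a self-contained sketch (not the original continuation) of a 2D polynomial design matrix with the same monomial count, all monomials x^i * y^j with 1 <= i+j <= ndeg, and its least-squares use:

import numpy as np

def getpoly_sketch(x, y, ndeg=2):
    # (ndeg+1)*(ndeg+2)//2 - 1 monomials, matching the row count above
    rows = []
    for deg in range(1, ndeg + 1):
        for j in range(deg + 1):
            rows.append(x**(deg - j) * y**j)
    return np.vstack(rows)

# e.g. fit a correction dx(x, y) by least squares:
# A = getpoly_sketch(x, y).T
# coeffs, *_ = np.linalg.lstsq(A, dx, rcond=None)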
Example #13
def findfiducials(spots, input_transform=None, separation=8.):

    global metrology_pinholes_table
    global metrology_fiducials_table
    log = get_logger()

    log.debug(
        "load input transformation we will use to go from FP to FVC pixels")
    if input_transform is None:
        input_transform = resource_filename('desimeter',
                                            "data/single-lens-fvc2fp.json")

    log.info("loading input tranform from {}".format(input_transform))
    try:
        input_tx = FVCFP_ZhaoBurge.read_jsonfile(input_transform)
    except AssertionError as e:
        log.warning(
            "Failed to read input transform as Zhao-Burge, trying polynomial...")
        input_tx = FVCFP_Polynomial.read_jsonfile(input_transform)

    if metrology_pinholes_table is None:

        filename = resource_filename('desimeter', "data/fp-metrology.csv")
        if not os.path.isfile(filename):
            log.error("cannot find {}".format(filename))
            raise IOError("cannot find {}".format(filename))
        log.info("reading metrology in {}".format(filename))
        metrology_table = Table.read(filename, format="csv")

        log.debug("keep only the pinholes")
        metrology_pinholes_table = metrology_table[:][
            metrology_table["PINHOLE_ID"] > 0]

        # use input transform to convert X_FP,Y_FP to XPIX,YPIX
        xpix, ypix = input_tx.fp2fvc(metrology_pinholes_table["X_FP"],
                                     metrology_pinholes_table["Y_FP"])
        metrology_pinholes_table["XPIX"] = xpix
        metrology_pinholes_table["YPIX"] = ypix

        log.debug("define fiducial location as central dot")
        metrology_fiducials_table = metrology_pinholes_table[:][
            metrology_pinholes_table["PINHOLE_ID"] == 4]

    # find fiducials candidates
    log.info("select spots with at least two close neighbors (in pixel units)")
    xy = np.array([spots["XPIX"], spots["YPIX"]]).T
    tree = KDTree(xy)
    measured_spots_distances, measured_spots_indices = tree.query(
        xy, k=4, distance_upper_bound=separation)
    number_of_neighbors = np.sum(measured_spots_distances < separation, axis=1)
    fiducials_candidates_indices = np.where(
        number_of_neighbors >= 3)[0]  # including self, so at least 3 pinholes

    # match candidates to fiducials from metrology

    log.info(
        "first match {} fiducials candidates to metrology ({}) with iterative fit"
        .format(fiducials_candidates_indices.size,
                len(metrology_fiducials_table)))
    x1 = spots["XPIX"][fiducials_candidates_indices]
    y1 = spots["YPIX"][fiducials_candidates_indices]
    x2 = metrology_fiducials_table["XPIX"]  # do I need to do this?
    y2 = metrology_fiducials_table["YPIX"]

    nloop = 20
    saved_median_distance = 0
    for loop in range(nloop):
        indices_2, distances = match_same_system(x1, y1, x2, y2)
        mdist = np.median(distances[indices_2 >= 0])
        if loop < nloop - 1:
            maxdistance = max(10, 3. * 1.4 * mdist)
        else:  # final iteration
            maxdistance = 10  # pixel
        selection = np.where((indices_2 >= 0) & (distances < maxdistance))[0]
        log.info("iter #{} median_dist={} max_dist={} matches={}".format(
            loop, mdist, maxdistance, selection.size))
        corr21 = SimpleCorr()
        corr21.fit(x2[indices_2[selection]], y2[indices_2[selection]],
                   x1[selection], y1[selection])
        x2, y2 = corr21.apply(x2, y2)
        if np.abs(saved_median_distance - mdist) < 0.0001:
            break  # no more improvement
        saved_median_distance = mdist

    # use same coord system match (note we now match the other way around)
    indices_1, distances = match_same_system(x2, y2, x1, y1)
    maxdistance = 10.  # FVC pixels
    selection = np.where((indices_1 >= 0) & (distances < maxdistance))[0]
    fiducials_candidates_indices = fiducials_candidates_indices[
        indices_1[selection]]
    matching_known_fiducials_indices = selection

    log.debug(
        "mean distance = {:4.2f} pixels for {} matched and {} known fiducials".
        format(np.mean(distances[distances < maxdistance]),
               fiducials_candidates_indices.size,
               metrology_fiducials_table["XPIX"].size))

    log.debug("now matching pinholes ...")

    nspots = spots["XPIX"].size
    if 'LOCATION' not in spots.dtype.names:
        spots.add_column(Column(np.zeros(nspots, dtype=int)), name='LOCATION')
    if 'PINHOLE_ID' not in spots.dtype.names:
        spots.add_column(Column(np.zeros(nspots, dtype=int)),
                         name='PINHOLE_ID')

    for index1, index2 in zip(fiducials_candidates_indices,
                              matching_known_fiducials_indices):
        location = metrology_fiducials_table["LOCATION"][index2]

        # get indices of all pinholes for this matched fiducial
        # note we now use the full pinholes metrology table
        pi1 = measured_spots_indices[index1][
            measured_spots_distances[index1] < separation]
        pi2 = np.where(metrology_pinholes_table["LOCATION"] == location)[0]

        x1 = spots["XPIX"][pi1]
        y1 = spots["YPIX"][pi1]

        x2 = metrology_pinholes_table["XPIX"][pi2]
        y2 = metrology_pinholes_table["YPIX"][pi2]

        indices_2, distances = match_arbitrary_translation_dilatation(
            x1, y1, x2, y2)

        metrology_pinhole_ids = metrology_pinholes_table["PINHOLE_ID"][pi2]
        pinhole_ids = np.zeros(x1.size, dtype=int)
        matched = (indices_2 >= 0)
        pinhole_ids[matched] = metrology_pinhole_ids[indices_2[matched]]

        spots["LOCATION"][pi1[matched]] = location
        spots["PINHOLE_ID"][pi1[matched]] = pinhole_ids[matched]

        if np.sum(pinhole_ids == 0) > 0:
            log.warning(
                "only matched pinholes {} for {} detected at LOCATION {} xpix~{} ypix~{}"
                .format(pinhole_ids[pinhole_ids > 0], x1.size, location,
                        int(np.mean(x1)), int(np.mean(y1))))

        # check duplicates
        if np.unique(
                pinhole_ids[pinhole_ids > 0]).size != np.sum(pinhole_ids > 0):
            xfp = np.mean(metrology_pinholes_table[pi2]["X_FP"])
            yfp = np.mean(metrology_pinholes_table[pi2]["Y_FP"])
            log.warning(
                "duplicate(s) pinhole ids in {} at LOCATION={} xpix~{} ypix~{} xfp~{} yfp~{}"
                .format(pinhole_ids, location, int(np.mean(x1)),
                        int(np.mean(y1)), int(xfp), int(yfp)))
            bc = np.bincount(pinhole_ids[pinhole_ids > 0])
            duplicates = np.where(bc > 1)[0]
            for duplicate in duplicates:
                log.warning(
                    "Unmatch ambiguous pinhole id = {}".format(duplicate))
                selection = (spots["LOCATION"]
                             == location) & (spots["PINHOLE_ID"] == duplicate)
                spots["PINHOLE_ID"][selection] = 0

    spots["PETAL_LOC"] = spots["LOCATION"] // 1000
    spots["DEVICE_LOC"] = spots["LOCATION"] % 1000

    n_matched_pinholes = np.sum(spots["PINHOLE_ID"] > 0)
    n_matched_fiducials = np.sum(spots["PINHOLE_ID"] == 4)
    log.info("matched {} pinholes from {} fiducials".format(
        n_matched_pinholes, n_matched_fiducials))
    return spots
Example #14
def detectspots(fvcimage,threshold=500,nsig=7,psf_sigma=1.) :
    """
    Detect spots in a fiber view image and measure their centroids and flux
    Args:
        fvcimage : 2D numpy array

    Optional:
        threshold : float; the max of this value (in counts/pixel) and nsig*rms
                    is used as the threshold for the first detection of peaks

    Returns astropy.Table with spots; columns are XPIX, YPIX, XERR, YERR, COUNTS
    """

    log = get_logger()

    
    n0=fvcimage.shape[0]
    n1=fvcimage.shape[1]
    
    

    # find peaks = local maximum above threshold
    log.info("gaussian convolve with sigma = {:2.1f} pixels".format(psf_sigma))
    convolved_image=gaussian_convolve(fvcimage,psf_sigma)
    
    # measure pedestal and rms
    # look the values of a random subsample of the image
    nrand=20000
    ii0=(np.random.uniform(size=nrand)*n0).astype(int)
    ii1=(np.random.uniform(size=nrand)*n1).astype(int)
    vals=convolved_image[ii0,ii1].ravel()
    mval=np.median(vals)
    #- normalized median absolute deviation as robust version of RMS
    #- see https://en.wikipedia.org/wiki/Median_absolute_deviation
    rms=1.4826*np.median(np.abs(vals-mval)) 
    ok=np.abs(vals-mval)<4*rms
    mval=np.mean(vals[ok])
    rms=np.std(vals[ok])
    log.info("pedestal={:4.2f} rms={:4.2f}".format(mval,rms))
    # remove mean; if fvcimage was integer input, this will cast to float64
    fvcimage = fvcimage - mval
    convolved_image      -= mval
    
    #import matplotlib.pyplot as plt
    #plt.hist(vals[ok]-mval,bins=100)
    #plt.show()
    
    if threshold is None :
        threshold=nsig*rms
    else :
        threshold=max(threshold,nsig*rms)
    
    peaks=np.zeros((n0,n1))
    peaks[1:-1,1:-1]=((convolved_image[1:-1,1:-1]>convolved_image[:-2,1:-1])
                      *(convolved_image[1:-1,1:-1]>convolved_image[2:,1:-1])
                      *(convolved_image[1:-1,1:-1]>convolved_image[1:-1,:-2])
                      *(convolved_image[1:-1,1:-1]>convolved_image[1:-1,2:])
                      *(convolved_image[1:-1,1:-1]>threshold))

    # loop on peaks
    peakindices=np.where(peaks.ravel()>0)[0]
    npeak=len(peakindices)
    if npeak == 0 :
        log.error("no spot found")
        raise RuntimeError("no spot found")
    else :
        log.info("found {} peaks".format(npeak))
                 
    xpix=np.zeros(npeak)
    ypix=np.zeros(npeak)
    xerr=np.zeros(npeak)
    yerr=np.zeros(npeak)
    counts=np.zeros(npeak)
    hw=3
    
    xoffset=0. # would need offsets of 1 to match with POS file. not sure what is the best choice here.
    yoffset=0.
    if xoffset !=0 or yoffset !=0 :
        log.warning("Applying offsets x += {} and y += {} (to match with others, like the POS files)".format(xoffset,yoffset))
    if xoffset == 0 and yoffset == 0 :
        log.debug("Here center of first pixel has coord=(0,0); so we expect offsets of 1 with respect to coordinates in .pos files.")
    
    for j,index in enumerate(peakindices) :
        i0=index//n1
        i1=index%n1
        if 1 : #try :
            x,y,ex,ey,c=fitcentroid(fvcimage[i0-hw:i0+hw+1,i1-hw:i1+hw+1],noise=rms)
            xpix[j] = x + i1 + xoffset #  x is along axis=1 in python , also adding offset (definition of pixel coordinates)
            ypix[j] = y + i0 + yoffset #  y is along axis=0 in python , also adding offset (definition of pixel coordinates)
            xerr[j] = ex
            yerr[j] = ey
            counts[j] = c
        #except Exception as e:
        #    log.error("failed to fit a centroid {}".format(e))
            
        log.debug("{} x={} y={} counts={}".format(j,xpix[j],ypix[j],counts[j]))
    
    #log.warning("Would need some cleaning here for multiple detections of same spot")

    table = Table([xpix,ypix,xerr,yerr,counts],names=("XPIX","YPIX","XERR","YERR","COUNTS"))
    return table
Example #15
    def read_guide_stars_catalog(self, filename, max_sep_arcsec=2.):

        log = get_logger()
        log.info("reading guide stars in {}".format(filename))

        # here we could do some conversion of column names
        catalog = Table.read(filename)

        if "mjd_obs" in catalog.dtype.names:
            self.mjd = np.mean(catalog["mjd_obs"])
            log.info("use mjd={} from catalog['mjd_obs']".format(self.mjd))

        if (not "xcentroid" in catalog.dtype.names) or (
                not "ra_gaia" in catalog.dtype.names):
            log.error(
                "I can only deal with Aaron's catalogs with columns xcentroid,ycentroid,ra_gaia,dec_gaia, sorry"
            )
            raise RuntimeError(
                "I can only deal with Aaron's catalogs with columns xcentroid,ycentroid,ra_gaia,dec_gaia, sorry"
            )

        log.info(
            "selecting stars for which we have a good match (< {} arcsec)".
            format(max_sep_arcsec))

        if all([_ in catalog.columns for _ in ['pmra', 'pmdec', 'ref_epoch']]):

            if self.mjd is None:
                log.error(
                    "Cannot compute proper motion correction because mjd=None")
                raise RuntimeError(
                    "Cannot compute proper motion correction because mjd=None")

            # if proper motions and reference epochs are there
            pmra = catalog['pmra']
            pmdec = catalog['pmdec']
            # if unknown, zero out the pms
            pmra[~np.isfinite(pmra)] = 0
            pmdec[~np.isfinite(pmdec)] = 0

            cur_year = Time(self.mjd, format='mjd').to_value(format='jyear')
            # observation time in decimal years (like 2020.3)
            ref_epoch = catalog['ref_epoch']
            dra = (cur_year - ref_epoch) * pmra / 3600e3 / cosd(
                catalog['dec_gaia'])
            ddec = (cur_year - ref_epoch) * pmdec / 3600e3

            # add pm and rename columns
            catalog['ra_gaia'] += dra
            catalog['dec_gaia'] += ddec
            catalog.rename_column('ra_gaia', 'ra_gaia_with_pm')
            catalog.rename_column('dec_gaia', 'dec_gaia_with_pm')
            ra_column = 'ra_gaia_with_pm'
            dec_column = 'dec_gaia_with_pm'

        else:
            ra_column = 'ra_gaia'
            dec_column = 'dec_gaia'
            log.warning("No proper motion info in catalog")

        match_dra = (catalog["ra"] - catalog[ra_column]) * cosd(
            catalog[dec_column]) * 3600.  # arcsec
        match_ddec = (catalog["dec"] - catalog[dec_column]) * 3600.  # arcsec

        dr = np.hypot(match_dra, match_ddec)
        selection = (dr < max_sep_arcsec)  # arcsec
        if np.sum(selection) == 0:
            log.error("no star is matched with sufficient precision!")
            raise RuntimeError("no star is matched with sufficient precision!")

        return catalog[:][selection]
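Usage sketch (the file name is hypothetical; "fm" is an instance of the class this method belongs to, with fm.mjd set or an mjd_obs column present so proper motions can be applied):

catalog = fm.read_guide_stars_catalog("guide_stars-00012345.fits", max_sep_arcsec=2.)
# only stars matched to Gaia within 2 arcsec survive; when pmra/pmdec/ref_epoch
# exist, the Gaia coordinates are proper-motion corrected and renamed *_with_pm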