Example 1
    def run(self, dataSlice, slicePoint=None):
        pm = np.array(self.simobj['PM_out'])
        mag = np.array(self.simobj['MAG'])
        obs = np.where(dataSlice[self.mjdCol] < min(dataSlice[self.mjdCol]) +
                       365 * self.surveyduration)
        np.random.seed(5000)
        mjd = dataSlice[self.mjdCol][obs]
        flt = dataSlice[self.filterCol][obs]
        if (self.f in flt):
            snr = m52snr(mag[:, np.newaxis], dataSlice[self.m5Col][obs])
            row, col = np.where(snr > self.snr_lim)

            Times = np.sort(mjd)
            dt = np.array(list(combinations(Times, 2)))
            if np.size(dt) > 0:
                DeltaTs = np.absolute(np.subtract(dt[:, 0], dt[:, 1]))
                DeltaTs = np.unique(DeltaTs)

                dt_pm = 0.05 * np.amin(
                    dataSlice[self.seeingCol]) / pm[np.unique(row)]
                selection = np.where((dt_pm > min(DeltaTs))
                                     & (dt_pm < max(DeltaTs)))

                #precis = astrom_precision(dataSlice[self.seeingCol][obs], snr[row,:])
                #sigmapm= sigma_slope(dataSlice[self.mjdCol][obs], precis)*365.25*1e3

                objRate = 0.7  # how many go off per day
                nObj = np.size(pm[selection])
                m0s = mag[selection]
                t = dataSlice[self.mjdCol][obs] - dataSlice[self.mjdCol].min()
                detected = 0
                # Loop through each generated transient and decide if it was detected.
                # This could be a more complicated piece of code, for example demanding
                # a color measurement in a night.
                durations = dt_pm[selection]
                slopes = np.random.uniform(-3, 3, nObj)
                t0s = np.random.uniform(0, self.surveyduration, nObj)
                lcs = self.lightCurve(t, t0s, m0s, durations, slopes)
                good = m52snr(lcs, dataSlice[self.m5Col][obs]) > self.snr_lim
                detectedTest = good.sum(axis=0)
                detected = np.sum(detectedTest > 2)
                #for i,t0 in enumerate(np.random.uniform(0,self.surveyduration,nObj)):
                #    duration =dt_pm[selection][i]
                #    slope = np.random.uniform(-3,3)
                #    lc = self.lightCurve(t, t0, m0s[i],duration, slope)
                #    good = m52snr(lc,dataSlice[self.m5Col][obs])> self.snr_lim
                #    detectTest = dataSlice[self.m5Col][obs] - lc
                #    if detectTest.max() > 0 and len(good)>2:
                #         detected += 1
                # Return the fraction of transients detected
                if nObj == 0:
                    return 0.0
                res = float(detected) / nObj
                #print('detected fraction:{}'.format(res))
                return res
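
A note on the helper used throughout these examples: m52snr converts a source magnitude and a 5-sigma limiting depth into a signal-to-noise ratio. A minimal sketch, assuming the standard definition in which SNR = 5 exactly at m = m5 and scales as a power law in flux:

import numpy as np

def m52snr(m, m5):
    """SNR of a source of magnitude m given a 5-sigma limiting depth m5.
    By construction snr == 5 when m == m5."""
    return 5.0 * 10.0**(-0.4 * (m - m5))

print(m52snr(23.0, 24.0))  # a star 1 mag brighter than the limit: ~12.6 sigma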
Example 2
    def run(self, dataSlice, slicePoint=None):
        obs = np.where(dataSlice[self.mjdCol] < min(dataSlice[self.mjdCol]) +
                       365 * self.surveyduration)

        deltamag = np.arange(self.MagIterLim[0], self.MagIterLim[1],
                             self.MagIterLim[2])
        out = {}
        for dm in deltamag:
            if self.mode == 'distance':
                pmnew = self.mu_sag / (10**(dm / 5))
                mag = self.mag_sag + dm
            elif self.mode == 'density':
                pmnew = self.mu_sag
                mag = self.mag_sag + dm
            else:
                raise ValueError(
                    'The metric is not implemented for this mode.')

            mjd = dataSlice[self.mjdCol][obs]
            flt = dataSlice[self.filterCol][obs]
            if ('g' in flt) and ('r' in flt):
                # select objects above the limiting-magnitude threshold
                snr = m52snr(mag[:, np.newaxis], dataSlice[self.m5Col][obs])
                row, col = np.where(snr > self.snr_lim)
                if self.gap_selection:
                    Times = np.sort(mjd)
                    dt = np.array(list(combinations(Times, 2)))
                    DeltaTs = np.absolute(np.subtract(dt[:, 0], dt[:, 1]))
                    DeltaTs = np.unique(DeltaTs)
                    if np.size(DeltaTs) > 0:
                        dt_pm = 0.05 * np.amin(
                            dataSlice[self.seeingCol]) / pmnew[np.unique(row)]
                        selection = np.where((dt_pm > min(DeltaTs))
                                             & (dt_pm < max(DeltaTs)))
                else:
                    selection = np.unique(row)
                precis = astrom_precision(dataSlice[self.seeingCol][obs],
                                          snr[row, :])
                sigmapm = self.sigma_slope(dataSlice[self.mjdCol][obs], precis)
                # g-r colours of the selected objects; the original snippet
                # used a bare `gr`, which is assumed to come from self.gr
                gr = self.gr[selection]
                snr_med = m52snr(mag[selection],
                                 np.median(dataSlice[self.m5Col]))
                Hg = mag[selection] + 5 * np.log10(pmnew[selection]) - 10
                sigmaHg = np.sqrt((mag[selection] / snr_med)**2 +
                                  (4.715 * sigmapm[selection] /
                                   np.ceil(pmnew[selection]))**2)
                sigmag = np.sqrt(
                    (mag[selection] / snr_med)**2 +
                    ((mag[selection] - gr) /
                     m52snr(mag[selection] - gr,
                            np.median(dataSlice[self.m5Col])))**2)
                err_ellipse = np.pi * sigmaHg * sigmag
                if self.dataout:
                    CI = np.array([
                        np.nansum(((gr - gcol) / sigmag)**2 +
                                  ((Hg - h) / sigmaHg)**2 <= 1) /
                        np.size(pmnew[selection])
                        for (gcol, h) in zip(gr, Hg)
                    ])
                    out[dm] = {'CI': CI,
                               'alpha': np.size(pmnew[selection]) / np.size(pmnew),
                               'err': err_ellipse, 'Hg': Hg, 'gr': gr,
                               'sigmaHg': sigmaHg, 'sigmagr': sigmag}
                else:
                    out[dm] = {'alpha': np.size(pmnew[selection]) / np.size(pmnew),
                               'err': err_ellipse}
            else:
                # no g and r coverage: store empty results instead of
                # referencing variables that were never computed
                if self.dataout:
                    out[dm] = {'CI': 0, 'alpha': 0, 'err': 0,
                               'Hg': np.nan, 'gr': np.nan,
                               'sigmaHg': np.nan, 'sigmagr': np.nan}
                else:
                    out[dm] = {'alpha': 0, 'err': 0}
        if self.dataout:
            return out
        else:
            if ('g' in flt) and ('r' in flt):
                res = out[dm]['alpha'] / np.nanmean(
                    out[dm]['err'][np.isfinite(out[dm]['err'])])
                return res
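
For reference, the Hg computed above is the reduced proper motion: Hg = g + 5*log10(mu) - 10 with mu in mas/yr, which is equivalent to the textbook g + 5*log10(mu_arcsec/yr) + 5. A quick sanity check:

import numpy as np

def reduced_proper_motion(g, mu_mas_yr):
    # Hg = g + 5*log10(mu / (mas/yr)) - 10
    return g + 5.0 * np.log10(mu_mas_yr) - 10.0

print(reduced_proper_motion(20.0, 10.0))  # 15.0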
Example 3
 def run(self, dataslice, slicePoint=None):
     filters = np.unique(dataslice['filter'])
     precis = np.zeros(dataslice.size, dtype='float')
     for f in filters:
         observations = np.where(dataslice['filter'] == f)
         if np.size(observations[0]) < 2:
             precis[observations] = self.badval
         else:
             snr = mafUtils.m52snr(self.mags[f],
                dataslice[self.m5Col][observations])
             precis[observations] = mafUtils.astrom_precision(
                 dataslice[self.seeingCol][observations], snr)
             precis[observations] = np.sqrt(precis[observations]**2 + self.atm_err**2)
     good = np.where(precis != self.badval)
     result = mafUtils.sigma_slope(dataslice['expMJD'][good], precis[good])
     result = result*365.25*1e3 #convert to mas/yr
     if (self.normalize) & (good[0].size > 0):
         new_dates = dataslice['expMJD'][good]*0
         nDates = new_dates.size
         new_dates[nDates // 2:] = self.baseline*365.25
         result = (mafUtils.sigma_slope(new_dates, precis[good])*365.25*1e3)/result
     # Observations that are very close together can still fail
     if np.isnan(result):
         result = self.badval
     return result
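
The sigma_slope helper is the uncertainty on the slope of a weighted least-squares straight-line fit (here position versus time, i.e. the proper motion). A sketch using the standard error-propagation formula, assumed to match the MAF utility; multiplying by 365.25*1e3 then converts arcsec/day to mas/yr:

import numpy as np

def sigma_slope(x, sigma_y):
    """Uncertainty of the slope of a line fit to points at abscissa x
    with per-point uncertainties sigma_y."""
    w = 1.0 / sigma_y**2
    denom = np.sum(w) * np.sum(w * x**2) - np.sum(w * x)**2
    if denom <= 0:
        return np.nan  # degenerate case, e.g. all x identical
    return np.sqrt(np.sum(w) / denom)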
Example 4
    def run(self, dataSlice, slicePoint=None):

        snr = np.zeros(len(dataSlice), dtype='float')
        for filt in self.filters:
            inFilt = np.where(dataSlice[self.filterCol] == filt)
            snr[inFilt] = mafUtils.m52snr(self.mags[filt],
                                          dataSlice[self.m5Col][inFilt])

        position_errors = np.sqrt(
            mafUtils.astrom_precision(dataSlice[self.seeingCol], snr)**2 +
            self.atm_err**2)

        x_coord = np.tan(dataSlice['zenithDistance']) * np.sin(
            dataSlice[self.PACol])
        # Things should be the same for RA and dec.
        # Now I want to compute the error if I interpolate/extrapolate to +/-1.

        # function is of form, y=ax. a=y/x. da = dy/x.
        # Only strictly true if we know the unshifted position. But this should be a reasonable approx.
        slope_uncerts = position_errors / x_coord
        total_slope_uncert = 1. / np.sqrt(np.sum(1. / slope_uncerts**2))

        # So, this will be the uncertainty in the RA or Dec offset at x= +/- 1.
        result = total_slope_uncert

        return result
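
The per-visit slope uncertainties are combined assuming independent measurements, so the inverse variances add and the total is always smaller than the best single measurement. With illustrative numbers:

import numpy as np

slope_uncerts = np.array([0.02, 0.05, 0.04])          # per-visit uncertainties
total = 1.0 / np.sqrt(np.sum(1.0 / slope_uncerts**2))
print(total)                                          # ~0.017, below 0.02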
Example 5
 def run(self, dataslice, slicePoint=None):
     filters = np.unique(dataslice['filter'])
     filters = [str(f) for f in filters]
     precis = np.zeros(dataslice.size, dtype='float')
     for f in filters:
         observations = np.where(dataslice['filter'] == f)
         if np.size(observations[0]) < 2:
             precis[observations] = self.badval
         else:
             snr = mafUtils.m52snr(self.mags[f],
                                   dataslice[self.m5Col][observations])
             precis[observations] = mafUtils.astrom_precision(
                 dataslice[self.seeingCol][observations], snr)
             precis[observations] = np.sqrt(precis[observations]**2 +
                                            self.atm_err**2)
     good = np.where(precis != self.badval)
     result = mafUtils.sigma_slope(dataslice[self.mjdCol][good],
                                   precis[good])
     result = result * 365.25 * 1e3  # Convert to mas/yr
     if (self.normalize) & (good[0].size > 0):
         new_dates = dataslice[self.mjdCol][good] * 0
         nDates = new_dates.size
         new_dates[nDates // 2:] = self.baseline * 365.25
         result = (mafUtils.sigma_slope(new_dates, precis[good]) * 365.25 *
                   1e3) / result
     # Observations that are very close together can still fail
     if np.isnan(result):
         result = self.badval
     return result
Example 6
    def run(self, dataSlice, slicePoint=None):

        # punt if we don't have enough points
        if dataSlice.size < self.means.size + 3:
            return self.badval

        # Generate independent variable for light curve
        t = np.empty(dataSlice.size,
                     dtype=list(zip(['time', 'filter'], [float, '|S1'])))
        t['time'] = dataSlice[self.mjdCol] - dataSlice[self.mjdCol].min()
        t['filter'] = dataSlice[self.filterCol]

        if 'distMod' in slicePoint.keys():
            mags = self.means + slicePoint['distMod']
        else:
            mags = self.means
        trueParams = np.append(
            np.array([self.period, self.phase, self.amplitude]), mags)
        trueLC = periodicStar(t, *trueParams)

        fits = np.zeros((self.nMonte, trueParams.size), dtype=float)
        for i in np.arange(self.nMonte):
            snr = m52snr(trueLC, dataSlice[self.m5Col])
            dmag = 2.5 * np.log10(1. + 1. / snr)
            noise = np.random.randn(trueLC.size) * dmag
            # Suppress warnings about failing on covariance
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                # If it fails to converge, save values that should fail later
                try:
                    parmVals, pcov = curve_fit(periodicStar,
                                               t,
                                               trueLC + noise,
                                               p0=trueParams,
                                               sigma=dmag)
                except:
                    parmVals = trueParams * 0 - 666
            fits[i, :] = parmVals

        # Throw out any parameters if there are no observations in that filter
        ufilters = np.unique(dataSlice[self.filterCol])
        if ufilters.size < 9:
            for key in self.filter2index.keys():
                if key not in ufilters:
                    fits[:, self.filter2index[key]] = -np.inf

        # Find the fraction of fits that meet the "well-fit" criteria
        periodFracErr = (fits[:, 0] - trueParams[0]) / trueParams[0]
        ampFracErr = (fits[:, 2] - trueParams[2]) / trueParams[2]
        magErr = fits[:, 3:] - trueParams[3:]
        nBands = np.zeros(magErr.shape, dtype=int)
        nBands[np.where(magErr <= self.magTol)] = 1
        nBands = np.sum(nBands, axis=1)
        nRecovered = np.size(
            np.where((periodFracErr <= self.periodTol)
                     & (ampFracErr <= self.ampTol)
                     & (nBands >= self.nBands))[0])
        fracRecovered = float(nRecovered) / self.nMonte
        return fracRecovered
Example 7
    def run(self, dataSlice, slicePoint=None):
        """
        Calculate the detectability of a transient with the specified SED.

        If self.output_data is True, then returns the full lightcurve for each
        object instead of the total number of transients that are detected.

        Parameters
        ----------
            dataSlice : numpy.array
                Numpy structured array containing the data related to the visits
                provided by the slicer.
            slicePoint : dict, optional
                Dictionary containing information about the slicepoint currently
                active in the slicer.

        Returns
        -------
            float or dict
                The fraction of transients that could be detected (if
                output_data is False). Otherwise, a dictionary
                with arrays of 'transient_id', 'lcMag', 'detected', 'expMJD',
                'SNR', 'filter', 'epoch'
        """

        dataSlice = self.setup_run_metric_variables(dataSlice)
        self.initialize_phase_loop_variables(dataSlice)

        # Consider each different 'phase shift' separately.
        # We then just have a series of lightcurves, taking place back-to-back.
        for time_shift in self.time_phase_shifts:
            self.setup_phase_shift_dependent_variables(time_shift, dataSlice)

            # Generate the actual light curve magnitudes and SNR
            self.make_lightcurve(self.observation_epoch,
                                 dataSlice[self.filterCol])
            self.light_curve_SNRs = m52snr(self.light_curve_mags,
                                           dataSlice[self.m5Col])

            # Check observations above the defined threshold for detection.
            self.evaluate_SNR_thresholds(dataSlice)
            # With useable observations computed, evaluate all detection criteria
            self.evaluate_all_detection_criteria(dataSlice)

        if self.output_data:
            # Output all the light curves, regardless of detection threshold,
            # but indicate which were 'detected'.
            # Only returns for one phase shift, not all.
            return {
                "transient_id": self.transient_id,
                "expMJD": dataSlice[self.mjdCol],
                "epoch": self.observation_epoch,
                "filter": dataSlice[self.filterCol],
                "lcMag": self.light_curve_mags,
                "SNR": self.light_curve_SNRs,
                "detected": self.transient_detected,
            }
        else:
            return float(self.num_detected) / self.max_num_transients
Example 8
    def run(self, dataSlice, slicePoint=None):
        """
        Calculate the detectability of a transient with the specified SED.

        If self.output_data is True, then returns the full lightcurve for each
        object instead of the total number of transients that are detected.

        Parameters
        ----------
            dataSlice : numpy.array
                Numpy structured array containing the data related to the visits
                provided by the slicer.
            slicePoint : dict, optional
                Dictionary containing information about the slicepoint currently
                active in the slicer.

        Returns
        -------
            float or dict
                The fraction of transients that could be detected (if
                output_data is False). Otherwise, a dictionary
                with arrays of 'transient_id', 'lcMag', 'detected', 'expMJD',
                'SNR', 'filter', 'epoch'
        """

        dataSlice = self.setup_run_metric_variables(dataSlice)
        self.initialize_phase_loop_variables(dataSlice)

        # Consider each different 'phase shift' separately.
        # We then just have a series of lightcurves, taking place back-to-back.
        for time_shift in self.time_phase_shifts:
            self.setup_phase_shift_dependent_variables(time_shift, dataSlice)

            # Generate the actual light curve magnitudes and SNR
            self.make_lightcurve(self.observation_epoch, dataSlice[self.filterCol])
            self.light_curve_SNRs = m52snr(self.light_curve_mags, dataSlice[self.m5Col])

            # Check observations above the defined threshold for detection.
            self.evaluate_SNR_thresholds(dataSlice)
            # With useable observations computed, evaluate all detection criteria
            self.evaluate_all_detection_criteria(dataSlice)

        if self.output_data:
            # Output all the light curves, regardless of detection threshold,
            # but indicate which were 'detected'.
            # Only returns for one phase shift, not all.
            return {
                "transient_id": self.transient_id,
                "expMJD": dataSlice[self.mjdCol],
                "epoch": self.observation_epoch,
                "filter": dataSlice[self.filterCol],
                "lcMag": self.light_curve_mags,
                "SNR": self.light_curve_SNRs,
                "detected": self.transient_detected,
            }
        else:
            return float(self.num_detected) / self.max_num_transients
Example 9
    def _calc_amp(self, dataSlice):
        """Fractional SNR on the amplitude, testing for a variety of possible phases
        """
        phases = np.arange(0, np.pi, np.pi / 8.)
        snr = m52snr(self.starMag, dataSlice[self.m5Col])
        amp_snrs = np.sin(dataSlice[self.mjdCol] / self.period * 2 * np.pi +
                          phases[:, np.newaxis]) * snr
        amp_snr = np.min(np.sqrt(np.sum(amp_snrs**2, axis=1)))

        max_snr = np.sqrt(np.sum(snr**2))
        return amp_snr / max_snr
Example 10
    def run(self, dataSlice, slicePoint=None):

        # Bail if we don't have enough points
        if dataSlice.size < self.means.size+3:
            return self.badval

        # Generate input for true light curve
        t = np.empty(dataSlice.size, dtype=list(zip(['time','filter'],[float,'|S1'])))
        t['time'] = dataSlice[self.mjdCol]-dataSlice[self.mjdCol].min()
        t['filter'] = dataSlice[self.filterCol]

        # If we are adding a distance modulus to the magnitudes
        if 'distMod' in list(slicePoint.keys()):
            mags = self.means + slicePoint['distMod']
        else:
            mags = self.means
        trueParams = np.append(np.array([self.period, self.phase, self.amplitude]), mags)
        trueLC = periodicStar(t, *trueParams)

        # Array to hold the fit results
        fits = np.zeros((self.nMonte,trueParams.size),dtype=float)
        for i in np.arange(self.nMonte):
            snr = m52snr(trueLC,dataSlice[self.m5Col])
            dmag = 2.5*np.log10(1.+1./snr)
            noise = np.random.randn(trueLC.size)*dmag
            # Suppress warnings about failing on covariance
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                # If it fails to converge, save values that should fail later
                try:
                    parmVals, pcov = curve_fit(periodicStar, t, trueLC+noise, p0=trueParams, sigma=dmag)
                except:
                    parmVals = trueParams*0-666
            fits[i,:] = parmVals

        # Throw out any magnitude fits if there are no observations in that filter
        ufilters = np.unique(dataSlice[self.filterCol])
        if ufilters.size < 9:
            for key in list(self.filter2index.keys()):
                if key not in ufilters:
                    fits[:,self.filter2index[key]] = -np.inf

        # Find the fraction of fits that meet the "well-fit" criteria
        periodFracErr = (fits[:,0]-trueParams[0])/trueParams[0]
        ampFracErr = (fits[:,2]-trueParams[2])/trueParams[2]
        magErr = fits[:,3:]-trueParams[3:]
        nBands = np.zeros(magErr.shape,dtype=int)
        nBands[np.where(magErr <= self.magTol)] = 1
        nBands = np.sum(nBands, axis=1)
        nRecovered = np.size(np.where( (periodFracErr <= self.periodTol) &
                                       (ampFracErr <= self.ampTol) &
                                       (nBands >= self.nBands) )[0])
        fracRecovered = float(nRecovered)/self.nMonte
        return fracRecovered
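
The Monte Carlo noise model used above converts an SNR into a magnitude uncertainty via dmag = 2.5*log10(1 + 1/snr), which approaches the familiar ~1.086/snr in the bright limit:

import numpy as np

snr = np.array([5.0, 20.0, 100.0])
print(2.5 * np.log10(1.0 + 1.0 / snr))  # [0.1979 0.0530 0.0108]
print(1.0857 / snr)                     # bright-source approximation, 2.5/ln(10)/snr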
Example 11
 def run(self, dataslice, slicePoint=None):
     filters = np.unique(dataslice[self.filterCol])
     snr = np.zeros(len(dataslice), dtype='float')
     # compute SNR for all observations
     for filt in filters:
         good = np.where(dataslice[self.filterCol] == filt)
         snr[good] = mafUtils.m52snr(self.mags[filt], dataslice[self.m5Col][good])
     position_errors = np.sqrt(mafUtils.astrom_precision(dataslice[self.seeingCol], snr)**2+self.atm_err**2)
     sigma = self._final_sigma(position_errors,dataslice['ra_pi_amp'],dataslice['dec_pi_amp'] )
     if self.normalize:
         # Leave the dec parallax as zero since one can't have ra and dec maximized at the same time.
         sigma = self._final_sigma(position_errors,dataslice['ra_pi_amp']*0+1.,dataslice['dec_pi_amp']*0 )/sigma
     return sigma
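
A plausible sketch of the _final_sigma helper used here, assuming the RA and Dec parallax offsets are fit independently and then combined inverse-variance style; this mirrors the documented MAF approach, but the exact implementation may differ:

import numpy as np

def _final_sigma(position_errors, ra_pi_amp, dec_pi_amp):
    """Parallax uncertainty from per-visit positional errors (arcsec) and
    parallax-factor amplitudes (assumed non-zero); returns milliarcsec."""
    sigma_a = position_errors / ra_pi_amp
    sigma_b = position_errors / dec_pi_amp
    sigma_ra = np.sqrt(1.0 / np.sum(1.0 / sigma_a**2))
    sigma_dec = np.sqrt(1.0 / np.sum(1.0 / sigma_b**2))
    # combine the two independent estimates and convert arcsec -> mas
    return np.sqrt(1.0 / (1.0 / sigma_ra**2 + 1.0 / sigma_dec**2)) * 1e3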
Example 12
 def run(self, dataSlice, slicePoint=None):
     # The idea here is that we calculate position errors (in RA and Dec) for all observations.
     # Then we generate arrays of the parallax offsets (delta RA parallax = ra_pi_amp, etc)
     #  and the DCR offsets (delta RA DCR = ra_dcr_amp, etc), and just add them together into one
     #  RA  (and Dec) offset. Then, we try to fit for how we combined these offsets, but while
     #  considering the astrometric noise. If we can figure out that we just added them together
     # (i.e. the curve_fit result is [a=1, b=1] for the function _positions above)
     # then we should be able to disentangle the parallax and DCR offsets when fitting 'for real'.
     # compute SNR for all observations
     snr = np.zeros(len(dataSlice), dtype='float')
     for filt in self.filters:
         inFilt = np.where(dataSlice[self.filterCol] == filt)
         snr[inFilt] = mafUtils.m52snr(self.mags[filt],
                                       dataSlice[self.m5Col][inFilt])
     # Compute the centroiding uncertainties
     # Temporary fix for FWHMeff to FWHMgeom calculation.
     if self.seeingCol.endswith('Eff'):
         seeing = dataSlice[self.seeingCol] * 0.822 + 0.052
     else:
         seeing = dataSlice[self.seeingCol]
     position_errors = np.sqrt(
         mafUtils.astrom_precision(seeing, snr)**2 + self.atm_err**2)
     # Construct the vectors of RA/Dec offsets. xdata is the "input data". ydata is the "output".
     xdata = np.empty((2, dataSlice.size * 2), dtype=float)
     xdata[0, :] = np.concatenate(
         (dataSlice['ra_pi_amp'], dataSlice['dec_pi_amp']))
     xdata[1, :] = np.concatenate(
         (dataSlice['ra_dcr_amp'], dataSlice['dec_dcr_amp']))
     ydata = np.sum(xdata, axis=0)
     # Use curve_fit to compute covariance between parallax and dcr amplitudes
     # Set the initial guess slightly off from the correct [1,1] to make sure it iterates.
     popt, pcov = curve_fit(self._positions,
                            xdata,
                            ydata,
                            p0=[1.1, 0.9],
                            sigma=np.concatenate(
                                (position_errors, position_errors)),
                            absolute_sigma=True)
     # Catch if the fit failed to converge on the correct solution.
     if np.max(np.abs(popt - np.array([1., 1.]))) > self.tol:
         return self.badval
     # Covariance between best fit parallax amplitude and DCR amplitude.
     cov = pcov[1, 0]
      # Convert covariance between parallax and DCR amplitudes to normalized correlation
     perr = np.sqrt(np.diag(pcov))
     correlation = cov / (perr[0] * perr[1])
     result = correlation
     # This can throw infs.
     if np.isinf(result):
         result = self.badval
     return result
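
curve_fit needs a model that mixes the stacked parallax and DCR offset vectors; a minimal sketch of what _positions presumably looks like (a linear combination with the two fitted amplitudes; if the fit recovers a = b = 1, the offsets are separable):

def _positions(x, a, b):
    """Model: observed offset = a*(parallax offset) + b*(DCR offset),
    where x is the (2, N) array built as xdata above."""
    return a * x[0, :] + b * x[1, :]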
Example 13
    def _calc_phase(self, dataSlice):
        """1 is perfectly balanced phase coverage, 0 is no effective coverage.
        """
        angles = dataSlice[self.mjdCol] % self.period
        angles = angles / self.period * 2. * np.pi
        x = np.cos(angles)
        y = np.sin(angles)

        snr = m52snr(self.starMag, dataSlice[self.m5Col])
        x_ave = np.average(x, weights=snr)
        y_ave = np.average(y, weights=snr)

        vector_off = np.sqrt(x_ave**2 + y_ave**2)
        return 1. - vector_off
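
_calc_phase places each observation on the unit circle at its phase and returns 1 minus the length of the SNR-weighted mean vector: uniformly spread phases score near 1, clumped phases near 0. A quick illustration with equal weights:

import numpy as np

def phase_balance(mjd, period):
    angles = (mjd % period) / period * 2.0 * np.pi
    return 1.0 - np.hypot(np.mean(np.cos(angles)), np.mean(np.sin(angles)))

rng = np.random.default_rng(42)
print(phase_balance(rng.uniform(0, 365, 100), 2.5))  # near 1: good coverage
print(phase_balance(rng.uniform(0, 0.1, 100), 2.5))  # near 0: all one phase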
Example 14
 def run(self, dataslice, slicePoint=None):
     filters = np.unique(dataslice[self.filterCol])
     if hasattr(filters[0], 'decode'):
         filters = [str(f.decode('utf-8')) for f in filters]
     snr = np.zeros(len(dataslice), dtype='float')
     # compute SNR for all observations
     for filt in filters:
         good = np.where(dataslice[self.filterCol] == filt)
         snr[good] = mafUtils.m52snr(self.mags[str(filt)], dataslice[self.m5Col][good])
     position_errors = np.sqrt(mafUtils.astrom_precision(dataslice[self.seeingCol],
                                                         snr)**2+self.atm_err**2)
     sigma = self._final_sigma(position_errors, dataslice['ra_pi_amp'], dataslice['dec_pi_amp'])
     if self.normalize:
         # Leave the dec parallax as zero since one can't have ra and dec maximized at the same time.
         sigma = self._final_sigma(position_errors,
                                   dataslice['ra_pi_amp']*0+1., dataslice['dec_pi_amp']*0)/sigma
     return sigma
Example 15
    def run(self, dataSlice, slicePoint=None):

        if np.size(dataSlice) < 2:
            return self.badval
        filters = np.unique(dataSlice[self.filterCol])
        snr = np.zeros(len(dataSlice), dtype='float')
        # compute SNR for all observations
        for filt in filters:
            good = np.where(dataSlice[self.filterCol] == filt)
            snr[good] = mafUtils.m52snr(self.mags[filt], dataSlice[self.m5Col][good])
        # Compute total parallax distance
        pf = np.sqrt(dataSlice['ra_pi_amp']**2+dataSlice['dec_pi_amp']**2)
        # Correlation between parallax factor and hour angle
        aboveLimit = np.where(snr >= self.snrLimit)[0]
        if np.size(aboveLimit) < 2:
            return self.badval
        rho,p = spearmanr(pf[aboveLimit], dataSlice[self.haCol][aboveLimit])
        return rho
Example 16
    def run(self, dataSlice, slicePoint=None):

        if np.size(dataSlice) < 2:
            return self.badval
        filters = np.unique(dataSlice[self.filterCol])
        snr = np.zeros(len(dataSlice), dtype='float')
        # compute SNR for all observations
        for filt in filters:
            good = np.where(dataSlice[self.filterCol] == filt)
            snr[good] = mafUtils.m52snr(self.mags[filt],
                                        dataSlice[self.m5Col][good])
        # Compute total parallax distance
        pf = np.sqrt(dataSlice['ra_pi_amp']**2 + dataSlice['dec_pi_amp']**2)
        # Correlation between parallax factor and hour angle
        aboveLimit = np.where(snr >= self.snrLimit)[0]
        if np.size(aboveLimit) < 2:
            return self.badval
        rho, p = spearmanr(pf[aboveLimit], dataSlice[self.haCol][aboveLimit])
        return rho
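
spearmanr returns the rank correlation rho and a p-value. A parallax factor that simply tracks hour angle (|rho| near 1) means parallax cannot be separated from refraction-like effects, while rho near 0 means the two are decoupled:

import numpy as np
from scipy.stats import spearmanr

rng = np.random.default_rng(0)
ha = rng.uniform(-6, 6, 50)            # hour angles
pf_tracking = (ha + 6.0) / 12.0        # monotonic in HA -> rho = 1 (bad)
pf_random = rng.uniform(0, 1, 50)      # unrelated to HA -> rho ~ 0 (good)
print(spearmanr(pf_tracking, ha)[0], spearmanr(pf_random, ha)[0])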
Example 17
    def run(self, dataSlice, slicePoint=None):
        result = 0
        n_pts = np.size(dataSlice[self.mjdCol])
        n_filt = np.size(np.unique(dataSlice[self.filterCol]))

        # If we had a correct model with phase, amplitude, period, mean_mags, then chi_squared/DoF would be ~1 with 3+n_filt free parameters.
        # The mean is one free parameter
        p1 = n_filt
        p2 = 3. + n_filt
        chi_sq_2 = 1. * (n_pts - p2)

        u_filters = np.unique(dataSlice[self.filterCol])

        if n_pts > p2:
            for period, starMag, amplitude in zip(self.periods, self.starMags,
                                                  self.amplitudes):
                chi_sq_1 = 0
                mags = utils.stellarMags(self.SedTemplate, rmag=starMag)
                for filtername in u_filters:
                    in_filt = np.where(
                        dataSlice[self.filterCol] == filtername)[0]
                    lc = amplitude * np.sin(
                        dataSlice[self.mjdCol][in_filt] *
                        (np.pi * 2) / period) + mags[filtername]
                    snr = m52snr(lc, dataSlice[self.m5Col][in_filt])
                    delta_m = 2.5 * np.log10(1. + 1. / snr)
                    weights = 1. / (delta_m**2)
                    weighted_mean = np.sum(weights * lc) / np.sum(weights)
                    chi_sq_1 += np.sum(((lc - weighted_mean)**2 / delta_m**2))
                # Yes, I'm fitting magnitudes rather than flux. At least I feel kinda bad about it.
                # F-test for nested models Regression problems:  https://en.wikipedia.org/wiki/F-test
                f_numerator = (chi_sq_1 - chi_sq_2) / (p2 - p1)
                f_denom = 1.  # This is just reduced chi-squared for the more complicated model, so should be 1.
                f_val = f_numerator / f_denom
                # Has DoF (p2-p1, n-p2)
                # https://stackoverflow.com/questions/21494141/how-do-i-do-a-f-test-in-python/21503346
                p_value = scipy.stats.f.sf(f_val, p2 - p1, n_pts - p2)
                if np.isfinite(p_value):
                    if p_value < self.sig_level:
                        result += 1

        return result
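
The statistic above is the F-test for nested models: the constant-per-filter model has p1 parameters, the periodic model p2 = p1 + 3, and the periodic model is assumed to fit well (reduced chi-squared of 1, so chi_sq_2 = n_pts - p2). A compact restatement of the computation:

import scipy.stats

def ftest_pvalue(chi_sq_1, n_pts, p1, p2):
    """p-value that the (p2 - p1) extra periodic parameters are unwarranted."""
    chi_sq_2 = 1.0 * (n_pts - p2)
    f_val = ((chi_sq_1 - chi_sq_2) / (p2 - p1)) / (chi_sq_2 / (n_pts - p2))
    return scipy.stats.f.sf(f_val, p2 - p1, n_pts - p2)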
Example 18
    def run(self, dataSlice, slicePoint=None):

        snr = np.zeros(len(dataSlice), dtype='float')
        # compute SNR for all observations
        for filt in self.filters:
            inFilt = np.where(dataSlice[self.filterCol] == filt)
            snr[inFilt] = mafUtils.m52snr(self.mags[filt],
                                          dataSlice[self.m5Col][inFilt])
        # Compute the centroiding uncertainties
        position_errors = np.sqrt(
            mafUtils.astrom_precision(dataSlice[self.seeingCol], snr)**2 +
            self.atm_err**2)

        # Construct the vectors
        xdata = np.empty((2, dataSlice.size * 2), dtype=float)
        xdata[0, :] = np.concatenate(
            (dataSlice['ra_pi_amp'], dataSlice['dec_pi_amp']))
        xdata[1, :] = np.concatenate(
            (dataSlice['ra_dcr_amp'], dataSlice['dec_dcr_amp']))
        ydata = np.sum(xdata, axis=0)
        # Use curve_fit to compute covariance between parallax and dcr amplitudes
        # Set the initial guess slightly off from the correct [1,1] to make sure it iterates.
        popt, pcov = curve_fit(self._positions,
                               xdata,
                               ydata,
                               p0=[1.1, 0.9],
                               sigma=np.concatenate(
                                   (position_errors, position_errors)),
                               absolute_sigma=True)
        # Catch if the fit failed to converge on the correct solution.
        if np.max(np.abs(popt - np.array([1., 1.]))) > self.tol:
            return self.badval
        # Covariance between best fit parallax amplitude and DCR amplitude.
        cov = pcov[1, 0]
        # Convert covariance between parallax and DCR amplitudes to normalized correlation
        perr = np.sqrt(np.diag(pcov))
        correlation = cov / (perr[0] * perr[1])
        result = correlation
        # This can throw infs.
        if np.isinf(result):
            result = self.badval
        return result
Example 19
    def run(self, dataSlice, slicePoint=None):

        if np.size(dataSlice) < 2:
            return self.badval

        filters = np.unique(dataSlice[self.filterCol])
        snr = np.zeros(len(dataSlice), dtype='float')
        # compute SNR for all observations
        for filt in filters:
            inFilt = np.where(dataSlice[self.filterCol] == filt)
            snr[inFilt] = mafUtils.m52snr(self.mags[filt], dataSlice[self.m5Col][inFilt])

        weights = self._computeWeights(dataSlice, snr)
        aveR = self._weightedR(dataSlice['ra_pi_amp'], dataSlice['dec_pi_amp'], weights)
        if self.thetaRange > 0:
            thetaCheck = self._thetaCheck(dataSlice['ra_pi_amp'], dataSlice['dec_pi_amp'], snr)
        else:
            thetaCheck = 1.
        result = aveR*thetaCheck
        return result
Example 20
    def run(self, dataSlice, slicePoint=None):
        if np.size(dataSlice) < 2:
            return self.badval

        filters = np.unique(dataSlice[self.filterCol])
        filters = [str(f) for f in filters]
        snr = np.zeros(len(dataSlice), dtype='float')
        # compute SNR for all observations
        for filt in filters:
            inFilt = np.where(dataSlice[self.filterCol] == filt)
            snr[inFilt] = mafUtils.m52snr(self.mags[str(filt)], dataSlice[self.m5Col][inFilt])

        weights = self._computeWeights(dataSlice, snr)
        aveR = self._weightedR(dataSlice['ra_pi_amp'], dataSlice['dec_pi_amp'], weights)
        if self.thetaRange > 0:
            thetaCheck = self._thetaCheck(dataSlice['ra_pi_amp'], dataSlice['dec_pi_amp'], snr)
        else:
            thetaCheck = 1.
        result = aveR*thetaCheck
        return result
Example 21
 def run(self, dataSlice, slicePoint=None):
     # The idea here is that we calculate position errors (in RA and Dec) for all observations.
     # Then we generate arrays of the parallax offsets (delta RA parallax = ra_pi_amp, etc)
     #  and the DCR offsets (delta RA DCR = ra_dcr_amp, etc), and just add them together into one
     #  RA  (and Dec) offset. Then, we try to fit for how we combined these offsets, but while
     #  considering the astrometric noise. If we can figure out that we just added them together
     # (i.e. the curve_fit result is [a=1, b=1] for the function _positions above)
     # then we should be able to disentangle the parallax and DCR offsets when fitting 'for real'.
     # compute SNR for all observations
     snr = np.zeros(len(dataSlice), dtype='float')
     for filt in self.filters:
         inFilt = np.where(dataSlice[self.filterCol] == filt)
         snr[inFilt] = mafUtils.m52snr(self.mags[filt], dataSlice[self.m5Col][inFilt])
     # Compute the centroiding uncertainties
     # Note that these centroiding uncertainties depend on the physical size of the PSF, thus
     # we are using seeingFwhmGeom for these metrics, not seeingFwhmEff.
     position_errors = np.sqrt(mafUtils.astrom_precision(dataSlice[self.seeingCol], snr)**2 +
                               self.atm_err**2)
     # Construct the vectors of RA/Dec offsets. xdata is the "input data". ydata is the "output".
     xdata = np.empty((2, dataSlice.size * 2), dtype=float)
     xdata[0, :] = np.concatenate((dataSlice['ra_pi_amp'], dataSlice['dec_pi_amp']))
     xdata[1, :] = np.concatenate((dataSlice['ra_dcr_amp'], dataSlice['dec_dcr_amp']))
     ydata = np.sum(xdata, axis=0)
     # Use curve_fit to compute covariance between parallax and dcr amplitudes
     # Set the initial guess slightly off from the correct [1,1] to make sure it iterates.
     popt, pcov = curve_fit(self._positions, xdata, ydata, p0=[1.1, 0.9],
                            sigma=np.concatenate((position_errors, position_errors)),
                            absolute_sigma=True)
     # Catch if the fit failed to converge on the correct solution.
     if np.max(np.abs(popt - np.array([1., 1.]))) > self.tol:
         return self.badval
     # Covariance between best fit parallax amplitude and DCR amplitude.
     cov = pcov[1, 0]
      # Convert covariance between parallax and DCR amplitudes to normalized correlation
     perr = np.sqrt(np.diag(pcov))
     correlation = cov/(perr[0]*perr[1])
     result = correlation
     # This can throw infs.
     if np.isinf(result):
         result = self.badval
     return result
Example 22
    def run(self, dataSlice, slicePoint=None):
        if self.detect == True and self.time_before_peak > 0:
            raise Exception(
                "When detect = True, time_before_peak must be zero")
        # Generate the lightcurve for this object
        # construct the time array in a simple way
        t = dataSlice[self.mjdCol] - np.min(dataSlice[self.nightCol])
        t = t - t.min()

        # Use the blending factor if the slicePoint provides one;
        # otherwise default to no blending.
        try:
            amplitudes = microlensing_amplification(
                t,
                impact_parameter=slicePoint['impact_parameter'],
                crossing_time=slicePoint['crossing_time'],
                peak_time=slicePoint['peak_time'],
                blending_factor=slicePoint['blending_factor'])

        except KeyError:  # no blending_factor in the slicePoint
            amplitudes = microlensing_amplification(
                t,
                impact_parameter=slicePoint['impact_parameter'],
                crossing_time=slicePoint['crossing_time'],
                peak_time=slicePoint['peak_time'])

        filters = np.unique(dataSlice[self.filterCol])
        amplified_mags = amplitudes * 0

        for filtername in filters:
            infilt = np.where(dataSlice[self.filterCol] == filtername)[0]
            amplified_mags[infilt] = self.mags[filtername] - 2.5 * np.log10(
                amplitudes[infilt])

        # The SNR of each point in the light curve
        snr = m52snr(amplified_mags, dataSlice[self.m5Col])
        # The magnitude uncertainties that go with amplified mags
        mag_uncert = 2.5 * np.log10(1 + 1. / snr)

        n_pre = []
        n_post = []
        for filtername in filters:
            if self.metricCalc == 'detect':
                if self.time_before_peak == 'optimal':
                    time_before_peak_optimal = info_peak_before_t0(
                        slicePoint['impact_parameter'],
                        slicePoint['crossing_time'])
                    # observations pre-peak and in the given filter
                    infilt = np.where((dataSlice[self.filterCol] == filtername)
                                      & (t < (slicePoint['peak_time'] -
                                              time_before_peak_optimal)))[0]

                else:
                    # observations pre-peak and in the given filter
                    infilt = np.where((dataSlice[self.filterCol] == filtername)
                                      & (t < (slicePoint['peak_time'] -
                                              self.time_before_peak)))[0]

                # observations post-peak and in the given filter
                outfilt = np.where((dataSlice[self.filterCol] == filtername)
                                   & (t > slicePoint['peak_time']))[0]
                # Broadcast to calc the mag_i - mag_j
                diffs = amplified_mags[infilt] - amplified_mags[
                    infilt][:, np.newaxis]
                diffs_uncert = np.sqrt(mag_uncert[infilt]**2 +
                                       mag_uncert[infilt][:, np.newaxis]**2)
                diffs_post = amplified_mags[outfilt] - amplified_mags[
                    outfilt][:, np.newaxis]
                diffs_post_uncert = np.sqrt(mag_uncert[outfilt]**2 +
                                            mag_uncert[outfilt][:,
                                                                np.newaxis]**2)

                # Calculating this as a catalog-level detection. In theory,
                # we could have a high SNR template image, so there would be
                # little to no additional uncertainty from the subtraction.

                sigma_above = np.abs(diffs) / diffs_uncert
                sigma_above_post = np.abs(diffs_post) / diffs_post_uncert
                # divide by 2 because array has i,j and j,i
                n_above = np.size(
                    np.where(sigma_above > self.detect_sigma)[0]) / 2
                n_pre.append(n_above)
                n_above_post = np.size(
                    np.where(sigma_above_post > self.detect_sigma)[0]) / 2
                n_post.append(n_above_post)

            elif self.metricCalc == 'Npts':
                # observations pre-peak and in the given filter within 2tE
                infilt = np.where((dataSlice[self.filterCol] == filtername)
                                  & (t < (slicePoint['peak_time']))
                                  & (t > (slicePoint['peak_time'] -
                                          slicePoint['crossing_time'])))[0]
                # observations post-peak and in the given filter within 2tE
                outfilt = np.where((dataSlice[self.filterCol] == filtername)
                                   & (t > (slicePoint['peak_time']))
                                   & (t < (slicePoint['peak_time'] +
                                           slicePoint['crossing_time'])))[0]

                n_pre.append(len(infilt))
                n_post.append(len(outfilt))

        npts = np.sum(n_pre)
        npts_post = np.sum(n_post)
        if self.metricCalc == 'detect':
            if self.detect == True:
                if npts >= self.ptsNeeded and npts_post >= self.ptsNeeded:
                    return 1
                else:
                    return 0
            else:
                if npts >= self.ptsNeeded:
                    return 1
                else:
                    return 0
        elif self.metricCalc == 'Npts':
            return npts + npts_post
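
microlensing_amplification presumably implements the standard point-source point-lens (Paczynski) magnification; a sketch with optional blending, where the signature mirrors the calls above but the body is an assumption:

import numpy as np

def microlensing_amplification(t, impact_parameter=1.0, crossing_time=1825.0,
                               peak_time=100.0, blending_factor=1.0):
    """Paczynski magnification A(u) = (u**2 + 2) / (u * sqrt(u**2 + 4)) with
    u(t) = sqrt(u0**2 + ((t - t0)/tE)**2); blending dilutes the excess flux."""
    u = np.sqrt(impact_parameter**2 + ((t - peak_time) / crossing_time)**2)
    amp = (u**2 + 2.0) / (u * np.sqrt(u**2 + 4.0))
    return (amp - 1.0) * blending_factor + 1.0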
Example 23
    def run(self, dataSlice, slicePoint=None):

        # Bail if we don't have enough points
        if dataSlice.size < self.means.size+3:
            return self.badval
        
        # Generate input for true light curve
        
        lightcurvelength = dataSlice.size
        
        np.set_printoptions(suppress=True) 
        
        t = np.empty(lightcurvelength, dtype=list(zip(['time','filter'],[float,'|U1'])))
        t['time'] = (dataSlice[self.mjdCol]-dataSlice[self.mjdCol].min())
        t['filter'] = dataSlice[self.filterCol]
        m5 = dataSlice[self.m5Col]
        
        
        lightcurvelength_days = self.time_interval
        
        # evaluate light curves piecewise in subruns
        subruns = int(np.max(t['time']) / lightcurvelength_days)
        
        print('number of subruns: ', subruns)
        fracRecovered_list=[]
        
        for subrun_idx in range(0,subruns):
        
            good = ((t['time'] >= (lightcurvelength_days * subrun_idx)) &
                    (t['time'] <= (lightcurvelength_days * (subrun_idx + 1))))
                  
            t_subrun = t[good]
            m5_subrun = m5[good]
         
            if(t_subrun['time'].size>0):

                # If we are adding a distance modulus to the magnitudes
                if 'distMod' in list(slicePoint.keys()):
                    mags = self.means + slicePoint['distMod']
                else:
                    mags = self.means
                
                #slightly different periods and amplitudes (+/- 10 %) to mimic true stars
                #random phase offsets to mimic observation starting at random phase

                true_period=random.uniform(0.9,1.1)*self.period
                true_amplitude=random.uniform(0.9,1.1)*self.amplitude
                
                if(np.isnan(self.phase)): 
                    #a random phase (in days) should be assigned
                    true_phase=random.uniform(0,1)*self.period
                else:
                    true_phase = self.phase
                
                trueParams = np.append(np.array([true_period, true_phase, true_amplitude]), mags)
                true_obj = periodicStar(t_subrun['filter'])
                trueLC = true_obj(t_subrun['time'], *trueParams)

                # Array to hold the fit results
                fits = np.zeros((self.nMonte,trueParams.size),dtype=float)
                for i in np.arange(self.nMonte):
                    snr = m52snr(trueLC,m5_subrun)
                    dmag = 2.5*np.log10(1.+1./snr)
                    noise = np.random.randn(trueLC.size)*dmag
                    # Suppress warnings about failing on covariance
                    fit_obj = periodicStar(t_subrun['filter'])
                    with warnings.catch_warnings():
                        warnings.simplefilter("ignore")
                        # If it fails to converge, save values that should fail later
                        try:
                            parmVals, pcov = curve_fit(fit_obj, t_subrun['time'], trueLC+noise, p0=trueParams,
                                                       sigma=dmag)
                        except:
                            parmVals = trueParams*0+np.inf
                    fits[i,:] = parmVals

                # Throw out any magnitude fits if there are no observations in that filter
                ufilters = np.unique(dataSlice[self.filterCol])
                if ufilters.size < 9:
                    for key in list(self.filter2index.keys()):
                        if key not in ufilters:
                            fits[:,self.filter2index[key]] = -np.inf

                # Find the fraction of fits that meet the "well-fit" criteria
                periodFracErr = np.abs((fits[:,0]-trueParams[0])/trueParams[0])
                ampFracErr = np.abs((fits[:,2]-trueParams[2])/trueParams[2])
                magErr = np.abs(fits[:,3:]-trueParams[3:])
                nBands = np.zeros(magErr.shape,dtype=int)
                nBands[np.where(magErr <= self.magTol)] = 1
                nBands = np.sum(nBands, axis=1)
                nRecovered = np.size(np.where( (periodFracErr <= self.periodTol) &
                                               (ampFracErr <= self.ampTol) &
                                               (nBands >= self.nBands) )[0])
                
                fracRecovered = float(nRecovered)/self.nMonte
         
                fracRecovered_list.append(fracRecovered)
            

        fracRecovered = np.sum(fracRecovered_list)/(len(fracRecovered_list))

        return fracRecovered
Example 24
    def run(self, dataSlice, slicePoint=None):
        """"Calculate the detectability of a transient with the specified lightcurve.

        If self.dataout is True, then returns the full lightcurve for each object instead of the total
        number of transients that are detected.

        Parameters
        ----------
        dataSlice : numpy.array
            Numpy structured array containing the data related to the visits provided by the slicer.
        
        slicePoint : dict, optional
            Dictionary containing information about the slicepoint currently active in the slicer.

        Returns
        -------
        float or list of dicts
            The total number of transients that could be detected (if dataout
            is False). Otherwise, a list of dicts with arrays of 'lcNumber',
            'lcMag', 'detected', 'time', 'detectThresh', 'filter'.
        """

        # Sort the entire dataSlice in order of time.
        dataSlice.sort(order=self.mjdCol)
        tSpan = (dataSlice[self.mjdCol].max() - dataSlice[self.mjdCol].min()
                 )  # in days

        lcv_template = self.lcv_template
        transDuration = lcv_template['ph'].max() - lcv_template['ph'].min(
        )  # in days

        # phase check
        tshifts = np.arange(self.nPhaseCheck) * transDuration / float(
            self.nPhaseCheck)

        lcNumber = np.floor(
            (dataSlice[self.mjdCol] - dataSlice[self.mjdCol].min()) /
            transDuration)
        ulcNumber = np.unique(lcNumber)

        nTransMax = 0
        nDetected = 0
        dataout_dict_list = []
        for tshift in tshifts:
            #print('check tshift ', tshift)
            lcEpoch = np.fmod(
                dataSlice[self.mjdCol] - dataSlice[self.mjdCol].min() + tshift,
                transDuration) + self.epochStart

            # total number of transients possibly detected
            nTransMax += np.ceil(tSpan / transDuration)

            # generate the actual light curve
            lcFilters = dataSlice[self.filterCol]
            lcMags = self.make_lightCurve(lcEpoch, lcFilters)
            lcSNR = utils.m52snr(lcMags, dataSlice[self.m5Col])

            # Identify detections above SNR for each filter
            lcAboveThresh = np.zeros(len(lcSNR), dtype=bool)
            for f in np.unique(lcFilters):
                filtermatch = np.where(dataSlice[self.filterCol] == f)
                lcAboveThresh[filtermatch] = np.where(
                    lcSNR[filtermatch] >= self.detectSNR[f], True, False)

            # check conditions for each light curve
            lcDetect = np.ones(len(ulcNumber), dtype=bool)
            lcDetectOut = np.ones(len(lcNumber), dtype=bool)
            for i, lcN in enumerate(ulcNumber):

                lcN_idx = np.where(lcNumber == lcN)
                lcEpoch_i = lcEpoch[lcN_idx]
                lcMags_i = lcMags[lcN_idx]
                lcFilters_i = lcFilters[lcN_idx]
                lcAboveThresh_i = lcAboveThresh[lcN_idx]

                #check total number of observations for each band
                for f in np.unique(lcFilters_i):
                    f_Idx = np.where(lcFilters_i == f)
                    if len(np.where(
                            lcAboveThresh_i[f_Idx])[0]) < self.nObsTotal[f]:
                        lcDetect[i] = False
                        lcDetectOut[lcN_idx] = False

                # number of observations before peak
                prePeakCheck = (lcEpoch_i <
                                self.peakEpoch - self.nearPeakT / 2)
                prePeakIdx = np.where(prePeakCheck == True)
                if len(np.where(
                        lcAboveThresh_i[prePeakIdx])[0]) < self.nObsPrePeak:
                    lcDetect[i] = False
                    lcDetectOut[lcN_idx] = False

                # check number of observations near peak for each band
                nearPeakCheck = (
                    lcEpoch_i >= self.peakEpoch - self.nearPeakT / 2) & (
                        lcEpoch_i <= self.peakEpoch + self.nearPeakT / 2)
                nearPeakIdx = np.where(nearPeakCheck == True)

                for f in np.unique(lcFilters_i):
                    nearPeakIdx_f = np.intersect1d(nearPeakIdx,
                                                   np.where(lcFilters_i == f))
                    if len(np.where(lcAboveThresh_i[nearPeakIdx_f])
                           [0]) < self.nObsNearPeak[f]:
                        lcDetect[i] = False
                        lcDetectOut[lcN_idx] = False

                # check number of filters near peak
                filtersNearPeakIdx = np.intersect1d(
                    nearPeakIdx,
                    np.where(lcAboveThresh_i)[0])
                if len(np.unique(lcFilters_i[filtersNearPeakIdx])
                       ) < self.nFiltersNearPeak:
                    lcDetect[i] = False
                    lcDetectOut[lcN_idx] = False

                ## check number of observations post peak
                # postPeakCheck
                postPeakCheck = (lcEpoch_i >= self.peakEpoch + self.nearPeakT /
                                 2) & (lcEpoch_i <= self.peakEpoch +
                                       self.nearPeakT / 2 + self.postPeakT)
                postPeakIdx = np.where(postPeakCheck == True)
                if len(np.where(
                        lcAboveThresh_i[postPeakIdx])[0]) < self.nObsPostPeak:
                    lcDetect[i] = False
                    lcDetectOut[lcN_idx] = False

                # check number of filters post peak
                filtersPostPeakIdx = np.intersect1d(
                    postPeakIdx,
                    np.where(lcAboveThresh_i)[0])
                if len(np.unique(lcFilters_i[filtersPostPeakIdx])
                       ) < self.nFiltersPostPeak:
                    lcDetect[i] = False
                    lcDetectOut[lcN_idx] = False

            # return values
            nDetected += len(np.where(lcDetect == True)[0])
            prePeakCheck = (lcEpoch <= self.peakEpoch - self.nearPeakT / 2)
            nearPeakCheck = (lcEpoch >=
                             (self.peakEpoch - self.nearPeakT / 2)) & (
                                 lcEpoch <=
                                 (self.peakEpoch + self.nearPeakT / 2))
            postPeakCheck = (
                lcEpoch >= (self.peakEpoch + self.nearPeakT / 2)) & (
                    lcEpoch <=
                    (self.peakEpoch + self.nearPeakT / 2 + self.postPeakT))

            #print(nTransMax, nDetected, lcDetect)
            dataout_dict_tshift = {
                'tshift': np.repeat(tshift, len(lcEpoch)),
                'expMJD': dataSlice[self.mjdCol],
                'm5': dataSlice[self.m5Col],
                'filters': dataSlice[self.filterCol],
                'lcNumber': lcNumber,
                'lcEpoch': lcEpoch,
                'prePeakCheck': prePeakCheck,
                'nearPeakCheck': nearPeakCheck,
                'postPeakCheck': postPeakCheck,
                'lcMags': lcMags,
                'lcSNR': lcSNR,
                'lcMagsStd': self.snr2std(lcSNR),
                'lcAboveThresh': lcAboveThresh,
                'detected': lcDetectOut
            }

            dataout_dict_list.append(dataout_dict_tshift)

        if self.dataout:

            return dataout_dict_list

        else:
            return float(nDetected / nTransMax) if nTransMax != 0 else 0.
Example 25
    def run(self, dataSlice, slicePoint=None):
        """"Calculate the detectability of a transient with the specified lightcurve.

        If self.dataout is True, then returns the full lightcurve for each object instead of the total
        number of transients that are detected.

        Parameters
        ----------
        dataSlice : numpy.array
            Numpy structured array containing the data related to the visits provided by the slicer.
        
        slicePoint : dict, optional
            Dictionary containing information about the slicepoint currently active in the slicer.

        Returns
        -------
        float or list of dicts
            The total number of transients that could be detected (if dataout
            is False). Otherwise, a list of dicts with arrays of 'lcNumber',
            'lcMag', 'detected', 'time', 'detectThresh', 'filter'.
        """

        # Sort the entire dataSlice in order of time.
        dataSlice.sort(order=self.mjdCol)
        survey_length = (dataSlice[self.mjdCol].max() -
                         dataSlice[self.mjdCol].min())  # in days

        lcv_template = self.lcv_template
        transDuration = lcv_template['ph'].max() - lcv_template['ph'].min(
        )  # in days

        # how many event occured
        nLc = np.random.poisson(self.eventRate * survey_length)

        # generate nLc random start time of each light curve
        t0 = np.random.randint(0,
                               int(survey_length) + 1,
                               nLc) + dataSlice[self.mjdCol].min()

        # dict to store output info
        lcDictList = []
        nDetected = 0

        # loop over each light curve
        for i, t0_i in enumerate(t0):
            # the index for ith light curve
            lcIdx = (dataSlice[self.mjdCol] >=
                     t0_i) & (dataSlice[self.mjdCol] <= t0_i + transDuration)

            lcMjd = dataSlice[self.mjdCol][lcIdx]
            lcEpoch = lcMjd - t0_i + self.epochStart
            lcFilters = dataSlice[self.filterCol][lcIdx]

            # make light curve
            lcMags = self.make_lightCurve(lcEpoch, lcFilters)

            # get SNR
            m5 = dataSlice[self.m5Col][lcIdx]
            lcSNR = utils.m52snr(lcMags, m5)

            # check SNR for each filter
            lcAboveThresh = np.zeros(len(lcSNR), dtype=bool)
            for f in np.unique(lcFilters):
                filtermatch = np.where(lcFilters == f)
                lcAboveThresh[filtermatch] = (
                    lcSNR[filtermatch] >= self.detectSNR[f])

            # ----------check all conditions-----------
            # first assume lcDetect = True, if one condition fails, set to False
            lcDetect = True

            # check total number of observations for each band
            for f in np.unique(lcFilters):
                filtermatch = np.where(lcFilters == f)
                if len(np.where(
                        lcAboveThresh[filtermatch])[0]) < self.nObsTotal[f]:
                    lcDetect = False

            # number of observations before peak
            prePeakCheck = (lcEpoch < self.peakEpoch - self.nearPeakT / 2)
            prePeakIdx = np.where(prePeakCheck)
            if len(np.where(lcAboveThresh[prePeakIdx])[0]) < self.nObsPrePeak:
                lcDetect = False

            # check number of observations near peak for each band
            nearPeakCheck = (
                lcEpoch >= self.peakEpoch - self.nearPeakT / 2) & (
                    lcEpoch <= self.peakEpoch + self.nearPeakT / 2)
            nearPeakIdx = np.where(nearPeakCheck)
            # near peak obs for each band
            for f in np.unique(lcFilters):
                nearPeakIdx_f = np.intersect1d(nearPeakIdx,
                                               np.where(lcFilters == f))
                if len(np.where(lcAboveThresh[nearPeakIdx_f])
                       [0]) < self.nObsNearPeak[f]:
                    lcDetect = False

            # check number of filters near peak
            filtersNearPeakIdx = np.intersect1d(nearPeakIdx,
                                                np.where(lcAboveThresh)[0])
            if len(np.unique(
                    lcFilters[filtersNearPeakIdx])) < self.nFiltersNearPeak:
                lcDetect = False

            # check number of observations post peak
            postPeakCheck = ((lcEpoch >= self.peakEpoch + self.nearPeakT / 2)
                             & (lcEpoch <= self.peakEpoch +
                                self.nearPeakT / 2 + self.postPeakT))
            postPeakIdx = np.where(postPeakCheck)
            if len(np.where(
                    lcAboveThresh[postPeakIdx])[0]) < self.nObsPostPeak:
                lcDetect = False

            # check number of filters post peak
            filtersPostPeakIdx = np.intersect1d(postPeakIdx,
                                                np.where(lcAboveThresh)[0])
            if len(np.unique(
                    lcFilters[filtersPostPeakIdx])) < self.nFiltersPostPeak:
                lcDetect = False

            # ----------------------

            # values for output
            if lcDetect:
                nDetected += 1

            lcDict = {
                'lcN': i,
                'lcMjd': lcMjd,
                'lcEpoch': lcEpoch,
                'lcFilters': lcFilters,
                'lcMags': lcMags,
                'm5': m5,
                'lcSNR': lcSNR,
                'lcMagsStd': self.snr2std(lcSNR),
                'lcAboveThresh': lcAboveThresh,
                'prePeakCheck': prePeakCheck,
                'nearPeakCheck': nearPeakCheck,
                'postPeakCheck': postPeakCheck,
                'detected': lcDetect
            }

            lcDictList.append(lcDict)

        if self.dataout:

            return lcDictList
        else:
            #return float(nDetected / nTransMax) if nTransMax!=0 else 0.
            return float(nDetected / nLc) if nLc != 0 else 0.
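
The detection logic above splits each simulated light curve into pre-peak, near-peak and post-peak windows and demands a minimum number of above-threshold observations (and filters) in each. A toy sketch of the three window masks, with illustrative values standing in for peakEpoch, nearPeakT and postPeakT:

import numpy as np

peakEpoch, nearPeakT, postPeakT = 10.0, 4.0, 10.0  # illustrative, in days
lcEpoch = np.array([1.0, 5.0, 8.5, 9.5, 11.0, 13.0, 18.0])

prePeak = lcEpoch < peakEpoch - nearPeakT / 2
nearPeak = ((lcEpoch >= peakEpoch - nearPeakT / 2)
            & (lcEpoch <= peakEpoch + nearPeakT / 2))
postPeak = ((lcEpoch >= peakEpoch + nearPeakT / 2)
            & (lcEpoch <= peakEpoch + nearPeakT / 2 + postPeakT))

print(lcEpoch[prePeak])   # [1. 5.]
print(lcEpoch[nearPeak])  # [ 8.5  9.5 11. ]
print(lcEpoch[postPeak])  # [13. 18.]
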
    def run(self, dataSlice, slicePoint=None):

        # Bail if we don't have enough points
        if dataSlice.size < self.means.size + 3:
            return self.badval

        # Generate input for true light curve
        # list(zip(...)) and a unicode filter dtype are needed under Python 3;
        # a bytes dtype ('|S1') would make the filter comparison below fail.
        t = np.empty(dataSlice.size, dtype=list(zip(["time", "filter"], [float, "U1"])))
        t["time"] = dataSlice[self.mjdCol] - dataSlice[self.mjdCol].min()
        t["filter"] = dataSlice[self.filterCol]

        # If we are adding a distance modulus to the magnitudes
        if "distMod" in slicePoint.keys():
            mags = self.means + slicePoint["distMod"]
        else:
            mags = self.means
        trueParams = np.append(np.array([self.period, self.phase, self.amplitude]), mags)
        trueLC = periodicStar(t, *trueParams)

        # Array to hold the fit results
        fits = np.zeros((self.nMonte, trueParams.size), dtype=float)

        # generate phase array up-front
        phaseMC = np.repeat(self.phase, self.nMonte)
        if self.randomisePhase:
            phaseMC = np.random.uniform(size=self.nMonte) * self.period

        # Set up object to hold the luminosity function. Find the
        # interpolation function before doing the monte carlo trials
        # (since the fit to the LF is only dependent on the
        # location). Also populate the seeing
        PhotCrowd = confusion.CrowdingSigma(dataSlice, slicePoint)
        PhotCrowd.getErrorFuncAndSeeing()

        # We only have crowding information for r-band.
        bCanCrowd = t["filter"] == "r"  # can add more conditions here
        gCanCrowd = np.where(bCanCrowd)

        # WIC - be a bit stricter with outliers
        if np.size(gCanCrowd) < 80:
            return self.badval

        # Loop through the Monte Carlo trials
        for i in np.arange(self.nMonte):

            # Copy the parameters, slot in the phase (randomized or not)
            trialParams = np.copy(trueParams)
            trialParams[1] = phaseMC[i]

            # Generate the "clean" lightcurve for this trial (incl randomized phase)
            trialLC = periodicStar(t, *trialParams)

            # Estimate the photometric uncertainty.
            snr = m52snr(trialLC, dataSlice[self.m5Col])
            sigmPhot = 2.5 * np.log10(1.0 + 1.0 / snr)

            # print "DEBUG:", np.size(sigmPhot), np.size(trialLC)

            # Now for crowding uncertainty. Pass the current set of
            # magnitudes to the Crowd object, estimate the sigmCrowd
            # at each point. At this date (2016-01-03) I still think
            # it's better to use the "true" magnitude rather than
            # rather than perturbing by photometric noise first.
            PhotCrowd.magSamples = np.copy(trialLC)
            PhotCrowd.calcSigmaSeeingFromInterp()
            sigmCrowd = np.copy(PhotCrowd.sigmaWithSeeing)

            # Now apply both sources of error in succession. I am not
            # convinced the quad sum is correct in this situation, but
            # we have to start somewhere...
            photLC = trialLC + np.random.randn(trialLC.size) * sigmPhot
            if not self.ignoreCrowding:
                bothLC = photLC + np.random.randn(photLC.size) * sigmCrowd
                sigmBoth = np.sqrt(sigmPhot ** 2 + sigmCrowd ** 2)
            else:
                bothLC = np.copy(photLC)
                sigmBoth = np.copy(sigmPhot)

            if i < 1 and self.beVerbose:
                ThisRA = slicePoint["ra"] * 180.0 / np.pi
                ThisDE = slicePoint["dec"] * 180.0 / np.pi
                medLC = np.median(bothLC[gCanCrowd])
                medTr = np.median(trialLC[gCanCrowd])
                medCr = np.median(sigmCrowd[gCanCrowd])
                stdCr = np.std(sigmCrowd[gCanCrowd])
                # Update - not convinced crowding metric is being
                # correctly applied...
                seeMed = np.median(PhotCrowd.vecSeeing[gCanCrowd])

                medPh = np.median(sigmPhot[gCanCrowd])
                stdTo = np.std(sigmBoth)
                lcPho = np.std(photLC[gCanCrowd])
                lcBot = np.std(bothLC[gCanCrowd])
                print "INFO: %4i %.2f %.2f,  %.3f %.3f, %.3f, %.4f %.4f ; %.4f; %.3f %.3f" % (
                    np.size(gCanCrowd),
                    ThisRA,
                    ThisDE,
                    medLC,
                    medTr,
                    seeMed,
                    medPh,
                    medCr,
                    stdCr,
                    lcPho,
                    lcBot,
                )

            # At this point we search for the periodic signal.

            # WIC 2016-01-03 - I *think* this should work OK even
            # though we only have a subset of datapoints that can be
            # useful (i.e. at the right filter). Just limit the points
            # fed to curve_fit to those for which we have crowding
            # information.

            ### noise = np.random.randn(trueLC.size)*dmag
            # Suppress warnings about failing on covariance
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                # If it fails to converge, save values that should fail later
                try:
                    parmVals, pcov = curve_fit(
                        periodicStar, t[gCanCrowd], bothLC[gCanCrowd], p0=trueParams, sigma=sigmBoth[gCanCrowd]
                    )
                except RuntimeError:
                    parmVals = trueParams * 0 - 666
            fits[i, :] = parmVals

        # Throw out any magnitude fits if there are no observations in that filter
        ufilters = np.unique(dataSlice[self.filterCol])
        if ufilters.size < 9:
            for key in self.filter2index.keys():
                if key not in ufilters:
                    fits[:, self.filter2index[key]] = -np.inf

        # Find the fraction of fits that meet the "well-fit" criteria
        # Use absolute errors: failed fits are filled with large negative
        # values and would otherwise pass the <= tolerance checks below.
        periodFracErr = np.abs((fits[:, 0] - trueParams[0]) / trueParams[0])
        ampFracErr = np.abs((fits[:, 2] - trueParams[2]) / trueParams[2])
        magErr = np.abs(fits[:, 3:] - trueParams[3:])
        nBands = np.zeros(magErr.shape, dtype=int)
        nBands[np.where(magErr <= self.magTol)] = 1
        nBands = np.sum(nBands, axis=1)
        nRecovered = np.size(
            np.where((periodFracErr <= self.periodTol) & (ampFracErr <= self.ampTol) & (nBands >= self.nBands))[0]
        )
        fracRecovered = float(nRecovered) / self.nMonte
        return fracRecovered
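
The Monte Carlo recovery test above reduces to: perturb a model light curve by its estimated uncertainties, refit it with scipy's curve_fit, and count the trials whose fitted parameters land within tolerance of the truth. A self-contained miniature of the same idea (the model, tolerances and sample sizes here are illustrative, not the metric's own):

import numpy as np
from scipy.optimize import curve_fit

def model(t, period, phase, amp, mag0):
    return mag0 + amp * np.sin(2.0 * np.pi * t / period + phase)

rng = np.random.default_rng(42)
t = np.sort(rng.uniform(0.0, 100.0, 80))
trueParams = (7.3, 0.5, 0.4, 20.0)
sigma = 0.05
nMonte, periodTol, nRecovered = 100, 0.01, 0

for _ in range(nMonte):
    y = model(t, *trueParams) + rng.normal(0.0, sigma, t.size)
    try:
        popt, _ = curve_fit(model, t, y, p0=trueParams,
                            sigma=np.full(t.size, sigma))
    except RuntimeError:
        continue  # a failed fit simply does not count as recovered
    if abs(popt[0] - trueParams[0]) / trueParams[0] <= periodTol:
        nRecovered += 1

print(nRecovered / nMonte)  # fraction of trials with a well-recovered period
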
Esempio n. 27
0
    def run(self, dataSlice, slicePoint=None):
        np.random.seed(2500)
        obs = np.where((dataSlice['filter'] == self.f) &
                       (dataSlice[self.mjdCol] < min(dataSlice[self.mjdCol]) +
                        365 * self.surveyduration))
        d = np.array(self.simobj['d'])
        M = np.array(self.simobj['MAG'])
        fieldRA, fieldDec = np.mean(dataSlice['fieldRA']), np.mean(
            dataSlice['fieldDec'])
        z, R = d * np.sin(fieldRA), d * np.cos(fieldRA)
        component = self.position_selection(R, z)
        mjd = dataSlice[self.mjdCol][obs]
        fwhm = dataSlice[self.seeingCol][obs]
        V_galactic = np.vstack((self.U, self.V, self.W))
        Pv = self.DF(V_galactic, component, R, z)
        marg_P = np.nanmean(Pv / np.nansum(Pv, axis=0), axis=0)
        marg_P /= np.nansum(marg_P)
        vel_idx = np.random.choice(np.arange(0, len(V_galactic[0, :]),
                                             1)[np.isfinite(marg_P)],
                                   p=marg_P[np.isfinite(marg_P)],
                                   size=3)
        # Tangential velocity of the ordinary population, sampled from the DF.
        # (Assumed: the source assigned this to vT_unusual and left vT
        # undefined whenever prob_type != 'uniform'.)
        vT = V_galactic[0, vel_idx][2]
        if self.prob_type == 'uniform':
            # scipy's uniform(loc, scale): loc=-100, scale=200 spans
            # [-100, 100] km/s (assumed intent; the source had scale=100).
            p_vel_unusual = uniform(-100, 200)
            v_unusual = p_vel_unusual.rvs(size=(3, np.size(d)))
            vT_unusual = v_unusual[2, :]
        else:
            p_vel_un = pd.read_csv(self.prob_type)
            vel_idx = np.random.choice(p_vel_un['vel'],
                                       p=p_vel_un['fraction'] /
                                       np.sum(p_vel_un['fraction']),
                                       size=3)
            vT_unusual = V_galactic[0, vel_idx][2]
        #vel_unusual = V_galactic[0,vel_idx]

        direction = np.random.choice((-1, 1))
        mu = direction * vT / 4.75 / d
        mu_unusual = direction * vT_unusual / 4.75 / d

        if len(dataSlice[self.m5Col][obs]) > 2:

            # select objects above the limit magnitude threshold
            snr = m52snr(M[:, np.newaxis], dataSlice[self.m5Col][obs])
            row, col = np.where(snr > self.snr_lim)
            if self.gap_selection:
                Times = np.sort(mjd)
                dt = np.array(list(combinations(Times, 2)))
                DeltaTs = np.absolute(np.subtract(dt[:, 0], dt[:, 1]))
                DeltaTs = np.unique(DeltaTs)
                if np.size(DeltaTs) > 0:
                    dt_pm = 0.05 * np.amin(
                        dataSlice[self.seeingCol][obs]) / mu[np.unique(row)]
                    selection = np.where((dt_pm > min(DeltaTs))
                                         & (dt_pm < max(DeltaTs)))
                else:
                    # no usable time baselines: keep all sources above the SNR cut
                    selection = np.unique(row)
            else:
                selection = np.unique(row)
            precis = astrom_precision(dataSlice[self.seeingCol][obs],
                                      snr[row, :])
            sigmapm = self.sigma_slope(dataSlice[self.mjdCol][obs],
                                       precis) * 365.25 * 1e3

            if np.size(selection) > 0:
                pa = np.random.uniform(0, 2 * np.pi,
                                       len(mu_unusual[selection]))
                pm_alpha, pm_delta = mu[selection] * np.sin(
                    pa), mu[selection] * np.cos(pa)
                pm_un_alpha, pm_un_delta = mu_unusual[selection] * np.sin(
                    pa), mu_unusual[selection] * np.cos(pa)
                #p_min,p_max,p_mean = self.percentiles[0],self.percentiles[1],self.percentiles[2]
                mu = mu[selection] * 1e3
                mu_unusual = mu_unusual[selection] * 1e3
                variance_k = np.array([
                    np.std(mu[np.where(component[selection] == p)])
                    for p in ['H', 'D', 'B']
                ])
                variance_mu = np.std(mu)
                sigmaL = np.sqrt(
                    np.prod(variance_k, where=np.isfinite(variance_k))**2 +
                    variance_mu**2 + np.nanmedian(sigmapm)**2)
                unusual = np.where((mu_unusual < np.mean(mu_unusual) -
                                    self.sigma_threshold * sigmaL / 2)
                                   | (mu_unusual > np.mean(mu_unusual) +
                                      self.sigma_threshold * sigmaL / 2))
                res = np.size(unusual) / np.size(selection)

                if self.dataout:
                    dic = {
                        'detected':
                        res,
                        'pixID':
                        radec2pix(nside=16,
                                  ra=np.radians(fieldRA),
                                  dec=np.radians(fieldDec)),
                        'PM':
                        pd.DataFrame({
                            'pm_alpha': pm_alpha,
                            'pm_delta': pm_delta
                        }),
                        'PM_un':
                        pd.DataFrame({
                            'pm_alpha': pm_un_alpha,
                            'pm_delta': pm_un_delta
                        })
                    }
                    return dic
                else:
                    return res
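
The proper motions above come from the standard tangential-velocity relation v_T = 4.74 * mu * d (the snippet rounds the constant to 4.75). With v_T in km/s and d in kpc, mu = v_T / 4.74 / d comes out in mas/yr; whether that matches the snippet depends on the units stored in self.simobj['d']. A quick check:

def proper_motion_mas_yr(vT_kms, d_kpc):
    # mu [mas/yr] from tangential velocity [km/s] and distance [kpc]
    return vT_kms / 4.74 / d_kpc

print(proper_motion_mas_yr(50.0, 1.0))  # a 50 km/s star at 1 kpc: ~10.5 mas/yr
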
Esempio n. 28
0
    def run(self, dataSlice, slicePoint=None):
        """"Calculate the detectability of a transient with the specified lightcurve.

        If self.dataout is True, then returns the full lightcurve for each object instead of the total
        number of transients that are detected.

        Parameters
        ----------
        dataSlice : numpy.array
            Numpy structured array containing the data related to the visits provided by the slicer.
        slicePoint : dict, optional
            Dictionary containing information about the slicepoint currently active in the slicer.

        Returns
        -------
        float or dict
            The fraction of transients that could be detected (if dataout is False), or
            a dictionary with arrays of 'lcNumber', 'expMJD', 'epoch', 'filter', 'lcMag',
            'SNR' and 'detected' (if dataout is True).
        """

        # Sort the entire dataSlice in order of time.
        dataSlice.sort(order=self.mjdCol)

        # Check that surveyDuration is not larger than the time of observations we obtained.
        # (if it is, then the nTransMax will not be accurate).
        tSpan = (dataSlice[self.mjdCol].max() - dataSlice[self.mjdCol].min()) / 365.25
        surveyDuration = np.max([tSpan, self.surveyDuration])

        if self.surveyStart is None:
            surveyStart = dataSlice[self.mjdCol].min()
        else:
            surveyStart = self.surveyStart

        # Set up the starting times for each of the back-to-back sets of transients.
        tshifts = np.arange(self.nPhaseCheck) * self.transDuration / float(self.nPhaseCheck)
        # Total number of transients which have reached the detection thresholds.
        nDetected = 0
        # Total number of transients which could possibly be detected,
        # given survey duration and transient duration.
        nTransMax = 0
        # Set this, in case surveyStart was set to be much earlier than this data (so we start counting at 0).
        lcNumberStart = -1 * np.floor((dataSlice[self.mjdCol].min() - surveyStart) / self.transDuration)

        # Consider each different 'phase shift' separately.
        # We then just have a series of lightcurves, taking place back-to-back.
        for tshift in tshifts:
            # Update the maximum possible transients that could have been observed during surveyDuration.
            nTransMax += np.ceil(surveyDuration / (self.transDuration / 365.25))
            # Calculate the time/epoch for each lightcurve.
            lcEpoch = (dataSlice[self.mjdCol] - surveyStart + tshift) % self.transDuration
            # Identify the observations which belong to each distinct light curve.
            lcNumber = np.floor((dataSlice[self.mjdCol] - surveyStart) / self.transDuration) + lcNumberStart
            lcNumberStart = lcNumber.max()
            ulcNumber = np.unique(lcNumber)
            lcLeft = np.searchsorted(lcNumber, ulcNumber, side='left')
            lcRight = np.searchsorted(lcNumber, ulcNumber, side='right')

            # Generate the actual light curve magnitudes and SNR
            lcMags = self.make_lightCurve(lcEpoch, dataSlice[self.filterCol])
            lcSNR = m52snr(lcMags, dataSlice[self.m5Col])
            # Identify which detections rise above the required SNR threshold, in each filter.
            lcAboveThresh = np.zeros(len(lcSNR), dtype=bool)
            for f in np.unique(dataSlice[self.filterCol]):
                filtermatch = np.where(dataSlice[self.filterCol] == f)[0]
                lcAboveThresh[filtermatch] = (lcSNR[filtermatch] >= self.detectSNR[f])

            # Track whether each individual light curve was detected.
            # Start with the assumption that it is True, and if it fails criteria then becomes False.
            lcDetect = np.ones(len(ulcNumber), dtype=bool)

            # Loop through each lightcurve and check if it meets requirements.
            # Index light curves by position in ulcNumber: the lcNumber values
            # themselves need not start at zero, so they cannot index lcDetect.
            for lcI, (le, ri) in enumerate(zip(lcLeft, lcRight)):
                # If there were no observations at all for this lightcurve:
                if le == ri:
                    lcDetect[lcI] = False
                    # Skip the rest of this loop, go on to the next lightcurve.
                    continue
                lcEpochAboveThresh = lcEpoch[le:ri][np.where(lcAboveThresh[le:ri])]
                # If we did not get enough detections before preT, set lcDetect to False.
                timesPreT = np.where(lcEpochAboveThresh < self.preT)[0]
                if len(timesPreT) < self.nPreT:
                    lcDetect[lcI] = False
                    continue
                # If we did not get detections over enough sections of the lightcurve, set lcDetect to False.
                phaseSections = np.unique(np.floor(lcEpochAboveThresh / self.transDuration * self.nPerLC))
                if len(phaseSections) < self.nPerLC:
                    lcDetect[lcI] = False
                    continue
                # If we did not get detections in enough filters, set lcDetect to False.
                lcFilters = dataSlice[le:ri][np.where(lcAboveThresh[le:ri])][self.filterCol]
                if len(np.unique(lcFilters)) < self.nFilters:
                    lcDetect[lcI] = False
                    continue
                # If we did not get detections in enough filters within the required time, set lcDetect to False.
                if (self.filterT is not None) and (self.nFilters > 1):
                    xr = np.searchsorted(lcEpochAboveThresh, lcEpochAboveThresh + self.filterT, 'right')
                    xr = np.where(xr < len(lcEpochAboveThresh) - 1, xr, len(lcEpochAboveThresh) - 1)
                    foundGood = False
                    for i, xri in enumerate(xr):
                        if len(np.unique(lcFilters[i:xri])) >= self.nFilters:
                            foundGood = True
                            break
                    if not foundGood:
                        lcDetect[lcI] = False
                        continue
                # Done with current set of conditions.
                # (more complicated conditions should go later in the loop, simpler ones earlier).

            # Count the light curves that passed all of the conditions.
            nDetected += np.count_nonzero(lcDetect)

        if self.dataout:
            # Output all the light curves, regardless of detection threshold,
            # but indicate which were 'detected'.
            # Note: lcDetect here reflects only the final phase shift.
            lcDetectOut = np.ones(len(dataSlice), dtype=bool)
            for i, lcN in enumerate(lcNumber):
                lcDetectOut[i] = lcDetect[np.searchsorted(ulcNumber, lcN)]
            return {'lcNumber': lcNumber, 'expMJD': dataSlice[self.mjdCol], 'epoch': lcEpoch,
                    'filter': dataSlice[self.filterCol], 'lcMag': lcMags, 'SNR': lcSNR,
                    'detected': lcDetectOut}
        else:
            return float(nDetected) / nTransMax
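
The bookkeeping above gives every visit a light-curve number and then recovers each curve's span of visits with np.searchsorted against the lcNumber array (which is non-decreasing, since dataSlice is time-sorted). A toy sketch of that grouping:

import numpy as np

transDuration = 30.0  # days, illustrative
mjd = np.array([0.0, 5.0, 33.0, 47.0, 61.0, 95.0])  # already time-sorted

lcNumber = np.floor(mjd / transDuration)  # which transient each visit belongs to
ulcNumber = np.unique(lcNumber)
lcLeft = np.searchsorted(lcNumber, ulcNumber, side='left')
lcRight = np.searchsorted(lcNumber, ulcNumber, side='right')

for lcN, le, ri in zip(ulcNumber, lcLeft, lcRight):
    print(int(lcN), mjd[le:ri])  # visits belonging to each light curve
# 0 [0. 5.]
# 1 [33. 47.]
# 2 [61.]
# 3 [95.]
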