Code example #1
    def lightcurve_in_flux(self):
        """
        Transform magnitude to flux using m=27.4-2.5*log10(flux) convention. Transform error bar
        accordingly. More details in microltoolbox module.

        :param string clean: 'Yes' or 'No'. Perform or not a clean_data call to avoid outliers.

        :return: the lightcurve in flux, lightcurve_flux.
        :rtype: array_like
        """

        lightcurve = self.lightcurve_magnitude

        time = lightcurve[:, 0]
        mag = lightcurve[:, 1]
        err_mag = lightcurve[:, 2]

        flux = microltoolbox.magnitude_to_flux(mag)
        error_flux = microltoolbox.error_magnitude_to_error_flux(err_mag, flux)
        lightcurve_in_flux = np.array([time, flux, error_flux]).T

        return lightcurve_in_flux
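
The method above relies on numpy (np) and pyLIMA's microltoolbox for the actual conversion. As a point of reference, here is a minimal standalone sketch of what those conversions presumably compute under the m = 27.4 - 2.5*log10(flux) convention quoted in the docstring; the two helpers below only mirror the names used in the method and are illustrative re-implementations, not the library code.

import numpy as np

ZERO_POINT = 27.4  # zero point of the m = 27.4 - 2.5*log10(flux) convention

def magnitude_to_flux(mag):
    # Invert m = ZERO_POINT - 2.5*log10(flux)  ->  flux = 10**((ZERO_POINT - m) / 2.5)
    return 10 ** ((ZERO_POINT - mag) / 2.5)

def error_magnitude_to_error_flux(err_mag, flux):
    # First-order error propagation: |dflux| = |dmag| * flux * ln(10) / 2.5
    return err_mag * flux * np.log(10) / 2.5

# Example: an 18.0 +/- 0.01 mag measurement
flux = magnitude_to_flux(18.0)  # ~5754 in this flux convention
err_flux = error_magnitude_to_error_flux(0.01, flux)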
Code example #2
import numpy as np
import scipy.signal as ss

# magnitude/flux conversion helpers used below; the import path assumes the pyLIMA package layout
from pyLIMA import microltoolbox


def initial_guess_PSPL(event):
    """Function to find initial PSPL guess for Levenberg-Marquardt solver (method=='LM').
       This assumes no blending.

       :param object event: the event object on which you perform the fit on. More details on the
       event module.

       :return: the PSPL guess for this event.A list with Paczynski parameters (to,uo,tE) and the
       source flux of the survey telescope.
       :rtype: list,float
    """

    # to estimation
    to_estimations = []
    maximum_flux_estimations = []
    errors_magnitude = []

    for telescope in event.telescopes:
        # A lot of processing happens here; if any step fails, fall back to the rough
        # estimate in the except block.
        lightcurve_magnitude = telescope.lightcurve_magnitude
        mean_error_magnitude = np.mean(lightcurve_magnitude[:, 2])
        try:

            # only the best photometry
            good_photometry_indexes = np.where((lightcurve_magnitude[:, 2] <
                                                max(0.1, mean_error_magnitude)))[0]
            lightcurve_bis = lightcurve_magnitude[good_photometry_indexes]

            lightcurve_bis = lightcurve_bis[lightcurve_bis[:, 0].argsort(), :]

            mag = lightcurve_bis[:, 1]
            flux = microltoolbox.magnitude_to_flux(mag)

            # clean the lightcurve using Savitzky-Golay filter on 3 points, degree 1.
            mag_clean = ss.savgol_filter(mag, 3, 1)
            time = lightcurve_bis[:, 0]
            flux_clean = microltoolbox.magnitude_to_flux(mag_clean)
            errmag = lightcurve_bis[:, 2]

            flux_source = min(flux_clean)
            good_points = np.where(flux_clean > flux_source)[0]

            # Iteratively keep the brighter, better-measured points until they are tightly
            # clustered in time and few in number (rough localisation of the peak).
            while (np.std(time[good_points]) > 5) | (len(good_points) > 100):

                indexes = \
                    np.where((flux_clean[good_points] > np.median(flux_clean[good_points])) & (
                        errmag[good_points] <= max(0.1, 2.0 * np.mean(errmag[good_points]))))[0]

                if len(indexes) < 1:

                    break

                else:

                    good_points = good_points[indexes]

                    # gravity = (
                    #   np.median(time[good_points]), np.median(flux_clean[good_points]),
                    #    np.mean(errmag[good_points]))

                    # distances = np.sqrt((time[good_points] - gravity[0]) ** 2 / gravity[0] ** 2)

            to = np.median(time[good_points])
            max_flux = max(flux[good_points])
            to_estimations.append(to)
            maximum_flux_estimations.append(max_flux)
            errors_magnitude.append(np.mean(lightcurve_bis[good_points, 2]))

        except Exception:

            time = lightcurve_magnitude[:, 0]
            flux = microltoolbox.magnitude_to_flux(lightcurve_magnitude[:, 1])
            to = np.median(time)
            max_flux = max(flux)
            to_estimations.append(to)
            maximum_flux_estimations.append(max_flux)

            errors_magnitude.append(mean_error_magnitude)

    # Inverse-variance weighted mean of the per-telescope to estimates.
    to_guess = sum(np.array(to_estimations) / np.array(errors_magnitude) ** 2) / sum(
        1 / np.array(errors_magnitude) ** 2)
    survey = event.telescopes[0]
    lightcurve = survey.lightcurve_magnitude
    lightcurve = lightcurve[lightcurve[:, 0].argsort(), :]

    # fs, uo, tE estimations use only the survey telescope

    time = lightcurve[:, 0]
    flux = microltoolbox.magnitude_to_flux(lightcurve[:, 1])
    errflux = microltoolbox.error_magnitude_to_error_flux(lightcurve[:, 2], flux)

    # fs estimation, no blend
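    # Iteratively take the median of the points at or below the current baseline (within their
    # error bars) until the estimate changes by less than 1%.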

    baseline_flux_0 = np.min(flux)
    baseline_flux = np.median(flux)

    while np.abs(baseline_flux_0 - baseline_flux) > 0.01 * baseline_flux:

        baseline_flux_0 = baseline_flux
        indexes = np.where((flux < baseline_flux))[0].tolist() + np.where(
            np.abs(flux - baseline_flux) < np.abs(errflux))[0].tolist()
        baseline_flux = np.median(flux[indexes])

        if len(indexes) < 100:
            # Too few baseline points: fall back to the median of the 100 faintest fluxes.
            print('low')
            baseline_flux = np.median(flux[flux.argsort()[:100]])
            break

    fs_guess = baseline_flux

    # uo estimation
    max_flux = maximum_flux_estimations[0]
    Amax = max_flux / fs_guess
    if (Amax < 1.0) | np.isnan(Amax):
        Amax = 1.1
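    # Invert the point-lens magnification A(u) = (u**2 + 2) / (u * sqrt(u**2 + 4)) at A = Amax,
    # which gives u**2 = -2 + 2*sqrt(1 + 1/(Amax**2 - 1)); the equivalent form
    # 1 - 1/(1 - Amax**2) is used below.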
    uo_guess = np.sqrt(-2 + 2 * np.sqrt(1 - 1 / (1 - Amax ** 2)))


    # tE estimations
    tE_guesses = []

    # Method 1 : flux(t_demi_amplification) = 0.5 * fs_guess * (Amax + 1)

    half_magnification = 0.5 * (Amax + 1)

    flux_demi_amplification = fs_guess * half_magnification

    index_plus = np.where((time > to_guess) & (flux < flux_demi_amplification))[0]
    index_moins = np.where((time < to_guess) & (flux < flux_demi_amplification))[0]

    if len(index_plus) != 0:

        if len(index_moins) != 0:
            t_demi_amplification = (time[index_plus[0]] - time[index_moins[-1]])
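            # time[index_plus[0]] - time[index_moins[-1]] spans both wings of the lightcurve,
            # hence the extra factor of 2 in the denominator below.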
            tE_demi_amplification = t_demi_amplification / (
                2 * np.sqrt(-2 + 2 * np.sqrt(1 + 1 / (half_magnification ** 2 - 1)) - uo_guess ** 2))

            tE_guesses.append(tE_demi_amplification)

        else:
            t_demi_amplification = time[index_plus[0]] - to_guess
            tE_demi_amplification = t_demi_amplification / np.sqrt(
                -2 + 2 * np.sqrt(1 + 1 / (half_magnification ** 2 - 1)) - uo_guess ** 2)

            tE_guesses.append(tE_demi_amplification)
    else:

        if len(index_moins) != 0:
            t_demi_amplification = to_guess - time[index_moins[-1]]
            tE_demi_amplification = t_demi_amplification / np.sqrt(
                -2 + 2 * np.sqrt(1 + 1 / (half_magnification ** 2 - 1)) - uo_guess ** 2)

            tE_guesses.append(tE_demi_amplification)

    # Method 2 : flux(t_E) = fs_guess * (uo^2+3)/[(uo^2+1)^0.5*(uo^2+5)^0.5]

    amplification_tE = (uo_guess ** 2 + 3) / ((uo_guess ** 2 + 1) ** 0.5 * np.sqrt(uo_guess ** 2 + 5))
    flux_tE = fs_guess * amplification_tE

    index_tE_plus = np.where((flux < flux_tE) & (time > to_guess))[0]
    index_tE_moins = np.where((flux < flux_tE) & (time < to_guess))[0]

    if len(index_tE_moins) != 0:
        index_tE_moins = index_tE_moins[-1]
        tE_moins = to_guess - time[index_tE_moins]

        tE_guesses.append(tE_moins)

    if len(index_tE_plus) != 0:
        index_tE_plus = index_tE_plus[0]
        tE_plus = time[index_tE_plus] - to_guess

        tE_guesses.append(tE_plus)

    # Method 3 : the first points before/after to_guess that reach the baseline. Very rough
    # approximation of tE.

    index_tE_baseline_plus = np.where((time > to_guess) & (np.abs(flux - fs_guess) < np.abs(errflux)))[0]
    index_tE_baseline_moins = np.where((time < to_guess) & (np.abs(flux - fs_guess) < np.abs(errflux)))[0]

    if len(index_tE_baseline_plus) != 0:
        tEPlus = time[index_tE_baseline_plus[0]] - to_guess

        tE_guesses.append(tEPlus)

    if len(index_tE_baseline_moins) != 0:
        tEMoins = to_guess - time[index_tE_baseline_moins[-1]]

        tE_guesses.append(tEMoins)

    tE_guess = np.median(tE_guesses)

    # safety fallback, unlikely to be needed
    if (tE_guess < 0.1) | np.isnan(tE_guess):
        tE_guess = 20.0

    # [to,uo,tE], fsource

    return [to_guess, uo_guess, tE_guess], fs_guess
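
As a quick sanity check of the half-magnification relation used in Method 1 above, the sketch below simulates a noiseless PSPL flux curve with known (to, uo, tE, fs), locates the first epoch after to where the flux falls below fs * 0.5 * (Amax + 1), and recovers tE with the same expression. It is purely illustrative and uses only numpy, not the pyLIMA event objects.

import numpy as np

def pspl_magnification(u):
    # Standard point-source point-lens magnification A(u).
    return (u ** 2 + 2) / (u * np.sqrt(u ** 2 + 4))

to, uo, tE, fs = 0.0, 0.3, 25.0, 1000.0
time = np.linspace(-100.0, 100.0, 100001)
u = np.sqrt(uo ** 2 + ((time - to) / tE) ** 2)
flux = fs * pspl_magnification(u)

Amax = pspl_magnification(uo)
half_magnification = 0.5 * (Amax + 1)

# First epoch after to where the flux drops below the half-magnification level.
index_plus = np.where((time > to) & (flux < fs * half_magnification))[0][0]
t_demi_amplification = time[index_plus] - to

tE_recovered = t_demi_amplification / np.sqrt(
    -2 + 2 * np.sqrt(1 + 1 / (half_magnification ** 2 - 1)) - uo ** 2)
print(tE_recovered)  # ~25.0, matching the input tE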