Example #1
def download_images_from_metadatas(metadatas):
    """ Given a list of metadata instances, download the 'pfilename' files
        and return PTFImage objects for each file
    """
    ptf_images = []
    for metadata in metadatas:
        url = os.path.join(IPAC_DATA_URL, metadata["pfilename"])
        ptf_images.append(PTFImage(retrieve_ipac_file(url), metadata=metadata))
        print greenText("Image {} downloaded.".format(os.path.basename(metadata["pfilename"])))
    
    return ptf_images
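
A minimal usage sketch for the helper above (hypothetical: it assumes the module context of these examples, where parse_ipac_table() and an IPAC table file object are available; 'table_file' is an assumed name):

# Hypothetical driver -- 'table_file' would come from a prior IPAC query.
metadatas = parse_ipac_table(table_file)
images = download_images_from_metadatas(metadatas)
print "Downloaded {} images".format(len(images))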
Example #2
def ptf_images_from_position(ra,
                             dec,
                             size,
                             intersect="covers",
                             filter="R",
                             epoch=None,
                             number=None):
    """ Creates PTF FITS Images given an equatorial position (RA/Dec) 
        and a size.
        
        Parameters
        ----------
        ra : apwlib.geometry.RA or any type parsable by apwlib
            A right ascension.
        dec : apwlib.geometry.Dec or any type parsable by apwlib
            A declination.
        size : apwlib.geometry.Angle, tuple
            An angular extent on the sky, or a tuple of 2 angular extents
            representing a size in RA and a size in Dec.
        intersect : str
            See IPAC image query documentation:
                http://kanaloa.ipac.caltech.edu/ibe/queries.html
        filter : str, optional
            Select only observations of this filter.
        epoch : float, optional
            The MJD of the observation of the image. If not specified, the
            image with the best seeing is returned.
        number : int, optional
            The maximum number of images to return.
    """

    ra = g.RA(ra)
    dec = g.Dec(dec)

    if isinstance(size, tuple) and isinstance(size[0], g.Angle) and isinstance(
            size[1], g.Angle):
        size_str = "SIZE={w.degrees},{h.degrees}".format(w=size[0], h=size[1])
    elif isinstance(size, g.Angle):
        size_str = "SIZE={0.degrees}".format(size)
    else:
        raise TypeError(
            "'size' must be a tuple of apwlib.geometry.Angle objects, or a single Angle object."
        )

    # Construct search URL with parameters
    pos_str = "POS={ra.degrees},{dec.degrees}".format(ra=ra, dec=dec)

    if number == 1:
        intersect_str = "INTERSECT=CENTER&mcen"
    else:
        intersect_str = "INTERSECT={}".format(intersect.upper())

    search_url_append = "?{}&{}&{}&where=filter IN ('{}')".format(
        pos_str, size_str, intersect_str, filter)

    if epoch is not None:
        search_url_append += " AND obsmjd IN ({})".format(epoch)

    table_file = retrieve_ipac_file(
        IPAC_SEARCH_URL + urllib.quote(search_url_append) +
        "&columns={}".format(",".join(SEARCH_COLUMNS)))

    metadatas = parse_ipac_table(table_file)

    num = 0
    ptf_images = []
    for metadata in metadatas:
        cutout_url = os.path.join(IPAC_DATA_URL, metadata["pfilename"])
        cutout_url_query = "?center={ra.degrees},{dec.degrees}&{size}&gzip=false".format(
            ra=ra, dec=dec, size=size_str.lower())

        try:
            ptf_images.append(
                PTFImage(retrieve_ipac_file(cutout_url + cutout_url_query),
                         metadata=metadata))
            num += 1
            print greenText("Image {} downloaded.".format(
                os.path.basename(ptf_images[-1].metadata["pfilename"])))
        except urllib2.HTTPError:
            print yellowText(
                "Image failed to download:\n\t{}".format(cutout_url +
                                                         cutout_url_query))

        if number is not None and num >= number:
            break

    return ptf_images
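
For reference, the search URL assembled above reduces to this self-contained sketch (Python 2; the base URL is illustrative rather than the module's real IPAC_SEARCH_URL, and only the where clause is percent-encoded here):

import urllib

search_url = "http://kanaloa.ipac.caltech.edu/ibe/search/ptf/images/level1"  # hypothetical
params = "POS={0},{1}&SIZE={2}&INTERSECT=COVERS".format(210.9, 21.2, 0.1)
where = urllib.quote("filter IN ('R')")
print "{0}?{1}&where={2}".format(search_url, params, where)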
Example #3
def compare_detection_efficiencies_on_field(
        field,
        light_curves_per_ccd,
        events_per_light_curve,
        overwrite=False,
        indices=["j", "k", "eta", "sigma_mu", "delta_chi_squared"],
        u0s=[],
        limiting_mags=[]):
    """ TODO: document """

    if u0s is None or len(u0s) == 0:
        u0s = [None]

    if limiting_mags is None or len(limiting_mags) == 0:
        limiting_mags = [14.3, 21]

    file_base = "field{:06d}_Nperccd{}_Nevents{}_u0_{}_m{}".format(
        field.id, light_curves_per_ccd, events_per_light_curve, "-".join(
            map(str, u0s)), "-".join(map(str, limiting_mags))) + ".{ext}"
    pickle_filename = os.path.join("data", "detectionefficiency",
                                   file_base.format(ext="pickle"))
    plot_filename = os.path.join("plots", "detectionefficiency",
                                 file_base.format(ext="png"))
    fpr_plot_filename = os.path.join(
        "plots", "detectionefficiency",
        "fpr_{}".format(file_base.format(ext="png")))

    if not os.path.exists(os.path.dirname(pickle_filename)):
        os.mkdir(os.path.dirname(pickle_filename))

    if not os.path.exists(os.path.dirname(plot_filename)):
        os.mkdir(os.path.dirname(plot_filename))

    if os.path.exists(pickle_filename) and overwrite:
        logger.debug("Data file exists, but you want to overwrite it!")
        os.remove(pickle_filename)
        logger.debug("Data file deleted...")

    # If the cache pickle file doesn't exist, generate the data
    if not os.path.exists(pickle_filename):
        logger.info("Data file {} not found. Generating data...".format(
            pickle_filename))

        # Conditions for reading from the 'sources' table
        #   - Only select sources with enough good observations (>25)
        #   - Omit sources with large amplitude variability so they don't mess with our simulation
        wheres = [
            "(ngoodobs > 25)", "(stetsonJ < 100)", "(vonNeumannRatio > 1.0)",
            "(stetsonJ > 0)"
        ]

        # Keep track of calculated var indices for each CCD
        var_indices = dict()
        var_indices_with_events = dict()

        for ccd in field.ccds.values():
            logger.info(greenText("Starting with CCD {}".format(ccd.id)))

            # Get the chip object for this CCD
            chip = ccd.read()

            for ii, limiting_mag in enumerate(limiting_mags[:-1]):
                # Define bin edges for selection on reference magnitude
                limiting_mag1 = limiting_mag
                limiting_mag2 = limiting_mags[ii + 1]
                mag_key = (limiting_mag1, limiting_mag2)
                logger.info("\tMagnitude range: {:.2f} - {:.2f}".format(
                    limiting_mag1, limiting_mag2))

                if mag_key not in var_indices_with_events:
                    var_indices_with_events[mag_key] = dict()

                read_wheres = wheres + [
                    "(referenceMag >= {:.3f})".format(limiting_mag1)
                ]
                read_wheres += [
                    "(referenceMag < {:.3f})".format(limiting_mag2)
                ]

                # Read information from the 'sources' table
                source_ids = chip.sources.readWhere(
                    " & ".join(read_wheres))["matchedSourceID"]

                # Randomly shuffle the sources
                np.random.shuffle(source_ids)
                logger.info("\t\tSelected {} source ids".format(
                    len(source_ids)))

                dtype = zip(indices, [float] * len(indices))
                count = 0
                good_source_ids = []
                for source_id in source_ids:
                    logger.debug("\t\t\tSource ID: {}".format(source_id))
                    light_curve = ccd.light_curve(source_id,
                                                  clean=True,
                                                  barebones=True)

                    # After my quality cut, if light curve has less than 25 observations, skip it!
                    if len(light_curve.mjd) < 25:
                        continue

                    # Run simulation to compute false positive variability indices
                    logger.debug("Starting false positive simulation")
                    these_indices = simulate_light_curves_compute_indices(
                        light_curve,
                        num_simulated=events_per_light_curve,
                        indices=indices)
                    try:
                        var_indices_simulated = np.hstack(
                            (var_indices_simulated, these_indices))
                    except NameError:
                        var_indices_simulated = these_indices

                    these_var_indices = np.array([
                        analyze.compute_variability_indices(
                            light_curve, indices, return_tuple=True)
                    ],
                                                 dtype=dtype)
                    try:
                        var_indices[mag_key] = np.hstack(
                            (var_indices[mag_key], these_var_indices))
                    except KeyError:
                        var_indices[mag_key] = these_var_indices

                    for u0 in u0s:
                        logger.debug(
                            "Starting detection efficiency computation for u0={}"
                            .format(u0))
                        these_indices = simulate_events_compute_indices(
                            light_curve,
                            events_per_light_curve=events_per_light_curve,
                            indices=indices,
                            u0=u0)
                        try:
                            var_indices_with_events[mag_key][u0] = np.hstack(
                                (var_indices_with_events[mag_key][u0],
                                 these_indices))
                        except KeyError:
                            var_indices_with_events[mag_key][
                                u0] = these_indices

                    good_source_ids.append(source_id)
                    count += 1
                    if count >= light_curves_per_ccd: break

                if len(good_source_ids) == 0:
                    logger.error(
                        "No good sources selected from this CCD for mag range {:.2f}-{:.2f}!"
                        .format(limiting_mag1, limiting_mag2))
                    continue

                logger.info("\t\t{} good light curves selected".format(count))

                # HACK: This is super hacky...
                # ----------------------------------------------
                while count < light_curves_per_ccd:
                    idx = np.random.randint(len(good_source_ids))
                    source_id = good_source_ids[idx]

                    logger.debug("\t\t\tSource ID: {}".format(source_id))
                    light_curve = ccd.light_curve(source_id,
                                                  clean=True,
                                                  barebones=True)
                    #light_curve.shuffle()

                    these_var_indices = np.array([
                        analyze.compute_variability_indices(
                            light_curve, indices, return_tuple=True)
                    ],
                                                 dtype=dtype)
                    try:
                        var_indices[mag_key] = np.hstack(
                            (var_indices[mag_key], these_var_indices))
                    except KeyError:
                        var_indices[mag_key] = these_var_indices

                    for u0 in u0s:
                        these_indices = simulate_events_compute_indices(
                            light_curve,
                            events_per_light_curve=events_per_light_curve,
                            indices=indices,
                            u0=u0)
                        try:
                            var_indices_with_events[mag_key][u0] = np.hstack(
                                (var_indices_with_events[mag_key][u0],
                                 these_indices))
                        except KeyError:
                            var_indices_with_events[mag_key][
                                u0] = these_indices

                    count += 1
                # ----------------------------------------------

            ccd.close()

        f = open(pickle_filename, "w")
        pickle.dump(
            (var_indices, var_indices_with_events, var_indices_simulated), f)
        f.close()
    else:
        logger.info("Data file {} already exists".format(pickle_filename))

    logger.info(
        greenText("Starting plot routine!") +
        "\n  Data source: {}\n  Plotting and saving to: {}".format(
            pickle_filename, plot_filename))
    logger.debug("\t\tReading in data file...")
    f = open(pickle_filename, "r")
    var_indices, var_indices_with_events, var_indices_simulated = pickle.load(
        f)
    f.close()
    logger.debug("\t\tData loaded!")

    # Styling for lines: J, K, eta, sigma_mu, delta_chi_squared
    line_styles = { "j" : {"lw" : 1, "ls" : "-", "color" : "r", "alpha" : 0.5}, \
                   "k" : {"lw" : 1, "ls" : "-", "color" : "g", "alpha" : 0.5}, \
                   "eta" : {"lw" : 3, "ls" : "-", "color" : "k"}, \
                   "sigma_mu" : {"lw" : 1, "ls" : "-", "color" : "b", "alpha" : 0.5}, \
                   "delta_chi_squared" : {"lw" : 3, "ls" : "--", "color" : "k"},
                   "con" : {"lw" : 3, "ls" : ":", "color" : "k"},
                   "corr" : {"lw" : 3, "ls" : ":", "color" : "r"}}

    num_u0_bins = len(u0s)
    num_mag_bins = len(limiting_mags) - 1

    # Detection efficiency figure, axes
    eff_fig, eff_axes = plt.subplots(num_u0_bins,
                                     num_mag_bins,
                                     sharex=True,
                                     sharey=True,
                                     figsize=(25, 25))

    for ii, limiting_mag_pair in enumerate(sorted(var_indices.keys())):
        selection_indices = var_indices[limiting_mag_pair]
        for jj, u0 in enumerate(
                sorted(var_indices_with_events[limiting_mag_pair].keys())):

            Nsigmas = determine_Nsigma(selection_indices,
                                       var_indices_simulated, indices)

            # Log the values of N x sigma
            for index, Nsigma in Nsigmas.items():
                logger.info("{} = {}sigma".format(index, Nsigma))

            data = compute_detection_efficiency(
                selection_indices,
                var_indices_with_events[limiting_mag_pair][u0],
                indices,
                Nsigmas=Nsigmas)

            try:
                eff_ax = eff_axes[ii, jj]
            except (IndexError, TypeError):
                # Only one subplot was created, so eff_axes is a single axis
                eff_ax = eff_axes

            for index_name in indices:
                eff_ax.semilogx((data["bin_edges"][1:]+data["bin_edges"][:-1])/2, \
                             data[index_name]["detections_per_bin"] / data["total_counts_per_bin"], \
                             label=r"{}".format(index_to_label[index_name]), \
                             **line_styles[index_name])
                #label=r"{}: $\varepsilon$={:.3f}, $F$={:.1f}%".format(index_to_label[index_name], data[index_name]["total_efficiency"], data[index_name]["num_false_positives"]/(11.*events_per_light_curve*light_curves_per_ccd)*100), \

            # Fix the y range and modify the tick lines
            eff_ax.set_ylim(0., 1.0)
            eff_ax.tick_params(which='major', length=10, width=2)
            eff_ax.tick_params(which='minor', length=5, width=1)

            if ii == 0:
                try:
                    eff_ax.set_title("$u_0$={:.2f}".format(u0), size=36, y=1.1)
                except TypeError:
                    # u0 may be None, which has no {:.2f} representation
                    pass

            if jj == (num_mag_bins - 1):
                eff_ax.set_ylabel("{:.1f}<R<{:.1f}".format(*limiting_mag_pair),
                                  size=32,
                                  rotation="horizontal")
                eff_ax.yaxis.set_label_position("right")

            plt.setp(eff_ax.get_xticklabels(), visible=False)
            plt.setp(eff_ax.get_yticklabels(), visible=False)

            if jj == 0:
                plt.setp(eff_ax.get_yticklabels(), visible=True, size=24)
                eff_ax.set_ylabel(r"$\varepsilon$", size=38)
            if ii == (num_u0_bins - 1):
                eff_ax.set_xlabel(r"$t_E$ [days]", size=34)
                plt.setp(eff_ax.get_xticklabels(), visible=True, size=24)

            if jj == (num_mag_bins - 1) and ii == (num_u0_bins - 1):
                legend = eff_ax.legend(loc="upper right")
                legend_text = legend.get_texts()
                plt.setp(legend_text, fontsize=36)

    eff_fig.subplots_adjust(hspace=0.1, wspace=0.1, right=0.88)
    logger.debug("Saving figure and cleaning up!")
    eff_fig.savefig(plot_filename)
    #fpr_fig.savefig(fpr_plot_filename)

    vifigure = VIFigure(indices=indices, figsize=(22, 22))
    vifigure.scatter(var_indices_simulated, alpha=0.1)
    #vifigure.contour(var_indices, nbins=50)
    vifigure.beautify()
    plot_path = os.path.join("plots", "var_indices")
    vifigure.save(
        os.path.join(
            plot_path,
            "field{}_Nperccd{}_Nevents{}.png".format(field.id,
                                                     light_curves_per_ccd,
                                                     events_per_light_curve)))
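
The try/except accumulation idiom that recurs above (NameError on the first batch, KeyError for a new dict key) looks like this in isolation, as a runnable sketch with an illustrative dtype:

import numpy as np

dtype = [("eta", float), ("j", float)]
for batch in range(3):
    these = np.array([(0.5, 1.2)], dtype=dtype)
    try:
        accumulated = np.hstack((accumulated, these))
    except NameError:
        # First batch: 'accumulated' does not exist yet
        accumulated = these
print len(accumulated)  # 3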
Example #4
    parser.add_argument("--u0", dest="u0", nargs="+", type=float, default=None,
                    help="Only add microlensing events with the specified impact parameter.")
    parser.add_argument("--mag", dest="limiting_mag", nargs="+", type=float, default=None,
                    help="Specify the magnitude bin edges, e.g. 6 bin edges specifies 5 bins.")

    args = parser.parse_args()

    if args.verbose:
        logger.setLevel(logging.DEBUG)
    elif args.quiet:
        logger.setLevel(logging.ERROR)
    else:
        logger.setLevel(logging.INFO)

    if args.test:
        np.random.seed(42)

        print "\n\n\n"
        greenText("/// Tests Complete! ///")
        sys.exit(0)

    np.random.seed(42)
    field = pdb.Field(args.field_id, filter="R")
    compare_detection_efficiencies_on_field(field, indices=["eta","delta_chi_squared", "con", "j","k","sigma_mu"], \
                                            events_per_light_curve=args.N,
                                            light_curves_per_ccd=args.limit,
                                            overwrite=args.overwrite,
                                            u0s=args.u0,
                                            limiting_mags=args.limiting_mag)
    #example_light_curves(field, u0s=args.u0, limiting_mags=args.limiting_mag)
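
The snippet above assumes a parser with verbose, quiet, test, overwrite, N, limit, and field_id options already defined. A minimal sketch of that setup (the flag names are assumptions inferred from the args attributes used above):

import argparse

parser = argparse.ArgumentParser(description="Detection efficiency simulation")
parser.add_argument("-v", "--verbose", action="store_true", dest="verbose")
parser.add_argument("-q", "--quiet", action="store_true", dest="quiet")
parser.add_argument("--test", action="store_true", dest="test")
parser.add_argument("-o", "--overwrite", action="store_true", dest="overwrite")
parser.add_argument("-N", dest="N", type=int, default=100,
                    help="Number of simulated events per light curve.")
parser.add_argument("--limit", dest="limit", type=int, default=0,
                    help="Number of light curves to select per CCD.")
parser.add_argument("--field-id", dest="field_id", type=int, required=True,
                    help="The PTF field ID to process.")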
Example #5
def get_var_indices(field,
                    light_curves_per_ccd,
                    events_per_light_curve,
                    indices,
                    overwrite=True):
    """ This function will create a len(indices) x len(indices) plot grid and plot
        distributions of all of the variability indices
    """

    limiting_mags = [14.3, 21]

    file_base = "field{:06d}_Nperccd{}_Nevents{}".format(
        field.id, light_curves_per_ccd, events_per_light_curve) + ".{ext}"
    pickle_filename = os.path.join("data", "var_indices",
                                   file_base.format(ext="pickle"))
    plot_filename = os.path.join("plots", "var_indices",
                                 file_base.format(ext="png"))

    if not os.path.exists(os.path.dirname(pickle_filename)):
        os.mkdir(os.path.dirname(pickle_filename))

    if not os.path.exists(os.path.dirname(plot_filename)):
        os.mkdir(os.path.dirname(plot_filename))

    if os.path.exists(pickle_filename) and overwrite:
        logger.debug("Data file exists, but you want to overwrite it!")
        os.remove(pickle_filename)
        logger.debug("Data file deleted...")

    # If the cache pickle file doesn't exist, generate the data
    if not os.path.exists(pickle_filename):
        logger.info("Data file {} not found. Generating data...".format(
            pickle_filename))

        # Conditions for reading from the 'sources' table
        #   - Only select sources with enough good observations (>25)
        wheres = [
            "(ngoodobs > 25)", "(vonNeumannRatio > 1)", "(stetsonJ > 0)",
            "(stetsonJ < 100)"
        ]

        for ccd in field.ccds.values():
            logger.info(greenText("Starting with CCD {}".format(ccd.id)))

            # Get the chip object for this CCD
            chip = ccd.read()

            for ii, limiting_mag in enumerate(limiting_mags[:-1]):
                # Define bin edges for selection on reference magnitude
                limiting_mag1 = limiting_mag
                limiting_mag2 = limiting_mags[ii + 1]
                mag_key = (limiting_mag1, limiting_mag2)
                logger.info("\tMagnitude range: {:.2f} - {:.2f}".format(
                    limiting_mag1, limiting_mag2))

                read_wheres = wheres + [
                    "(referenceMag >= {:.3f})".format(limiting_mag1)
                ]
                read_wheres += [
                    "(referenceMag < {:.3f})".format(limiting_mag2)
                ]

                # Read information from the 'sources' table
                sources = chip.sources.readWhere(" & ".join(read_wheres))
                #source_ids = sources["matchedSourceID"]
                source_idxs = range(len(sources))

                # Randomly shuffle the sources
                np.random.shuffle(source_idxs)
                logger.info("\t\tSelected {} source ids".format(len(sources)))

                dtype = zip(indices + ["tE", "u0", "m", "event_added"],
                            [float] * len(indices) +
                            [float, float, float, bool])
                count = 0
                good_source_ids = []
                for source_idx in source_idxs:
                    source = sources[source_idx]
                    source_id = source["matchedSourceID"]

                    logger.debug("\t\t\tSource ID: {}".format(source_id))
                    light_curve = ccd.light_curve(source_id,
                                                  clean=True,
                                                  barebones=True)

                    # After quality cut, if light curve has less than 25 observations, skip it!
                    if len(light_curve.mjd) < 25:
                        continue

                    these_var_indices = np.array([
                        analyze.compute_variability_indices(
                            light_curve, indices, return_tuple=True) +
                        (None, None, np.median(light_curve.mag), False)
                    ],
                                                 dtype=dtype)
                    try:
                        var_indices = np.hstack(
                            (var_indices, these_var_indices))
                    except NameError:
                        var_indices = these_var_indices

                    these_indices = de.simulate_events_compute_indices(
                        light_curve,
                        events_per_light_curve=events_per_light_curve,
                        indices=indices)
                    try:
                        var_indices_with_events = np.hstack(
                            (var_indices_with_events, these_indices))
                    except NameError:
                        var_indices_with_events = these_indices

                    good_source_ids.append(source_id)
                    count += 1
                    if count >= light_curves_per_ccd and light_curves_per_ccd != 0:
                        break

                if len(good_source_ids) == 0:
                    logger.error(
                        "No good sources selected from this CCD for mag range {:.2f}-{:.2f}!"
                        .format(limiting_mag1, limiting_mag2))
                    continue

                logger.info("\t\t{} good light curves selected".format(count))

            ccd.close()

        with open(pickle_filename, "w") as f:
            pickle.dump((var_indices, var_indices_with_events), f)

    else:
        logger.info("Data file {} already exists".format(pickle_filename))

    logger.debug("\t\tReading in data file...")
    with open(pickle_filename, "rb") as f:
        var_indices, var_indices_with_events = pickle.load(f)

    return var_indices, var_indices_with_events
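
The generate-or-load caching pattern shared by these examples, distilled into a small helper (a sketch, not the module's API; binary file modes are used since pickle prefers them):

import os
import pickle

def cached_pickle(filename, generate):
    """ Return the unpickled contents of filename, calling the zero-argument
        callable generate() to create and cache the data on a miss.
    """
    if not os.path.exists(filename):
        with open(filename, "wb") as f:
            pickle.dump(generate(), f)
    with open(filename, "rb") as f:
        return pickle.load(f)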
Example #6
def detection_efficiency_for_field(field, ccds=range(12), config=dict(), overwrite=False, indices=["eta","sigma_mu","j","k", "delta_chi_squared"], plot=True):
    """ Run a detection efficiency simulation for a PTF field """   
    
    # Get configuration variables or defaults
    min_number_of_good_observations = config.get("min_number_of_good_observations", 100)
    number_of_fpr_light_curves = config.get("number_of_fpr_light_curves", 10)
    number_of_fpr_simulations_per_light_curve = config.get("number_of_fpr_simulations_per_light_curve", 10)
    number_of_microlensing_light_curves = config.get("number_of_microlensing_light_curves", 10)
    number_of_microlensing_simulations_per_light_curve = config.get("number_of_microlensing_simulations_per_light_curve", 10)
    
    # Convenience variables for filenames
    file_base = "field{:06d}_Nperccd{}_Nevents{}".format(field.id, number_of_microlensing_light_curves, number_of_microlensing_simulations_per_light_curve) + ".{ext}"
    pickle_filename = os.path.join("data", "new_detection_efficiency", file_base.format(ext="pickle"))
    plot_filename = os.path.join("plots", "new_detection_efficiency", file_base.format(ext="pdf"))
    
    if not os.path.exists(os.path.dirname(pickle_filename)):
        os.mkdir(os.path.dirname(pickle_filename))
    
    if not os.path.exists(os.path.dirname(plot_filename)):
        os.mkdir(os.path.dirname(plot_filename))
    
    if os.path.exists(pickle_filename) and overwrite:
        logger.debug("Data file exists, but you want to overwrite it!")
        os.remove(pickle_filename)
        logger.debug("Data file deleted...")
    
    #print(pickle_filename, os.path.exists(pickle_filename))

    # If the cache pickle file doesn't exist, generate the data
    if not os.path.exists(pickle_filename):
        logger.info("Data file {} not found. Generating data...".format(pickle_filename))
        
        # Initialize my PDB statistic dictionary
        # I use a dictionary here because, after sub-selection, the index
        #   arrays may have different lengths.
        pdb_statistics = dict()
        for index in indices:
            pdb_statistics[index] = np.array([])
            
        for ccd in field.ccds.values():
            if ccd.id not in ccds: continue
            
            logger.info(greenText("Starting with CCD {}".format(ccd.id)))
            chip = ccd.read()
            
            logger.info("Getting variability statistics from photometric database")
            source_ids = []
            pdb_statistics_array = []
            for source in chip.sources.where("(ngoodobs > {})".format(min_number_of_good_observations)):
                pdb_statistics_array.append(tuple([source_index_name_to_pdb_index(source,index) for index in indices]))
                source_ids.append(source["matchedSourceID"])
            pdb_statistics_array = np.array(pdb_statistics_array, dtype=[(index,float) for index in indices])
            
            logger.debug("Selected {} statistics".format(len(pdb_statistics_array)))
            
            # I use a dictionary here because, after sub-selection, the index
            #   arrays may have different lengths.
            for index in indices:
                this_index_array = pdb_statistics_array[index]
                
                # This is where I need to define the selection distributions for each index.
                pdb_statistics[index] = np.append(pdb_statistics[index], prune_index_distribution(index, this_index_array))
            
            # Randomize the order of source_ids to prune through
            np.random.shuffle(source_ids)
            
            logger.info("Simulating light curves for false positive rate calculation")
            # Keep track of how many light curves we've used, break after we reach the specified number
            light_curve_count = 0
            for source_id in source_ids:
                light_curve = ccd.light_curve(source_id, barebones=True, clean=True)
                if len(light_curve.mjd) < min_number_of_good_observations: 
                    logger.debug("\tRejected source {}".format(source_id))
                    continue
                    
                logger.debug("\tSelected source {}".format(source_id))
                these_indices = vi.simulate_light_curves_compute_indices(light_curve, num_simulated=number_of_fpr_simulations_per_light_curve, indices=indices)
                try:
                    simulated_light_curve_statistics = np.hstack((simulated_light_curve_statistics, these_indices))
                except NameError:
                    simulated_light_curve_statistics = these_indices
                    
                light_curve_count += 1
                
                if light_curve_count >= number_of_fpr_light_curves:
                    break
                        
            logger.info("Starting microlensing event simulations")
            # Keep track of how many light curves we've used, break after we reach the specified number
            light_curve_count = 0            
            for source_id in source_ids:
                light_curve = ccd.light_curve(source_id, barebones=True, clean=True)
                if len(light_curve.mjd) < min_number_of_good_observations: 
                    logger.debug("\tRejected source {}".format(source_id))
                    continue
                
                logger.debug("\tSelected source {}".format(source_id))
                one_light_curve_statistics = vi.simulate_events_compute_indices(light_curve, events_per_light_curve=number_of_microlensing_simulations_per_light_curve, indices=indices)
                try:
                    simulated_microlensing_statistics = np.hstack((simulated_microlensing_statistics, one_light_curve_statistics))
                except NameError:
                    simulated_microlensing_statistics = one_light_curve_statistics

                light_curve_count += 1                
                if light_curve_count >= number_of_microlensing_light_curves:
                    break
            
            ccd.close()
        
        logger.info("Starting false positive rate calculation to get Nsigmas")
        # Now determine the N in N-sigma by computing the false positive rate and getting it to be ~0.01 (1%) for each index
        selection_criteria = {}
        for index in indices:
            logger.debug("\tIndex: {}".format(index))
            # Get the mean and standard deviation of the 'vanilla' distributions to select with
            mu,sigma = np.mean(pdb_statistics[index]), np.std(pdb_statistics[index])
            logger.debug("\t mu={}, sigma={}".format(mu, sigma))
            
            # Get the simulated statistics for this index
            these_statistics = np.log10(simulated_light_curve_statistics[index])
            
            # Start by selecting with Nsigma = 0
            Nsigma = 0.
            
            # Nsteps is the number of steps this routine has to take to converge -- just used for diagnostics
            Nsteps = 0
            while True:
                fpr = np.sum((these_statistics > (mu + Nsigma*sigma)) | (these_statistics < (mu - Nsigma*sigma))) / float(len(these_statistics))
                logger.debug("Step: {}, FPR: {}".format(Nsteps, fpr))
                
                # WARNING: If you don't use enough simulations, this may never converge!
                if fpr > 0.012: 
                    Nsigma += np.random.uniform(0., 0.05)
                elif fpr < 0.008:
                    Nsigma -= np.random.uniform(0., 0.05)
                else:
                    break
                
                Nsteps += 1
                
                if Nsteps > 1000:
                    logger.warning("{} didn't converge!".format(index))
                    break
                
            logger.info("{} -- Final Num. steps: {}, Final FPR: {}".format(index, Nsteps, fpr))
            logger.info("{} -- Final Nsigma={}, Nsigma*sigma={}".format(index, Nsigma, Nsigma*sigma))
            
            selection_criteria[index] = dict()
            selection_criteria[index]["upper"] = mu + Nsigma*sigma
            selection_criteria[index]["lower"] = mu - Nsigma*sigma
        
        with open(pickle_filename, "wb") as f:
            pickle.dump((simulated_microlensing_statistics, selection_criteria), f)
        
    with open(pickle_filename, "rb") as f:
        simulated_microlensing_statistics, selection_criteria = pickle.load(f)
    
    # Now compute the detection efficiency of each index using the selection criteria from the false positive rate simulation
    selected_distributions = {}
    detection_efficiencies = {}
    for index in indices:
        #this_index_values = simulated_microlensing_statistics[index]
        this_index_values = np.log10(simulated_microlensing_statistics[index])
        
        """
        if index == "eta":
            selection = this_index_values > 0
            this_index_values = np.log10(this_index_values[selection])
        elif index == "sigma_mu":
            selection = np.ones_like(this_index_values).astype(bool)
            this_index_values = np.log10(np.fabs(this_index_values))
        elif index == "j":
            selection = this_index_values > 0
            this_index_values = np.log10(this_index_values[selection])
        elif index == "k":
            selection = np.ones_like(this_index_values).astype(bool)
            this_index_values = np.log10(this_index_values)
        elif index == "delta_chi_squared":
            selection = this_index_values > 0
            this_index_values = np.log10(this_index_values[selection])
        """
        
        selected_ml_statistics = simulated_microlensing_statistics[(this_index_values > selection_criteria[index]["upper"]) | (this_index_values < selection_criteria[index]["lower"])]
        selected_distributions[index] = selected_ml_statistics
        
        total_detection_efficiency = len(selected_ml_statistics) / float(len(simulated_microlensing_statistics[index]))
        print "{}, eff={}".format(index, total_detection_efficiency)
        detection_efficiencies[index] = total_detection_efficiency
    
    if plot:
        plot_distributions(selected_distributions, simulated_microlensing_statistics, detection_efficiencies, params=["tE", "u0", "m"], filename=plot_filename, indices=indices)
    
    return simulated_microlensing_statistics, selected_distributions
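
The N-sigma tuning loop above is a random walk on the threshold until the two-sided false positive rate lands in [0.008, 0.012]. Isolated on a Gaussian sample (an illustrative, self-contained sketch) it converges near the expected ~2.6 sigma:

import numpy as np

np.random.seed(0)
stats = np.random.normal(size=100000)
mu, sigma = np.mean(stats), np.std(stats)

Nsigma = 0.
while True:
    # Two-sided false positive rate at the current threshold
    fpr = np.mean(np.abs(stats - mu) > Nsigma * sigma)
    if fpr > 0.012:
        Nsigma += np.random.uniform(0., 0.05)
    elif fpr < 0.008:
        Nsigma -= np.random.uniform(0., 0.05)
    else:
        break
print "Nsigma = {:.2f}".format(Nsigma)  # roughly 2.6 for a ~1% two-sided rate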
Example #7
def test_iscandidate(plot=False):
    ''' Use test light curves to test selection:
        - Periodic
        - Bad data
        - Various simulated events
        - Flat light curve
        - Transients (SN, Nova, etc.)
    '''

    np.random.seed(10)

    logger.setLevel(logging.DEBUG)
    from ptf.lightcurve import SimulatedLightCurve
    import ptf.db.mongodb as mongo

    db = mongo.PTFConnection()

    logger.info("---------------------------------------------------")
    logger.info(greenText("Periodic light curves"))
    logger.info("---------------------------------------------------")

    # Periodic light curves
    periodics = [(4588, 7, 13227), (4588, 2, 15432), (4588, 9, 17195), (2562, 10, 28317), (4721, 8, 11979), (4162, 2, 14360)]

    for field_id, ccd_id, source_id in periodics:
        periodic_light_curve = pdb.get_light_curve(field_id, ccd_id, source_id, clean=True)
        periodic_light_curve.indices = pa.compute_variability_indices(periodic_light_curve, indices=["eta", "delta_chi_squared", "j", "k", "sigma_mu"])
        assert pa.iscandidate(periodic_light_curve, lower_eta_cut=10**db.fields.find_one({"_id" : field_id}, {"selection_criteria" : 1})["selection_criteria"]["eta"]) in ["subcandidate" , False]
        if plot: plot_lc(periodic_light_curve)

    logger.info("---------------------------------------------------")
    logger.info(greenText("Bad light curves"))
    logger.info("---------------------------------------------------")

    # Bad data
    bads = [(3756, 0, 14281), (1983, 10, 1580)]

    for field_id, ccd_id, source_id in bads:
        bad_light_curve = pdb.get_light_curve(field_id, ccd_id, source_id, clean=True)
        bad_light_curve.indices = pa.compute_variability_indices(bad_light_curve, indices=["eta", "delta_chi_squared", "j", "k", "sigma_mu"])
        assert not pa.iscandidate(bad_light_curve, lower_eta_cut=10**db.fields.find_one({"_id" : field_id}, {"selection_criteria" : 1})["selection_criteria"]["eta"])
        if plot: plot_lc(bad_light_curve)

    logger.info("---------------------------------------------------")
    logger.info(greenText("Simulated light curves"))
    logger.info("---------------------------------------------------")

    # Simulated light curves
    for field_id,mjd in [(4721,periodic_light_curve.mjd)]:
        for err in [0.01, 0.05, 0.1]:
            logger.debug("field: {0}, err: {1}".format(field_id,err))
            light_curve = SimulatedLightCurve(mjd=mjd, mag=15, error=[err])
            light_curve.indices = pa.compute_variability_indices(light_curve, indices=["eta", "delta_chi_squared", "j", "k", "sigma_mu"])
            assert not pa.iscandidate(light_curve, lower_eta_cut=10**db.fields.find_one({"_id" : field_id}, {"selection_criteria" : 1})["selection_criteria"]["eta"])

            light_curve.add_microlensing_event(u0=np.random.uniform(0.2, 0.8), t0=light_curve.mjd[int(len(light_curve)/2)], tE=light_curve.baseline/8.)
            light_curve.indices = pa.compute_variability_indices(light_curve, indices=["eta", "delta_chi_squared", "j", "k", "sigma_mu"])
            if plot:
                plt.clf()
                light_curve.plot()
                plt.savefig("plots/tests/{0}_{1}.png".format(field_id,err))
            assert pa.iscandidate(light_curve, lower_eta_cut=10**db.fields.find_one({"_id" : field_id}, {"selection_criteria" : 1})["selection_criteria"]["eta"])

    logger.info("---------------------------------------------------")
    logger.info(greenText("Transient light curves"))
    logger.info("---------------------------------------------------")

    # Transients (SN, Novae)
    transients = [(4564, 0, 4703), (4914, 6, 9673), (100041, 1, 4855), (100082, 5, 7447), (4721, 8, 3208), (4445, 7, 11458),\
                  (100003, 6, 10741), (100001, 10, 5466), (4789, 6, 11457), (2263, 0, 3214), (4077, 8, 15293), (4330, 10, 6648), \
                  (4913, 7, 13436), (100090, 7, 2070), (4338, 2, 10330), (5171, 0, 885)]

    for field_id, ccd_id, source_id in transients:
        transient_light_curve = pdb.get_light_curve(field_id, ccd_id, source_id, clean=True)
        logger.debug(transient_light_curve)
        transient_light_curve.indices = pa.compute_variability_indices(transient_light_curve, indices=["eta", "delta_chi_squared", "j", "k", "sigma_mu"])
        assert pa.iscandidate(transient_light_curve, lower_eta_cut=10**db.fields.find_one({"_id" : field_id}, {"selection_criteria" : 1})["selection_criteria"]["eta"])
        if plot: plot_lc(transient_light_curve)
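
The repeated selection-criteria lookup in the asserts above can be factored into a helper (assuming the same mongo document layout; the 10** matches the usage above, where the eta cut is stored as its log10):

def lower_eta_cut(db, field_id):
    # Fetch this field's selection criteria and return the eta cut on a
    # linear scale.
    doc = db.fields.find_one({"_id": field_id}, {"selection_criteria": 1})
    return 10 ** doc["selection_criteria"]["eta"]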