Example #1
    def parse(self, filename):
        """
        Parses a file into an AstromData structure.

        Args:
          filename: str
            The name of the file whose contents will be parsed.

        Returns:
          data: AstromData
            The file contents extracted into a data structure for programmatic
            access.
        """
        filehandle = storage.open_vos_or_local(filename, "rb")
        filestr = filehandle.read()
        filehandle.close()

        assert filestr is not None, "File contents are None"

        observations = self._parse_observation_list(filestr)

        self._parse_observation_headers(filestr, observations)

        sys_header = self._parse_system_header(filestr)

        sources = self._parse_source_data(filestr, observations)

        return AstromData(observations, sys_header, sources)
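A minimal usage sketch for the parser above. Assumptions: the class lives at ossos.astrom.AstromParser, and the .astrom filename is a placeholder.

# Sketch only: the AstromParser import path and the filename are assumptions.
from ossos.astrom import AstromParser

parser = AstromParser()
data = parser.parse("candidates.reals.astrom")  # placeholder filename
for source in data.get_sources():
    reading = source.get_reading(0)
    print("{} {}".format(reading.x, reading.y))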
Example #2
File: snr.py Project: OSSOS/MOP
def get_sky(expnum, ccd):
    uri = storage.get_uri(expnum, ccd, ext='phot')

    fobj = StringIO(storage.open_vos_or_local(uri).read())
    fobj.seek(0)
    phot_table = ascii.read(fobj)
    return phot_table['MSKY'].mean()
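A hedged call sketch for get_sky; the exposure number and CCD below are placeholders, not references to a real dataset.

# Sketch only: 1616681 and 22 are placeholder values.
mean_sky = get_sky(1616681, 22)
print("mean MSKY: {}".format(mean_sky))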
Example #3
def _kbos_from_survey_sym_model_input_file(model_file):
    """
    Load a Survey Simulator model file as an array of ephem EllipticalBody objects.
    @param model_file:
    @return:
    """
    lines = storage.open_vos_or_local(model_file).read().split('\n')
    kbos = []
    for line in lines:
        if len(line) == 0 or line[0] == '#':  # skip initial column descriptors and the final blank line
            continue
        kbo = ephem.EllipticalBody()
        values = line.split()
        kbo.name = values[8]
        kbo.j = values[9]
        kbo.k = values[10]
        kbo._a = float(values[0])
        kbo._e = float(values[1])
        kbo._inc = float(values[2])
        kbo._Om = float(values[3])
        kbo._om = float(values[4])
        kbo._M = float(values[5])
        kbo._H = float(values[6])
        epoch = ephem.date(2453157.50000 - ephem.julian_date(0))
        kbo._epoch_M = epoch
        kbo._epoch = epoch
        kbos.append(kbo)
    return kbos
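One possible way to use the loader above, computing sky positions with pyephem at a chosen date; the model filename is a placeholder.

# Sketch only: the model filename is a placeholder.
import ephem

kbos = _kbos_from_survey_sym_model_input_file('L7model.txt')
for kbo in kbos:
    kbo.compute(ephem.date('2013/04/01'))
    print("{} {} {} {}".format(kbo.name, kbo.ra, kbo.dec, kbo.mag))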
Example #4
def build(filename):
    filehandle = storage.open_vos_or_local(filename, "rb")
    filestr = filehandle.read()
    filehandle.close()

    input_mpc_lines = filestr.split('\n')

    mpc_observations = []
    mag = []
    for line in input_mpc_lines:
        mpc_observation = mpc.Observation.from_string(line)
        if mpc_observation is not None:
            if mpc_observation.mag is not None and mpc_observation.mag > 0:
                mag.append(mpc_observation.mag)

            mpc_observations.append(mpc_observation)

    mpc_observations.sort(key=lambda obs: obs.date.jd)
    orbit = Orbfit(mpc_observations)

    mag = numpy.array(mag)
    print(orbit.residuals)

    print(
        "{:10s} {:7.2f} {:7.2f} {:7.3f} {:7.3f} {:7.3f} {:7.3f} {:7.3f} {:7.3f} {:7.3f}"
        .format(orbit.name, orbit.a, orbit.da, orbit.e, orbit.de, orbit.inc,
                orbit.dinc, orbit.distance, mag.mean(), mag.std()))
Example #5
    def parse(self, filename):
        """
        Parses a file into an AstromData structure.

        Args:
          filename: str
            The name of the file whose contents will be parsed.

        Returns:
          data: AstromData
            The file contents extracted into a data structure for programmatic
            access.
        """
        filehandle = storage.open_vos_or_local(filename, "rb")
        assert filehandle is not None, "Failed to open file {} ".format(
            filename)
        filestr = filehandle.read()
        filehandle.close()

        assert filestr is not None, "File contents are None"

        observations = self._parse_observation_list(filestr)

        self._parse_observation_headers(filestr, observations)

        sys_header = self._parse_system_header(filestr)

        sources = self._parse_source_data(filestr, observations)

        return AstromData(observations, sys_header, sources)
Example #6
    def parse(self, filename):
        filehandle = storage.open_vos_or_local(filename, "rb")
        filestr = filehandle.read()
        filehandle.close()

        input_mpc_lines = filestr.split('\n')

        mpc_observations = []
        for line in input_mpc_lines:
            mpc_observation = mpc.Observation.from_string(line)
            if mpc_observation is not None:
                mpc_observations.append(mpc_observation)

        mpc_observations.sort(key=lambda obs: obs.date.jd)


        # pass down the provisional name so the table lines are linked to this TNO
        self.ssos_parser = SSOSParser(mpc_observations[0].provisional_name,
                                      input_observations=mpc_observations)

        self.orbit = Orbfit(mpc_observations)

        print self.orbit
        print self.orbit.residuals

        self.orbit.predict('2013-04-01')  # HARDWIRING FOR E BLOCK FOR NOW
        coord1 = self.orbit.coordinate
        self.orbit.predict('2013-05-01')
        coord2 = self.orbit.coordinate
        motion_rate = coord1.separation(coord2).arcsecs/(self.orbit.arc_length*60.)  # how best to get arcsec moved between first/last?
        print "{:>10s} {:8.2f}".format('rate ("/hr)', motion_rate)

        self.orbit.predict('2014-04-04')  # hardwiring next year's prediction date for the moment
        print "{:>10s} {:8.2f} {:8.2f}\n".format("Expected accuracy on 4 April 2014 (arcsec)", self.orbit.dra, self.orbit.ddec)

        length_of_observation_arc = mpc_observations[-1].date.jd - mpc_observations[0].date.jd

        if length_of_observation_arc < 1:
            # data from the same dark run.
            lunation_count = 0
        elif 1 < length_of_observation_arc < self._nights_per_darkrun:
            # data from neighbouring darkruns.
            lunation_count = 1
        else:
            # data from the entire project.
            lunation_count = None

        # loop over the query until some new observations are found, or raise assert error.
        while True:
            tracks_data = self.query_ssos(mpc_observations, lunation_count)

            print len(mpc_observations), tracks_data.get_reading_count(), lunation_count
            if (tracks_data.get_arc_length() > length_of_observation_arc or
                    tracks_data.get_reading_count() > len(mpc_observations)):
                return tracks_data
            assert lunation_count is not None, "No new observations available."
            lunation_count += 1
            if lunation_count > 2:
                lunation_count = None
Example #7
    def table(self):
        if self._table is not None:
            return self._table
        lines = storage.open_vos_or_local(self.uri).read()

        # format the file for easy loading.
        new_lines = lines.replace("pix rate", "pix_rate")
        new_lines = new_lines.replace("""''/h rate""", "sky_rate")
        self._table = ascii.read(new_lines, header_start=-1, data_start=0)
        return self._table
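The replace() calls above exist because the raw header has spaces inside column names. A simplified, self-contained sketch of the same renaming trick (the real files carry the header in a trailing comment line, hence header_start=-1 above; this sketch uses a plain first-line header instead):

# Sketch only: demonstrates why "pix rate" must become "pix_rate" before
# astropy.io.ascii can parse it as a single column name.
from astropy.io import ascii

raw = "x y pix rate ''/h rate\n10.0 20.0 0.5 1.2\n"
fixed = raw.replace("pix rate", "pix_rate").replace("''/h rate", "sky_rate")
table = ascii.read(fixed)
print(table.colnames)  # ['x', 'y', 'pix_rate', 'sky_rate']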
Example #8
def synthetic_model_kbos(
        at_date=parameters.NEWMOONS[parameters.DISCOVERY_NEW_MOON],
        maglimit=24.5,
        kbotype=False,
        arrays=False):
    ## build a list of Synthetic KBOs
    print "LOADING SYNTHETIC MODEL KBOS FROM: {}".format(parameters.L7MODEL)
    kbos = []
    if arrays:  # much easier to use for plt.scatter()
        ra = []
        dist = []
        hlat = []
    lines = storage.open_vos_or_local(parameters.L7MODEL).read().split('\n')
    counter = 0
    for line in lines:
        if len(line) == 0 or line[0] == '#':  # skip initial column descriptors and the final blank line
            continue
        kbo = ephem.EllipticalBody()
        values = line.split()
        kbo.name = values[8]
        if kbotype and (kbo.name == kbotype) and (values[9] == '3'):
            kbo._a = float(values[0])
            kbo._e = float(values[1])
            kbo._inc = float(values[2])
            kbo._Om = float(values[3])
            kbo._om = float(values[4])
            kbo._M = float(values[5])
            kbo._H = float(values[6])
            epoch = ephem.date(2453157.50000 - ephem.julian_date(0))
            kbo._epoch_M = epoch
            kbo._epoch = epoch
            date = ephem.date(at_date)
            kbo.compute(date)
            counter += 1

            ## only keep objects that are brighter than limit
            if kbo.mag < maglimit:
                kbos.append(kbo)
                if arrays:
                    ra.append(kbo.ra)
                    dist.append(kbo.sun_distance)
                    hlat.append(kbo.hlat)

    print '{} synthetic model kbos brighter than {} retained from {} in L7 model'.format(
        len(kbos), maglimit, counter)
    if not arrays:
        return kbos
    else:
        return ra, dist, hlat
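A hedged call sketch; the kbotype string is a placeholder for whichever class name the model file uses in its name column.

# Sketch only: 'resonant' is a placeholder kbotype value.
ra, dist, hlat = synthetic_model_kbos(maglimit=24.5, kbotype='resonant', arrays=True)
print("{} synthetic KBOs brighter than the limit".format(len(ra)))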
Example #9
def parse(name, subs, path=SUBMITTED):
    filename = ''
    for sub in subs:
        if name in sub:
            filename = path + sub
    filehandle = storage.open_vos_or_local(filename, "rb")
    filestr = filehandle.read()
    filehandle.close()
    input_mpc_lines = filestr.split('\n')

    mpc_observations = []
    for line in input_mpc_lines:
        mpc_observation = mpc.Observation.from_string(line)
        if mpc_observation is not None:
            mpc_observations.append(mpc_observation)
    mpc_observations.sort(key=lambda obs: obs.date.jd)
    length_of_observation_arc = mpc_observations[-1].date.jd - mpc_observations[0].date.jd
    orbit = Orbfit(mpc_observations)

    return length_of_observation_arc, orbit
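A hedged usage sketch; the object name and submission listing below are placeholders.

# Sketch only: the name and the subs listing are placeholders.
subs = ['o3e01.mpc', 'o3e45.mpc']
arc_length, orbit = parse('o3e01', subs)
print("{} {} {}".format(arc_length, orbit.a, orbit.e))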
Example #10
def build(filename):
    filehandle = storage.open_vos_or_local(filename, "rb")
    filestr = filehandle.read()
    filehandle.close()

    input_mpc_lines = filestr.split('\n')

    mpc_observations = []
    mag = []
    for line in input_mpc_lines:
        mpc_observation = mpc.Observation.from_string(line)
        if mpc_observation is not None:
            if mpc_observation.mag is not None and mpc_observation.mag > 0: 
                mag.append(mpc_observation.mag)
                
            mpc_observations.append(mpc_observation)

    mpc_observations.sort(key=lambda obs: obs.date.jd)
    orbit = Orbfit(mpc_observations)

    mag = numpy.array(mag)
    print orbit.residuals

    print "{:10s} {:7.2f} {:7.2f} {:7.3f} {:7.3f} {:7.3f} {:7.3f} {:7.3f} {:7.3f} {:7.3f}".format(orbit.name, orbit.a, orbit.da, orbit.e, orbit.de, orbit.inc, orbit.dinc, orbit.distance, mag.mean(), mag.std())
Example #11
def match_planted(fk_candidate_observations, match_filename, bright_limit=BRIGHT_LIMIT, object_planted=OBJECT_PLANTED,
                  minimum_bright_detections=MINIMUM_BRIGHT_DETECTIONS, bright_fraction=MINIMUM_BRIGHT_FRACTION):
    """
    Using the fk_candidate_observations as input get the Object.planted file from VOSpace and match
    planted sources with found sources.

    The Object.planted list is pulled from VOSpace based on the standard file-layout and name of the
    first exposure as read from the .astrom file.

    :param fk_candidate_observations: name of the fk*reals.astrom file to check against Object.planted
    :param match_filename: a file that will contain a list of all planted sources and the matched found source

    """

    found_pos = []
    detections = fk_candidate_observations.get_sources()
    for detection in detections:
        reading = detection.get_reading(0)
        # create a list of positions, to be used later by match_lists
        found_pos.append([reading.x, reading.y])

    # Now get the Object.planted file, either from the local FS or from VOSpace.
    objects_planted_uri = object_planted
    if not os.access(objects_planted_uri, os.F_OK):
        objects_planted_uri = fk_candidate_observations.observations[0].get_object_planted_uri()
    lines = storage.open_vos_or_local(objects_planted_uri).read()

    # we are changing the format of the Object.planted header to be compatible with astropy.io.ascii but
    # there are some old Object.planted files out there so we do these string/replace calls to reset those.
    new_lines = lines.replace("pix rate", "pix_rate")
    new_lines = new_lines.replace("""''/h rate""", "sky_rate")
    rdr = ascii.get_reader(Reader=ascii.CommentedHeader)
    planted_objects_table = rdr.read(new_lines)

    # The match_list method expects a list that contains a position, not an x and a y vector, so we transpose.
    planted_pos = numpy.transpose([planted_objects_table['x'].data, planted_objects_table['y'].data])

    # match_idx is an ordered list.  The list is in the order of the first list of positions and each entry
    # is the index of the matching position from the second list.
    (match_idx, match_fnd) = util.match_lists(numpy.array(planted_pos), numpy.array(found_pos))
    assert isinstance(match_idx, numpy.ma.MaskedArray)
    assert isinstance(match_fnd, numpy.ma.MaskedArray)

    false_positives_table = Table()

    # Once we've matched the two lists we'll need some new columns to store the information in.
    # these are masked columns so that object.planted entries that have no detected match are left 'blank'.
    new_columns = [MaskedColumn(name="measure_x", length=len(planted_objects_table), mask=True),
                   MaskedColumn(name="measure_y", length=len(planted_objects_table), mask=True),
                   MaskedColumn(name="measure_rate", length=len(planted_objects_table), mask=True),
                   MaskedColumn(name="measure_angle", length=len(planted_objects_table), mask=True),
                   MaskedColumn(name="measure_mag1", length=len(planted_objects_table), mask=True),
                   MaskedColumn(name="measure_merr1", length=len(planted_objects_table), mask=True),
                   MaskedColumn(name="measure_mag2", length=len(planted_objects_table), mask=True),
                   MaskedColumn(name="measure_merr2", length=len(planted_objects_table), mask=True),
                   MaskedColumn(name="measure_mag3", length=len(planted_objects_table), mask=True),
                   MaskedColumn(name="measure_merr3", length=len(planted_objects_table), mask=True)]
    planted_objects_table.add_columns(new_columns)
    tlength = 0
    new_columns = [MaskedColumn(name="measure_x", length=tlength, mask=True),
                   MaskedColumn(name="measure_y", length=tlength, mask=True),
                   MaskedColumn(name="measure_rate", length=0, mask=True),
                   MaskedColumn(name="measure_angle", length=0, mask=True),
                   MaskedColumn(name="measure_mag1", length=0, mask=True),
                   MaskedColumn(name="measure_merr1", length=0, mask=True),
                   MaskedColumn(name="measure_mag2", length=0, mask=True),
                   MaskedColumn(name="measure_merr2", length=0, mask=True),
                   MaskedColumn(name="measure_mag3", length=tlength, mask=True),
                   MaskedColumn(name="measure_merr3", length=tlength, mask=True)]
    false_positives_table.add_columns(new_columns)
    print len(false_positives_table)

    # We do some 'checks' on the Object.planted match to diagnose pipeline issues.  Those checks are made using just
    # those planted sources we should have detected.
    bright = planted_objects_table['mag'] < bright_limit
    n_bright_planted = numpy.count_nonzero(planted_objects_table['mag'][bright])

    for idx in range(len(match_idx)):
        # match_idx.mask[idx] is True when no match was found.
        if not match_idx.mask[idx]:
            # Each 'source' has multiple 'readings'
            measures = detections[match_idx[idx]].get_readings()
            planted_objects_table[idx] = measure_mags(measures, planted_objects_table[idx])

    for idx in range(len(match_fnd)):
        if match_fnd.mask[idx]:
            measures = detections[idx].get_readings()
            false_positives_table.add_row()
            false_positives_table[-1] = measure_mags(measures, false_positives_table[-1])

    # Count an object as detected if it has a measured magnitude in the first frame of the triplet.
    n_bright_found = numpy.count_nonzero(planted_objects_table['measure_mag1'][bright])
    # Also compute the offset and standard deviation of the measured magnitude from the planted ones.
    offset = numpy.mean(planted_objects_table['mag'][bright] - planted_objects_table['measure_mag1'][bright])
    try:
        offset = "{:5.2f}".format(offset)
    except:
        offset = "indef"

    std = numpy.std(planted_objects_table['mag'][bright] - planted_objects_table['measure_mag1'][bright])
    try:
        std = "{:5.2f}".format(std)
    except:
        std = "indef"


    if os.access(match_filename, os.R_OK):
        fout = open(match_filename, 'a')
    else:
        fout = open(match_filename, 'w')

    fout.write("#K {:10s} {:10s}\n".format("EXPNUM", "FWHM"))
    for measure in detections[0].get_readings():
        fout.write('#V {:10s} {:10s}\n'.format(measure.obs.header['EXPNUM'], measure.obs.header['FWHM']))

    fout.write("#K ")
    for keyword in ["RMIN", "RMAX", "ANGLE", "AWIDTH"]:
        fout.write("{:10s} ".format(keyword))
    fout.write("\n")

    fout.write("#V ")
    for keyword in ["RMIN", "RMAX", "ANGLE", "AWIDTH"]:
        fout.write("{:10s} ".format(fk_candidate_observations.sys_header[keyword]))
    fout.write("\n")

    fout.write("#K ")
    for keyword in ["NBRIGHT", "NFOUND", "OFFSET", "STDEV"]:
        fout.write("{:10s} ".format(keyword))
    fout.write("\n")
    fout.write("#V {:<10} {:<10} {:<10} {:<10}\n".format(n_bright_planted,
                                                         n_bright_found,
                                                         offset,
                                                         std))

    fpout = storage.open_vos_or_local(match_filename+".fp", 'a')
    try:
        writer = ascii.FixedWidth
        # add a hash to the start of line that will have header columns: for JMP
        fout.write("#")
        ascii.write(planted_objects_table, output=fout, Writer=writer, delimiter=None)
        fpout.write("#")
        if len(false_positives_table) > 0:
            ascii.write(false_positives_table, output=fpout, Writer=writer, delimiter=None)
        else:
            fpout.write(" no false positives\n")
    except Exception as e:
        print e
        print str(e)
        raise e
    finally:
        fout.close()
        fpout.close()

    # Some simple checks that raise a failure if we're not doing well enough.
    if n_bright_planted < minimum_bright_detections:
        raise RuntimeError(1, "Too few bright objects planted.")

    if n_bright_found / float(n_bright_planted) < bright_fraction:
        raise RuntimeError(2, "Too few bright objects found.")

    return "{} {} {} {}".format(n_bright_planted, n_bright_found, offset, std)
Example #12
    return



if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('mpc_file', help="An MPC file to update.")
    parser.add_argument('cor_file', help="Corrected MPC file")

    args = parser.parse_args()
    out_mags = []
    out_merrs = []
    in_mags = []
    in_merrs = []
    fptr = storage.open_vos_or_local(args.mpc_file)
    optr = storage.open_vos_or_local(args.cor_file, mode='w')
    lines = fptr.read().split('\n')
    for line in lines:
        if not len(line) > 0:
            continue
        mpc_in = mpc.Observation.from_string(line)
        mpc_obs = remeasure(mpc_in)
        optr.write(mpc_obs.to_string()+"\n")
        if not mpc_obs.comment.PNote[0] == "Z" and str(mpc_obs.note1) not in ["I", "H"]:
            out_mags.append(mpc_obs.mag)
            in_mags.append(mpc_in.mag)
            in_merrs.append(mpc_in.mag_err)
            out_merrs.append(mpc_obs.mag_err)
    optr.close()
    fptr.close()
Example #13
    if opt.PREP is not None:
        opt.programs = opt.PREP

    if opt.programs is None or not len(opt.programs) > 0:
        parser.error(
            "Must specify at least one program group to clear tags for.")

    ccds = opt.PREP is not None and [36] or ccds

    block_name = opt.block[-1]
    block_semester = opt.block[:-1]
    vos_filename = 'vos:OSSOS/triplets/{}_{}_discovery_expnums.txt'.format(
        block_name, block_semester)
    print(vos_filename)

    triplist = storage.open_vos_or_local(vos_filename, 'r').read().split('\n')

    ops = []
    for program in opt.programs:
        ops.extend(program)
    for line in triplist:
        v = line.split()
        if len(v) < 3:
            continue
        sys.stderr.write("{} ".format(v[-1]))
        field = v[3]
        if "L+0-1" in field:
            continue
        result = check_tags(v[0], ops, ccds, dry_run=opt.dry_run)
        for ccd in result:
            print(line, ccd, ccd)
Example #14
from ossos import parameters, parsers, storage

# Had to resort to lookup table as everything got non-standard from L block onward. Ah well.
# also H block seem to not have 'O14BH' at start yet. Check those after a release exists.
paths = {
    'O13AE': '2013A-E',  # where are all the E block .mpc files?
    'O13AO': '2013A-O',
    'O13BL': '2013B-L_redo',
    'O14BH': '2015B-H',
}
files = {}
triplets = {}

designations = {}
# first designation is the most advanced one/matches the release name, so use that as the key
for line in storage.open_vos_or_local(parameters.IDX).read().split('\n'):
    names = line.split()
    if len(names) > 0:
        designations[names[0]] = names[1:]

discoveries = parsers.ossos_release_parser(table=True)
with open('/Users/michele/Desktop/13Adiscoveries.txt', 'w') as outfile:
    outfile.write('{:<12} {:<12} {:<12} {:<12} {:<20} {:<20}\n'.format(
        'Object', 'Provisional', 'First-accept', 'Chip', 'Q-observer',
        'Q-coord'))
    for discovery in discoveries['object']:
        # get older designations, which would have been assigned as the original discovery name
        former_names = designations[discovery]
        # cf. ossos.naming.ProvisionalNameGenerator()
        provisional_name = [
            name for name in former_names
            if name.startswith('O1') and name.isupper()
        ][0]
        outfile.write('{:<12} {:<12}'.format(discovery, provisional_name))
Example #15
from ossos import parameters, parsers, storage

# Had to resort to lookup table as everything got non-standard from L block onward. Ah well.
# also H block seem to not have 'O14BH' at start yet. Check those after a release exists.
paths = {
    'O13AE': '2013A-E',  # where are all the E block .mpc files?
    'O13AO': '2013A-O',
    'O13BL': '2013B-L_redo',
    'O14BH': '2015B-H',
}
files = {}
triplets = {}

designations = {}
# first designation is the most advanced one/matches the release name, so use that as the key
for line in storage.open_vos_or_local(parameters.IDX).read().split('\n'):
    names = line.split()
    if len(names) > 0:
        designations[names[0]] = names[1:]

discoveries = parsers.ossos_release_parser(table=True)
with open('/Users/michele/Desktop/13Adiscoveries.txt', 'w') as outfile:
    outfile.write('{:<12} {:<12} {:<12} {:<12} {:<20} {:<20}\n'.format(
        'Object', 'Provisional', 'First-accept', 'Chip', 'Q-observer', 'Q-coord'))
    for discovery in discoveries['object']:
        # get older designations, which would have been assigned as the original discovery name
        former_names = designations[discovery]
        # cf. ossos.naming.ProvisionalNameGenerator()
        provisional_name = filter(lambda name: (name.startswith('O1') and name.isupper()), former_names)[0]
        outfile.write('{:<12} {:<12}'.format(discovery, provisional_name))
Example #16
File: snr.py Project: OSSOS/MOP
    def __init__(self, expnum, ccd):
        self.expnum = expnum
        self.ccd = ccd
        self.apcor_array = storage.open_vos_or_local(self.uri).read().split()
Example #17
    ccds = opt.ccd is not None and opt.ccd or ALL_CCDS

    if opt.PREP is not None:
        opt.programs = opt.PREP

    if opt.programs is None or not len(opt.programs) > 0:
        parser.error(
            "Must specify at least one program group to clear tags for.")

    ccds = opt.PREP is not None and [36] or ccds

    block_name = opt.block[-1]
    block_semester = opt.block[:-1]

    triplist = storage.open_vos_or_local(
        'vos:OSSOS/triplets/{}_{}_discovery_expnums.txt'.format(
            block_name, block_semester), 'r').read().split('\n')

    ops = []
    for program in opt.programs:
        ops.extend(program)
    for line in triplist:
        v = line.split()
        if len(v) < 3:
            continue
        field = v[3]
        if "L+0-1" in field:
            continue
        result = check_tags(v[0], ops, ccds, dry_run=opt.dry_run)
        print field, result
Example #18
ec = [ephem.Ecliptic(x, y) for (x, y) in np.array((cc_lon, cc_lat)).transpose()]
eq = [ephem.Equatorial(coord) for coord in ec]
ax.plot([math.degrees(coord.ra) for coord in eq],
        [math.degrees(coord.dec) for coord in eq],
        '.r',
        lw=1,
        alpha=0.7)
ax.text(25, 6, 'Chiang_Choi', fontdict={'color': 'r'})


## build a list of Synthetic KBOs that will be in the discovery fields.
print "LOADING SYNTHETIC MODEL KBOS FROM: {}".format(L7MODEL)
ra = []
dec = []
kbos = []
lines = storage.open_vos_or_local(L7MODEL).read().split('\n')

# Look for synthetic KBOs that are in the field on this date.
discovery_date = ephem.date(newMoons[DISCOVERY_NEW_MOON])
plot_date = ephem.date(newMoons[PLOT_FIELD_EPOCH])

for line in lines:
    if len(line) == 0 or line[0] == '#':  # skip initial column descriptors and the final blank line
        continue
    kbo = ephem.EllipticalBody()
    values = line.split()
    kbo._a = float(values[0])
    kbo._e = float(values[1])
    kbo._inc = float(values[2])
    kbo._Om = float(values[3])
    kbo._om = float(values[4])
Example #19
def match_planted(astrom_filename, match_filename, false_positive_filename):
    """
    Using the astrom_filename as input get the Object.planted file from VOSpace and match
    planted sources with found sources.

    The Object.planted list is pulled from VOSpace based on the standard file-layout and name of the
    first exposure as read from the .astrom file.

    :param astrom_filename: name of the fk*reals.astrom file to check against Object.planted
    :param match_filename: a file that will contain a list of all planted sources and the matched found source
    :param false_positive_filename: .astrom format output containing input objects that had no match in planted

    """
    image_slice_downloader = ImageCutoutDownloader(slice_rows=100, slice_cols=100)


    fk_candidate_observations = astrom.parse(astrom_filename)
    matches_fptr = storage.open_vos_or_local(match_filename,'w')

    objects_planted_uri = fk_candidate_observations.observations[0].get_object_planted_uri()


    objects_planted = image_slice_downloader.download_raw(objects_planted_uri, view='data').split('\n')

    planted_objects = []

    for line in objects_planted[1:]:
        if len(line) == 0 or line[0] == '#':
            continue
        planted_objects.append(PlantedObject(line))

    false_positives_ftpr = None
    false_positives_stream_writer = None

    matches_fptr.write("#{}\n".format(fk_candidate_observations.observations[0].rawname))
    matches_fptr.write("{:1s}{} {:>8s} {:>8s} {:>8s} {:>8s} {:>8s} {:>8s} {:>8s}\n".format(
        "",objects_planted[0],"x_dao","y_dao","mag_dao","merr_dao", "rate_mes", "ang_mes", "dr_pixels" ))

    found_idxs = []
    for source in fk_candidate_observations.get_sources():
        reading = source.get_reading(0)
        third = source.get_reading(2)

        cutout = image_slice_downloader.download_cutout(reading, needs_apcor=True)

        try:
            (x, y, mag, merr) = cutout.get_observed_magnitude()
        except TaskError as e:
            logger.warning(str(e))
            mag = 0.0
            merr = -1.0


        matched = None
        for idx in range(len(planted_objects)):
            planted_object = planted_objects[idx]
            dist = math.sqrt((reading.x-planted_object.x)**2 + (reading.y - planted_object.y)**2)
            if matched is None or dist < matched:
                matched = dist
                matched_object_idx = idx

        start_jd = Time(reading.obs.header['MJD_OBS_CENTER'],format='mpc', scale='utc').jd
        end_jd = Time(third.obs.header['MJD_OBS_CENTER'], format='mpc', scale='utc').jd
        exptime = float(reading.obs.header['EXPTIME'])

        rate = math.sqrt((third.x - reading.x)**2 + (third.y - reading.y)**2)/(
            24*(end_jd - start_jd) )
        angle = math.degrees(math.atan2(third.y - reading.y,third.x - reading.x))

        # NB: the trailing 'and False' disables this false-positive branch.
        if matched > 3 * rate * exptime / 3600.0 and False:
            # this is a false positive (candidate not near artificial source)
            # create a .astrom style line for feeding to validate for checking later
            if false_positives_ftpr is None or false_positives_stream_writer is None:
                # create false positive file for storing results
                false_positives_ftpr = open(false_positive_filename,'w+')
                false_positives_stream_writer = StreamingAstromWriter(
                    false_positives_ftpr,fk_candidate_observations.sys_header)
            false_positives_stream_writer.write_source(source)
            false_positives_ftpr.flush()
            continue
        elif matched_object_idx in found_idxs:
            repeat = '#'
        else:
            repeat = ' '
            found_idxs.append(matched_object_idx)

        mags = []
        merrs = []
        for this_reading in source.get_readings()[1:]:
            cutout = image_slice_downloader.download_cutout(this_reading, needs_apcor=True)

            try:
                (this_x, this_y, this_mag, this_merr) = cutout.get_observed_magnitude()
            except TaskError as e:
                logger.warning(str(e))
                this_mag = 0.0
                this_merr = -1.0

            mags.append(this_mag)
            merrs.append(this_merr)

        matches_fptr.write("{:1s}{} {:8.2f} {:8.2f} {:8.2f} {:8.2f} {:8.2f} {:8.2f} {:8.2f} ".format(
            repeat,
            str(planted_objects[matched_object_idx]), reading.x, reading.y, mag, merr, rate, angle, matched))
        for idx in range(len(mags)):
            matches_fptr.write("{:8.2f} {:8.2f}".format(mags[idx], merrs[idx]))
        matches_fptr.write("\n")


    # close the false_positives
    if false_positives_ftpr is not None:
        false_positives_ftpr.close()

    # record the unmatched Object.planted entries, for use in efficiency computing
    for idx in range(len(planted_objects)):
        if idx not in found_idxs:
            planted_object = planted_objects[idx]
            matches_fptr.write("{:1s}{} {:8.2f} {:8.2f} {:8.2f} {:8.2f} {:8.2f} {:8.2f} {:8.2f}\n".format("",str(planted_object),
                                                                          0, 0, 0, 0, 0, 0, 0))
    matches_fptr.close()
Example #20
    def parse(self, ssos_result_filename_or_lines):
        """
        given the result table create 'source' objects.

        :type ssos_result_table: Table
        :param ssos_result_table:
        """
        table_reader = ascii.get_reader(Reader=ascii.Basic)
        table_reader.inconsistent_handler = self._skip_missing_data
        table_reader.header.splitter.delimiter = '\t'
        table_reader.data.splitter.delimiter = '\t'
        table = table_reader.read(ssos_result_filename_or_lines)

        sources = []
        observations = []
        source_readings = []

        ref_pvwcs = None
        downloader = Downloader()
        warnings.filterwarnings('ignore')

        for row in table:
            # check if a dbimages object exists
            ccd = int(row['Ext']) - 1
            expnum = row['Image'].rstrip('p')

            # ADDING THIS TEMPORARILY TO GET THE NON-OSSOS DATA OUT OF THE WAY WHILE DEBUGGING
            if (row['Telescope_Insturment'] != 'CFHT/MegaCam') or (row['Filter'] != 'r.MP9601'):
                continue

            # it's fine for OSSOS, go get the image
            image_uri = storage.dbimages_uri(expnum=expnum,
                                             ccd=None,
                                             version='p',
                                             ext='.fits',
                                             subdir=None)
            logger.info('Trying to access %s\n%s' % (row.data, image_uri))

            if not storage.exists(image_uri, force=False):
                logger.warning('Image not in dbimages? Trying subdir.')
                image_uri = storage.dbimages_uri(expnum=expnum,
                                                 ccd=ccd,
                                                 version='p')

                if not storage.exists(image_uri, force=False):
                    logger.warning("Image doesn't exist in ccd subdir. %s" % image_uri)
                    continue

            if row['X'] == -9999 or row['Y'] == -9999:
                logger.warning("Skipping %s as x/y not resolved." % row['Image'])
                continue

            mopheader_uri = storage.dbimages_uri(expnum=expnum,
                                                 ccd=ccd,
                                                 version='p',
                                                 ext='.mopheader')

            if mopheader_uri not in mopheaders:
                if not storage.exists(mopheader_uri, force=False):
                    logger.warning('mopheader missing, but images exists')
                    continue

                # raise flag if no MOPHEADER
                mopheader_fpt = cStringIO.StringIO(storage.open_vos_or_local(mopheader_uri).read())
                mopheader = fits.open(mopheader_fpt)
                mopheaders[mopheader_uri] = mopheader
            mopheader = mopheaders[mopheader_uri]
            
            # Build astrom.Observation
            observation = astrom.Observation(expnum=str(expnum),
                                             ftype='p',
                                             ccdnum=str(ccd),
                                             fk="")

            observation.rawname = os.path.splitext(os.path.basename(image_uri))[0]+str(ccd).zfill(2)

            observation.header = mopheader[0].header
            MJD_OBS_CENTER = mpc.Time(observation.header['MJD-OBSC'],
                                      format='mjd',
                                      scale='utc', precision=5 ).replicate(format='mpc')
            observation.header['MJD_OBS_CENTER'] = str(MJD_OBS_CENTER)
            observation.header['MAXCOUNT'] = MAXCOUNT
            observation.header['SCALE'] = observation.header['PIXSCALE']
            #observation.header['CHIP'] = str(observation.header['CHIPNUM']).zfill(2)
            observation.header['NAX1'] = observation.header['NAXIS1']
            observation.header['NAX2'] = observation.header['NAXIS2']
            observation.header['MOPversion'] = observation.header['MOP_VER']
            observation.header['FWHM'] = 4



            # download a 1x1 pixel cutout of this image to derive pixel offsets with.
            x_cen = int(min(max(1, row['X']), observation.header['NAX1']))
            y_cen = int(min(max(1, row['Y']), observation.header['NAX2']))
            if image_uri not in astheaders:
                hdulist = downloader.download_hdulist(
                    uri=image_uri,
                    view='cutout',
                    cutout='[{}][{}:{},{}:{}]'.format(ccd + 1, x_cen, x_cen, y_cen, y_cen))
                astheaders[image_uri] = hdulist
            hdulist = astheaders[image_uri]

            pvwcs = wcs.WCS(hdulist[0].header)
            (ra, dec) = pvwcs.xy2sky(x_cen, y_cen)
            if ref_pvwcs is None:
                ref_pvwcs = pvwcs
                xref = row['X']
                yref = row['Y']
            (x0, y0) = ref_pvwcs.sky2xy(ra,dec)
            x0 += row['X'] - x_cen
            y0 += row['Y'] - y_cen

            # Build astrom.SourceReading
            observations.append(observation)

            from_input_file = observation.rawname in self.input_rawnames
            null_observation = observation.rawname in self.null_observations

            print observation.rawname, observation.header['MJD_OBS_CENTER'], null_observation, from_input_file

            source_reading = astrom.SourceReading(x=row['X'], y=row['Y'],
                                                        xref=xref, yref=yref,
                                                        x0=x0, y0=y0,
                                                        ra=row['Object_RA'], dec=row['Object_Dec'],
                                                        obs=observation,
                                                        ssos=True,
                                                        from_input_file=from_input_file,
                                                        null_observation=null_observation)
            #if observation.rawname in  self.input_rawnames:
            #    source_readings.insert(0, source_reading)
            #else:
            source_readings.append(source_reading)
        # build our array of SourceReading objects
        sources.append(source_readings)

        warnings.filterwarnings('once')

        return SSOSData(observations, sources, self.provisional_name)
Example #21
    ccds = opt.ccd is not None and opt.ccd or ALL_CCDS

    if opt.PREP is not None:
        opt.programs = opt.PREP

    if opt.programs is None or not len(opt.programs) > 0:
        parser.error("Must specify at least one program group to clear tags for.")

    ccds = opt.PREP is not None and [36] or ccds

    block_name = opt.block[-1]
    block_semester = opt.block[:-1]
    vos_filename = 'vos:OSSOS/triplets/{}_{}_discovery_expnums.txt'.format(block_name, block_semester)
    print vos_filename

    triplist = storage.open_vos_or_local(vos_filename, 'r').read().split('\n')

    ops = []
    for program in opt.programs:
        ops.extend(program)
    for line in triplist:
        v = line.split()
        if len(v) < 3:
            continue
        sys.stderr.write("{} ".format(v[-1]))
        field = v[3]
        if "L+0-1" in field:
            continue
        result = check_tags(v[0], ops, ccds, dry_run=opt.dry_run)
        for ccd in result:
Example #22
"""
Correct the mistake where the planted images linked to the non-scrambled zeropoint/apcor file.
"""
import sys

__author__ = 'jjk'


from ossos import storage


triples = storage.open_vos_or_local('vos:OSSOS/triplets/E_13A_discovery_expnums.txt').read()
for line in triples.split('\n'):
    line = line.strip(' ')
    if not len(line) > 0:
        continue
    for expnum in line.split(' ')[0:3]:
        for ccd in range(36):
            for ext in ['.apcor',
                        '.zeropoint.used',
                        '.psf.fits',
                        '.mopheader',
                        '.fwhm',
                        '.trans.jmp']:
                storage.delete(expnum=expnum, ccd=ccd, version='s', ext=ext, prefix='fk')
                storage.vlink(s_expnum=expnum,
                            s_ccd=ccd,
                            s_version='s',
                            s_prefix='',
                            s_ext=ext,
                            l_expnum=expnum,
Example #23
    def load_pointings(self, filename=None):
        """Load some pointings"""

        filename = (filename is None and tkFileDialog.askopenfilename()
                    or filename)

        if filename is None:
            return

        f = storage.open_vos_or_local(filename)
        lines = f.readlines()
        f.close()
        points = []
        if lines[0][0:5] == "<?xml":
            ## assume astrores format
            ## with <DATA at start of 'data' segment
            for i in range(len(lines)):
                if lines[i][0:5] == '<DATA':
                    break
            for j in range(i + 5, len(lines)):
                if lines[j][0:2] == "]]":
                    break
                vs = lines[j].split('|')
                points.append(vs)
        elif lines[0][0:5] == 'index':
            ## Palomar Format
            ## OK.. ID/NAME/RA /DEC format
            for line in lines:
                if line[0] == '!' or line[0:5] == 'index':
                    # index is a header line for Palomar
                    continue
                d = line.split()
                if len(d) != 9:
                    sys.stderr.write("Don't understand pointing format\n%s\n" %
                                     line)
                    continue
                ras = "%s:%s:%s" % (d[2], d[3], d[4])
                decs = "%s:%s:%s" % (d[5], d[6], d[7])
                points.append((d[1].strip(), ras, decs))
        elif lines[0][0:5] == "#SSIM":
            ## Survey Simulator format
            for line in lines[1:]:
                d = line.split()
                points.append((d[8], d[2], d[3]))
        else:
            ## try name / ra / dec / epoch
            for line in lines:
                d = line.split()
                if len(d) != 4:
                    if len(d) != 8:
                        sys.stderr.write(
                            "Don't understand pointing format\n%s\n" % (line))
                        continue
                    line = "%s %s:%s:%s %s:%s:%s %s" % (d[0], d[1], d[2], d[3],
                                                        d[4], d[5], d[6], d[7])
                    d = line.split()

                f = d[1].count(":")
                if f > 0:
                    points.append((d[0], d[1], d[2]))
                else:
                    points.append(('', math.radians(float(d[1])),
                                   math.radians(float(d[2]))))

        self.plot_points_list(points)
        return
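The branches above accept several pointing-file layouts; a hedged sample of the plain name/ra/dec/epoch format handled by the final branch:

# Sketch only: a made-up pointings file in the name/ra/dec/epoch layout.
sample = """field1 12:30:00 -05:00:00 2000
field2 12:35:00 -05:10:00 2000
"""
with open("pointings.txt", "w") as fobj:
    fobj.write(sample)
# self.load_pointings("pointings.txt") would then plot both fields.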
Example #24
    opt = parser.parse_args()
    ccds = opt.ccd is not None and opt.ccd or ALL_CCDS

    storage.DBIMAGES = opt.dbimages

    block_name = opt.field[3]
    block_semester = opt.field[0:3]
    if opt.PREP is not None:
        opt.programs = opt.PREP

    if opt.programs is None or not len(opt.programs) > 0:
        parser.error("Must specify at least one program group to clear tags for.")

    ccds = opt.PREP is not None and [40] or ccds

    triplist = (
        storage.open_vos_or_local(
            "vos:OSSOS/triplets/{}_{}_discovery_expnums.txt".format(block_name, block_semester), "r"
        )
        .read()
        .split("\n")
    )

    print opt.field, opt.ccd
    for tripline in triplist:
        triplets = tripline.split()
        if opt.field in triplets:
            for expnum in triplets[0:3]:
                print expnum
                clear_tags(expnum, opt.programs, ccds, dry_run=opt.dry_run)
Example #25
ec = [
    ephem.Ecliptic(x, y) for (x, y) in np.array((cc_lon, cc_lat)).transpose()
]
eq = [ephem.Equatorial(coord) for coord in ec]
ax.plot([math.degrees(coord.ra) for coord in eq],
        [math.degrees(coord.dec) for coord in eq],
        '.r',
        lw=1,
        alpha=0.7)
ax.text(25, 6, 'Chiang_Choi', fontdict={'color': 'r'})

## build a list of Synthetic KBOs that will be in the discovery fields.
print("LOADING SYNTHETIC MODEL KBOS FROM: {}".format(L7MODEL))
ra = []
dec = []
kbos = []
lines = storage.open_vos_or_local(L7MODEL).read().split('\n')

# Look for synthetic KBOs that are in the field on this date.
discovery_date = ephem.date(newMoons[DISCOVERY_NEW_MOON])
plot_date = ephem.date(newMoons[PLOT_FIELD_EPOCH])

for line in lines:
    if len(line) == 0 or line[0] == '#':  # skip initial column descriptors and the final blank line
        continue
    kbo = ephem.EllipticalBody()
    values = line.split()
    kbo._a = float(values[0])
    kbo._e = float(values[1])
    kbo._inc = float(values[2])
    kbo._Om = float(values[3])
Example #26
    def load_pointings(self, filename=None):
        """Load some pointings"""

        filename = (filename is None and tkFileDialog.askopenfilename() or filename)

        if filename is None:
            return

        f = storage.open_vos_or_local(filename)
        lines = f.readlines()
        f.close()
        points = []
        if lines[0][0:5] == "<?xml":
            ## assume astrores format
            ## with <DATA at start of 'data' segment
            for i in range(len(lines)):
                if lines[i][0:5] == '<DATA':
                    break
            for j in range(i + 5, len(lines)):
                if lines[j][0:2] == "]]":
                    break
                vs = lines[j].split('|')
                points.append(vs)
        elif lines[0][0:5] == 'index':
            ## Palomar Format
            ## OK.. ID/NAME/RA /DEC format
            v = lines[0].split()
            if len(v) == 2:
                date = v[1]
                self.date.set(v[1])
                self.reset()
            for line in lines:
                if line[0] == '!' or line[0:5] == 'index':
                    # index is a header line for Palomar
                    continue
                d = line.split()
                if len(d) < 9:
                    sys.stderr.write("Don't understand pointing format\n%s\n" % line)
                    continue
                ras = "%s:%s:%s" % (d[2], d[3], d[4])
                decs = "%s:%s:%s" % (d[5], d[6], d[7])
                points.append((d[1].strip(), ras, decs))
        elif lines[0][0:5] == "#SSIM":
            ## Survey Simulator format
            for line in lines[1:]:
                d = line.split()
                points.append((d[8], d[2], d[3]))
        else:
            ## try name / ra / dec / epoch
            for line in lines:
                d = line.split()
                if len(d) == 5:  # brave assumption time!
                    # self.pointing_format = 'Subaru'  # unfortunately this doesn't seem to do anything, & breaks save
                    pointing_name = d[0].split('=')[0]
                    # oh grief these are sexagesimal with no separators. WHY
                    ra = d[1].split('=')[1]
                    dec = d[2].split('=')[1]
                    if len(ra.split('.')[0]) == 5:  # LACK OF SEPARATORS ARGH
                        ra = '0' + ra
                    if len(dec.split('.')[0]) == 5:
                        dec = '0' + dec
                    ra = "{}:{}:{}".format(ra[0:2], ra[2:4], ra[4:])
                    dec = "{}:{}:{}".format(dec[0:2], dec[2:4], dec[4:])
                    points.append((pointing_name, ra, dec))
                elif len(d) == 4:
                    f = d[1].count(":")
                    if f > 0:
                        points.append((d[0], d[1], d[2]))
                    else:
                        points.append(('', math.radians(float(d[1])), math.radians(float(d[2]))))
                elif len(d) == 8:
                    line = "%s %s:%s:%s %s:%s:%s %s" % (d[0], d[1], d[2], d[3], d[4], d[5], d[6], d[7] )
                    d = line.split()
                    # this one seems unfinished...no append
                else:
                    sys.stderr.write("Don't understand pointing format\n%s\n" % ( line))
                    continue

        self.plot_points_list(points)
        return