Example #1
    def save_packets(self):
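        """Ingest each alert packet from the avro file: create or update the
        Source, attach comments, photometry, and thumbnails, then refresh
        derived quantities (position, scores, per-filter stats, catalog info).
        """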

        for packet in self._open_avro():
            print(f"working on {packet['objectId']}")

            # skip alerts that fail the purity cut when only_pure is set
            if self.only_pure and not self._is_alert_pure(packet):
                print(f"{self.fname}: not pure. Skipping")
                continue

            s = Source.query.filter(Source.id == packet["objectId"]).first()
            if s:
                print("Found: an existing source with id = " +
                      packet["objectId"])
                source_is_varstar = s.varstar in [True]
                if not self.clobber and s.origin == f"{os.path.basename(self.fname)}":
                    print(
                        f"already added this source with this avro packet {os.path.basename(self.fname)}"
                    )
                    continue

            # make a dataframe and save the source/phot
            dflc = self._make_dataframe(packet)

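            # candidate metadata for the Source row; .get() tolerates missing keys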
            source_info = {
                'id': packet["objectId"],
                'ra': packet["candidate"]["ra"],
                'dec': packet["candidate"]["dec"],
                'ra_dis': packet["candidate"]["ra"],
                'dec_dis': packet["candidate"]["dec"],
                'dist_nearest_source': packet["candidate"].get("distnr"),
                'mag_nearest_source': packet["candidate"].get("magnr"),
                'e_mag_nearest_source': packet["candidate"].get("sigmagnr"),
                'sgmag1': packet["candidate"].get("sgmag1"),
                'srmag1': packet["candidate"].get("srmag1"),
                'simag1': packet["candidate"].get("simag1"),
                'objectidps1': packet["candidate"].get("objectidps1"),
                'sgscore1': packet["candidate"].get("sgscore1"),
                'distpsnr1': packet["candidate"].get("distpsnr1"),
                'score': packet['candidate']['rb']
            }

            if s is None:
                s = Source(**source_info,
                           origin=f"{os.path.basename(self.fname)}",
                           groups=[self.ztfpack.g])
                source_is_varstar = False
                new_source = True
            else:
                new_source = False

            # check whether comments from this packet were already added
            comments = Comment.query.filter(Comment.source_id == packet["objectId"]) \
                                    .filter(Comment.origin == f"{os.path.basename(self.fname)}")

            skip = False
            if self.clobber and comments.count() > 0:
                print("removing preexisting comments from this packet")
                comments.delete()
                DBSession().commit()
            elif comments.count() > 0:
                skip = True

            if not skip:
                print(f"packet id: {packet['objectId']}")
                new_comments = [Comment(text=comment, source_id=packet["objectId"],
                                        user=self.ztfpack.group_admin_user,
                                        origin=f"{os.path.basename(self.fname)}")
                                for comment in ["Added by ztf_upload_avro",
                                                f"filename = {os.path.basename(self.fname)}"]]
                if new_source:
                    s.comments = new_comments
                else:
                    # attach to the existing source via an explicit add
                    DBSession().add_all(new_comments)

            photdata = []
            varstarness = []

            ssdistnr = packet["candidate"].get("ssdistnr")

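            # a positive subtraction within 5 arcsec of a known solar-system
            # object is treated as a moving object ("roid"), not a variable star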
            is_roid = False
            if packet["candidate"].get("isdiffpos", 'f') in ["1", "t"]:
                if ssdistnr is not None and 0 <= ssdistnr <= 5:
                    is_roid = True

            for _, row in dflc.iterrows():
                rj = row.to_dict()
                # variable-star heuristic: a star-like PS1 counterpart within
                # 1 arcsec, or a negative subtraction with a valid magnitude
                star_like = (((packet["candidate"].get("sgscore1", 1.0) or 1.0) >= 0.5) and
                             ((packet["candidate"].get("distpsnr1", 10) or 10) < 1.0))
                negative_det = (rj.get("isdiffpos", 'f') not in ["1", "t"] and
                                not pd.isnull(rj.get('magpsf')))
                if star_like or negative_det:
                    if not is_roid:  # moving objects are not variable stars
                        varstarness.append(True)
                else:
                    varstarness.append(False)

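                # per-epoch photometry; pop() consumes keys so whatever remains
                # in rj ends up in altdata below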
                phot = {
                    "mag": rj.pop('magpsf'),
                    "e_mag": rj.pop("sigmapsf"),
                    "lim_mag": rj.pop('diffmaglim'),
                    "filter": str(rj.pop('fid')),
                    "score": rj.pop("rb"),
                    "candid": rj.pop("candid"),
                    "isdiffpos": rj.pop("isdiffpos") in ["1", "t"],
                    'dist_nearest_source': rj.pop("distnr"),
                    'mag_nearest_source': rj.pop("magnr"),
                    'e_mag_nearest_source': rj.pop("sigmagnr")
                }
                t = Time(rj.pop("jd"), format="jd")
                phot.update({
                    "observed_at": t.iso,
                    "mjd": t.mjd,
                    "time_format": "iso",
                    "time_scale": "utc"
                })

                # calculate the variable star mag
                sign = 1.0 if phot["isdiffpos"] else -1.0
                mref = phot["mag_nearest_source"]
                mref_err = phot["e_mag_nearest_source"]
                mdiff = phot["mag"]
                mdiff_err = phot["e_mag"]

                # Three options here:
                #   diff is detected in positive (ref source got brighter)
                #   diff is detected in the negative (ref source got fainter)
                #   diff is undetected in the neg/pos (ref similar source)
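                # combined brightness: m_tot = -2.5*log10(10**(-0.4*m_ref) ± 10**(-0.4*m_diff))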
                try:
                    if not pd.isnull(mdiff):
                        total_mag = -2.5 * np.log10(10**(-0.4 * mref) +
                                                    sign * 10**(-0.4 * mdiff))
                        tmp_total_mag_errs = (
                            -2.5 * np.log10(10**(-0.4 * mref) +
                                            sign * 10**(-0.4 * (mdiff + mdiff_err))) - total_mag,
                            -2.5 * np.log10(10**(-0.4 * mref) +
                                            sign * 10**(-0.4 * (mdiff - mdiff_err))) - total_mag)
                        # add errors in quadrature -- geometric mean of diff err
                        # and ref err
                        total_mag_err = np.sqrt(-1.0 * tmp_total_mag_errs[0] *
                                                tmp_total_mag_errs[1] +
                                                mref_err**2)
                    else:
                        # undetected: pin to the reference magnitude and derive
                        # a 5-sigma error floor from the limiting magnitude
                        mref = packet["candidate"].get("magnr")
                        mref_err = packet["candidate"].get("sigmagnr")
                        diff_err = (-2.5 * np.log10(10**(-0.4 * mref) +
                                                    sign * 10**(-0.4 * phot["lim_mag"])) -
                                    mref) / 5
                        total_mag = mref
                        total_mag_err = np.sqrt(mref_err**2 + diff_err**2)
                except Exception:
                    # magnitude algebra failed (e.g. a missing reference mag);
                    # fall back to sentinel values
                    total_mag = 99
                    total_mag_err = 0

                phot.update({"var_mag": total_mag, "var_e_mag": total_mag_err})

                # keep all remaining non-null values for this epoch
                altdata = {k: v for k, v in rj.items() if not pd.isnull(v)}

                phot.update({"altdata": altdata})
                photdata.append(copy.copy(phot))

            photometry = Photometry.query.filter(Photometry.source_id == packet["objectId"]) \
                                         .filter(Photometry.origin == f"{os.path.basename(self.fname)}")

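            # with clobber, photometry previously loaded from this packet is
            # replaced; otherwise it is left alone and re-adding is skipped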
            skip = False
            if self.clobber and photometry.count() > 0:
                print("removing preexisting photometry from this packet")
                photometry.delete()
                DBSession().commit()
            elif photometry.count() > 0:
                print("Existing photometry from this packet. Skipping addition of more.")
                skip = True

            if not skip:
                new_phot = [Photometry(instrument=self.ztfpack.i1,
                                       source_id=packet["objectId"],
                                       origin=f"{os.path.basename(self.fname)}",
                                       **row) for row in photdata]
                if new_source:
                    s.photometry = new_phot
                else:
                    # attach to the existing source via an explicit add
                    DBSession().add_all(new_phot)

            # s.spectra = []
            source_is_varstar = source_is_varstar or any(varstarness)
            s.varstar = source_is_varstar
            s.is_roid = is_roid
            s.transient = self._is_transient(dflc)

            DBSession().add(s)
            try:
                DBSession().commit()
            except Exception:
                print("error committing DB")
                DBSession().rollback()

            for ttype, ztftype in [('new', 'Science'), ('ref', 'Template'),
                                   ('sub', 'Difference')]:
                fname = f'{packet["candid"]}_{ttype}.png'
                gzname = f'{packet["candid"]}_{ttype}.fits.gz'

                t = Thumbnail(
                    type=ttype,
                    photometry_id=s.photometry[0].id,
                    file_uri=f'static/thumbnails/{packet["objectId"]}/{fname}',
                    origin=f"{os.path.basename(self.fname)}",
                    public_url=f'/static/thumbnails/{packet["objectId"]}/{fname}')
                tgz = Thumbnail(
                    type=ttype + "_gz",
                    photometry_id=s.photometry[0].id,
                    file_uri=f'static/thumbnails/{packet["objectId"]}/{gzname}',
                    origin=f"{os.path.basename(self.fname)}",
                    public_url=f'/static/thumbnails/{packet["objectId"]}/{gzname}')
                # persist both the png and fits.gz thumbnail records
                DBSession().add_all([t, tgz])
                stamp = packet[f'cutout{ztftype}']['stampData']

                if (not os.path.exists(self.ztfpack.basedir/f'static/thumbnails/{packet["objectId"]}/{fname}') or \
                    not os.path.exists(self.ztfpack.basedir/f'static/thumbnails/{packet["objectId"]}/{gzname}')) and \
                    not self.clobber:
                    with gzip.open(io.BytesIO(stamp), 'rb') as f:
                        with open(f"/tmp/{gzname}", "wb") as gz:
                            gz.write(f.read())
                        f.seek(0)
                        with fits.open(io.BytesIO(f.read())) as hdul:
                            hdul[0].data = np.flip(hdul[0].data, axis=0)
                            ffig = aplpy.FITSFigure(hdul[0])
                            ffig.show_grayscale(stretch='arcsinh', invert=True)
                            ffig.save(f"/tmp/{fname}")
                    os.makedirs(self.ztfpack.basedir /
                                f'static/thumbnails/{packet["objectId"]}',
                                exist_ok=True)
                    shutil.copy(
                        f"/tmp/{fname}", self.ztfpack.basedir /
                        f'static/thumbnails/{packet["objectId"]}/{fname}')
                    shutil.copy(
                        f"/tmp/{gzname}", self.ztfpack.basedir /
                        f'static/thumbnails/{packet["objectId"]}/{gzname}')

            try:
                s.add_linked_thumbnails()
            except Exception:
                print("Not linking thumbnails...not on the 'net?")

            # grab the photometry for this source and update relevant quantities

            # ra, dec update
            dat = pd.read_sql(
                DBSession().query(Photometry).filter(
                    Photometry.source_id == packet["objectId"]).filter(
                        Photometry.mag < 30).statement,
                DBSession().bind)
            if not s.varstar:
                infos = [(x["altdata"]["ra"], x["altdata"]["dec"], x["mag"],
                          x["e_mag"], x["score"], x["filter"])
                         for i, x in dat.iterrows()]
            else:
                infos = [
                    (x["altdata"]["ra"], x["altdata"]["dec"], x["var_mag"],
                     x["var_e_mag"], x["score"], x["filter"])
                    for i, x in dat.iterrows()
                ]

            detected = dat[~pd.isnull(dat["mag"])]
            s.detect_photometry_count = len(detected)
            s.last_detected = np.max(detected["observed_at"])

            calc_source_data = dict()
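            # positions are averaged with inverse-uncertainty (1/e_mag) weights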
            new_ra = np.average([x[0] for x in infos],
                                weights=[1. / x[3] for x in infos])
            new_dec = np.average([x[1] for x in infos],
                                 weights=[1. / x[3] for x in infos])
            ra_err = np.std([x[0] for x in infos])
            dec_err = np.std([x[1] for x in infos])

            scores = [x[4] for x in infos]
            calc_source_data.update({"min_score": np.nanmin(scores),
                                     "max_score": np.nanmax(scores)})

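            # per-filter summary: error-weighted mean magnitude and the
            # peak-to-peak magnitude change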
            filts = list(set([x[-1] for x in infos]))
            for f in filts:
                ii = [x for x in infos if x[-1] == f]
                rez = np.average([x[2] for x in ii],
                                 weights=[1 / x[3] for x in ii])
                if pd.isnull(rez):
                    rez = None
                md = (np.nanmax([x[2] for x in ii]) -
                      np.nanmin([x[2] for x in ii]))
                max_delta = md if not pd.isnull(md) else None

                calc_source_data[f] = {"max_delta": max_delta, "mag_avg": rez}

            s = Source.query.get(packet["objectId"])

            altdata = {k: v for k, v in calc_source_data.items()
                       if not pd.isnull(v)}

            s.altdata = altdata
            s.ra = new_ra
            s.dec = new_dec
            s.ra_err = ra_err
            s.dec_err = dec_err

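            # offset of the error-weighted mean position from the discovery position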
            c1 = SkyCoord(s.ra_dis * u.deg, s.dec_dis * u.deg, frame='fk5')
            c2 = SkyCoord(new_ra * u.deg, new_dec * u.deg, frame='fk5')
            sep = c1.separation(c2)
            s.offset = sep.arcsecond if not pd.isnull(sep.arcsecond) else 0.0

            # TNS
            tns = self._tns_search(s.ra_dis, s.dec_dis)
            s.tns_info = tns
            if tns["Name"]:
                s.tns_name = tns["Name"]

            # catalog search
            result_table = customSimbad.query_region(
                SkyCoord(f"{s.ra_dis}d {s.dec_dis}d", frame='icrs'),
                radius='0d0m3s')
            if result_table:
                try:
                    s.simbad_class = result_table["OTYPE"][0].decode(
                        "utf-8", "ignore")
                    s.simbad_info = result_table.to_pandas().dropna(
                        axis='columns').iloc[0].to_json()
                except Exception:
                    pass

            if s.simbad_class:
                DBSession().add(
                    Comment(text=f"Simbad class = {s.simbad_class}",
                            source_id=packet["objectId"],
                            user=self.ztfpack.group_admin_user,
                            ctype="classification",
                            origin=f"{os.path.basename(self.fname)}"))

            result_table = customGaia.query_region(
                SkyCoord(ra=s.ra_dis, dec=s.dec_dis, unit=(u.deg, u.deg),
                         frame='icrs'),
                width="3s",
                catalog=["I/345/gaia2"])
            if result_table:
                try:
                    rj = result_table.pop().to_pandas().dropna(
                        axis='columns').iloc[0].to_json()
                    s.gaia_info = rj
                except Exception:
                    pass

            DBSession().commit()
            print("added")
Example #2
        SOURCES = [{
            'id': '14gqr',
            'ra': 353.36647,
            'dec': 33.646149,
            'redshift': 0.063,
            'comments': ["No source at transient location to R>26 in LRIS imaging",
                         "Strong calcium lines have emerged."]
        }, {
            'id': '16fil',
            'ra': 322.718872,
            'dec': 27.574113,
            'redshift': 0.0,
            'comments': ["Frogs in the pond", "The eagle has landed"]
        }]

        (basedir / 'static/thumbnails').mkdir(parents=True, exist_ok=True)
        for source_info in SOURCES:
            comments = source_info.pop('comments')

            s = Source(**source_info, groups=[g])
            s.comments = [
                Comment(text=comment, user=group_admin_user)
                for comment in comments
            ]

            phot_file = os.path.join(
                os.path.dirname(os.path.dirname(__file__)), 'skyportal',
                'tests', 'data', 'phot.csv')
            phot_data = pd.read_csv(phot_file)
            s.photometry = [
                Photometry(instrument=i1, **row)
                for j, row in phot_data.iterrows()
            ]

            spec_file = os.path.join(
                os.path.dirname(os.path.dirname(__file__)), 'skyportal',
                'tests', 'data', 'spec.csv')
            spec_data = pd.read_csv(spec_file)
Example #3
                        band='optical')
        DBSession().add_all([i1, i2])

    with status("Creating dummy sources"):
        SOURCES = [{'id': '14gqr', 'ra': 353.36647, 'dec': 33.646149, 'redshift': 0.063,
                    'comments': ["No source at transient location to R>26 in LRIS imaging",
                                 "Strong calcium lines have emerged."]},
                   {'id': '16fil', 'ra': 322.718872, 'dec': 27.574113, 'redshift': 0.0,
                    'comments': ["Frogs in the pond", "The eagle has landed"]}]

        (basedir/'static/thumbnails').mkdir(parents=True, exist_ok=True)
        for source_info in SOURCES:
            comments = source_info.pop('comments')

            s = Source(**source_info, groups=[g])
            s.comments = [Comment(text=comment, author=group_admin_user.username)
                          for comment in comments]

            phot_file = os.path.join(os.path.dirname(os.path.dirname(__file__)),
                                     'skyportal', 'tests', 'data',
                                     'phot.csv')
            phot_data = pd.read_csv(phot_file)
            s.photometry = [Photometry(instrument=i1, **row)
                            for j, row in phot_data.iterrows()]

            spec_file = os.path.join(os.path.dirname(os.path.dirname(__file__)),
                                     'skyportal', 'tests', 'data',
                                     'spec.csv')
            spec_data = pd.read_csv(spec_file)
            s.spectra = [Spectrum(instrument_id=int(i),
                                  observed_at=datetime.datetime(2014, 10, 24),
                                  wavelengths=df.wavelength,