Example #1
    def _process_current_lsd(self):
        # Combine the current set of files into a timestream

        lsd = self._current_lsd

        # Calculate the length of data in this current LSD
        day_length = np.sum([
            max(min(lsd + 1, self.observer.unix_to_lsd(tl.time[-1])), lsd) -
            max(min(lsd + 1, self.observer.unix_to_lsd(tl.time[0])), lsd)
            for tl in self._timestream_list
        ])

        if mpiutil.rank0:
            print "Day length %0.2f" % day_length

        # If the amount of data for this day is too small, then just skip
        if day_length < self.min_day_length:
            return None

        if self._timestream_list[0].vis.comm.rank == 0:
            print "Constructing LSD:%i [%i files]" % (
                lsd, len(self._timestream_list))

        # Construct the combined timestream
        ts = tod.concatenate(self._timestream_list)

        # Add attributes for the LSD and a tag for labelling saved files
        ts.attrs['tag'] = ('lsd_%i' % lsd)
        ts.attrs['lsd'] = lsd

        return ts
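The day-length sum above clamps each file's start and end time (converted to LSD units) into the window [lsd, lsd + 1] and adds up the per-file overlaps, so gaps between files do not count towards the total. A minimal standalone sketch of that clamping, with a hypothetical data_length_in_lsd helper and a file_spans list of (start, end) pairs in LSD units standing in for the real timestream objects:

import numpy as np

def data_length_in_lsd(file_spans, lsd):
    # file_spans: (start_lsd, end_lsd) pairs, one per file, already
    # converted from unix time into LSD units.
    # Clamp each span to the window [lsd, lsd + 1] and sum the overlaps.
    return float(np.sum([
        max(min(lsd + 1, end), lsd) - max(min(lsd + 1, start), lsd)
        for start, end in file_spans
    ]))

# Two files: one inside day 2845, one straddling the boundary to day 2846.
print(data_length_in_lsd([(2845.10, 2845.35), (2845.90, 2846.05)], 2845))
# ~0.35 (0.25 from the first file plus 0.10 from the second)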
Example #2
    def _process_current_lsd(self):
        # Combine the current set of files into a timestream

        lsd = self._current_lsd

        # Calculate the length of data in this current LSD
        start = self.observer.unix_to_lsd(self._timestream_list[0].time[0])
        end = self.observer.unix_to_lsd(self._timestream_list[-1].time[-1])
        day_length = min(end, lsd + 1) - max(start, lsd)

        # If the amount of data for this day is too small, then just skip
        if day_length < self.min_day_length:
            return None

        self.log.info("Constructing LSD:%i [%i files]", lsd,
                      len(self._timestream_list))

        # Construct the combined timestream
        ts = tod.concatenate(self._timestream_list)

        # Add attributes for the LSD and a tag for labelling saved files
        ts.attrs["tag"] = "lsd_%i" % lsd
        ts.attrs["lsd"] = lsd

        return ts
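This version simplifies the calculation to a single clamp of the whole span, from the first sample of the first file to the last sample of the last file, so gaps between files are still counted as data. An illustrative comparison using the same made-up numbers as in the sketch above:

lsd = 2845
start, end = 2845.10, 2846.05   # first-file start, last-file end, in LSD units
day_length = min(end, lsd + 1) - max(start, lsd)
print(day_length)  # ~0.90: the gap between files is counted, unlike Example #1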
Example #3
    def _finalize_transit(self):
        """Concatenate grouped time streams for the currrent transit."""

        # Find where transit starts and ends
        if len(self.tstreams) == 0 or self.cur_transit is None:
            self.log.info("Did not find any transits.")
            return None
        self.log.debug(
            "Finalising transit for {}...".format(
                ephem.unix_to_datetime(self.cur_transit)
            )
        )
        all_t = np.concatenate([ts.time for ts in self.tstreams])
        start_ind = int(np.argmin(np.abs(all_t - self.start_t)))
        stop_ind = int(np.argmin(np.abs(all_t - self.end_t)))

        # Save list of filenames
        filenames = [ts.attrs["filename"] for ts in self.tstreams]

        dt = self.tstreams[0].time[1] - self.tstreams[0].time[0]
        if dt <= 0:
            self.log.warning(
                "Time steps are not positive definite: dt={:.3f}".format(dt)
                + " Skipping."
            )
            ts = None
        elif stop_ind - start_ind > int(self.min_span / 360.0 * SIDEREAL_DAY_SEC / dt):
            if len(self.tstreams) > 1:
                # Concatenate timestreams
                ts = tod.concatenate(self.tstreams, start=start_ind, stop=stop_ind)
            else:
                ts = self.tstreams[0]
            _, dec = ephem.object_coords(
                self.src, all_t[0], deg=True, obs=self.observer
            )
            ts.attrs["dec"] = dec
            ts.attrs["source_name"] = self.source
            ts.attrs["transit_time"] = self.cur_transit
            ts.attrs["observation_id"] = self.obs_id
            ts.attrs["tag"] = "{}_{:0>4d}_{}".format(
                self.source,
                self.obs_id,
                ephem.unix_to_datetime(self.cur_transit).strftime("%Y%m%dT%H%M%S"),
            )
            ts.attrs["archivefiles"] = filenames
        else:
            self.log.info("Transit too short. Skipping.")
            ts = None

        self.tstreams = []
        self.cur_transit = None

        return ts
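The minimum-span test above, stop_ind - start_ind > int(self.min_span / 360.0 * SIDEREAL_DAY_SEC / dt), appears to treat min_span as degrees of sidereal rotation and converts it into a number of time samples at cadence dt. A hedged sketch of that conversion, assuming SIDEREAL_DAY_SEC is roughly 86164 seconds and that the degrees interpretation is correct (the min_sample_count helper is made up for illustration):

SIDEREAL_DAY_SEC = 86164.0905   # assumed: one sidereal day in seconds

def min_sample_count(min_span_deg, dt):
    # Convert a minimum transit span, expressed in degrees of sidereal
    # rotation, into the equivalent number of samples at cadence dt (seconds).
    return int(min_span_deg / 360.0 * SIDEREAL_DAY_SEC / dt)

print(min_sample_count(5.0, 10.0))  # a 5-degree span at 10 s cadence -> 119 samples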
Example #4
    def _process_current_lsd(self):
        # Override with a weather-file-specific version. It's not clear *why*
        # exactly this is needed.

        # Check if we have weather data for this day.
        if len(self._timestream_list) == 0:
            self.log.info("No weather data for this sidereal day")
            return None

        # Check if there is data missing
        # Calculate the length of data in this current LSD
        start = self._timestream_list[0].time[0]
        end = self._timestream_list[-1].time[-1]
        sid_seconds = 86400.0 / ephemeris.SIDEREAL_S

        if (end - start) < (sid_seconds + 2 * self.padding):
            self.log.info("Not enough weather data - skipping this day")
            return None

        lsd = self._current_lsd

        # Convert the current lsd day to unix time and pad it.
        unix_start = self.observer.lsd_to_unix(lsd)
        unix_end = self.observer.lsd_to_unix(lsd + 1)
        self.pad_start = unix_start - self.padding
        self.pad_end = unix_end + self.padding

        times = np.concatenate([ts.time for ts in self._timestream_list])
        start_ind = int(np.argmin(np.abs(times - self.pad_start)))
        stop_ind = int(np.argmin(np.abs(times - self.pad_end)))

        self.log.info("Constructing LSD:%i [%i files]", lsd,
                      len(self._timestream_list))

        # Concatenate timestreams
        ts = tod.concatenate(self._timestream_list,
                             start=start_ind,
                             stop=stop_ind)

        # Make sure that our timestamps of the concatenated files don't fall
        # out of the requested lsd time span
        if (ts.time[0] > unix_start) or (ts.time[-1] < unix_end):
            return None

        ts.attrs["tag"] = "lsd_%i" % lsd
        ts.attrs["lsd"] = lsd

        return ts
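Example #4 pads the unix-time boundaries of the sidereal day by self.padding seconds on each side, picks the nearest sample indices with argmin, and then checks that the concatenated data still covers the full day. A self-contained sketch of that index selection, using a synthetic time axis and a hypothetical linear lsd_to_unix in place of self.observer.lsd_to_unix (the LSD_ZERO_UNIX epoch and cadence are made up for illustration):

import numpy as np

SIDEREAL_DAY_SEC = 86164.0905   # assumed: one sidereal day in seconds
LSD_ZERO_UNIX = 1325030000.0    # hypothetical unix time of LSD 0

def lsd_to_unix(lsd):
    # Illustrative stand-in: one sidereal day of unix seconds per LSD.
    return LSD_ZERO_UNIX + lsd * SIDEREAL_DAY_SEC

lsd, padding = 2845, 600.0      # pad the day by 10 minutes on each side
unix_start, unix_end = lsd_to_unix(lsd), lsd_to_unix(lsd + 1)

# Synthetic 10 s cadence time axis that over-covers the padded window.
times = np.arange(unix_start - 3600.0, unix_end + 3600.0, 10.0)

# Nearest-sample indices to the padded boundaries, as in Example #4.
start_ind = int(np.argmin(np.abs(times - (unix_start - padding))))
stop_ind = int(np.argmin(np.abs(times - (unix_end + padding))))

# The selected slice spans the padded day; Example #4 then verifies that the
# concatenated data actually covers [unix_start, unix_end] before keeping it.
print(times[start_ind] <= unix_start, times[stop_ind] >= unix_end)  # True True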