Example #1
            def callback(cutout):
                grid.add_cutout(cutout, frame_index, time_index)

                if grid.is_filled():
                    self._cutout_grids[grid.source] = grid
                    events.send(events.IMG_LOADED, grid.source)
                    logger.info("Triplet grid finished downloading.")
Example #2
File: app.py Project: R136a1-/MOP
    def __init__(self, working_directory, output_directory,
                 dry_run=False, debug=False):
        self.dry_run = dry_run

        logger.info("Input directory set to: %s" % working_directory)
        logger.info("Output directory set to: %s" % output_directory)

        working_context = context.get_context(working_directory)
        output_context = context.get_context(output_directory)

        if dry_run and working_context.is_remote():
            sys.stdout.write("A dry run can only be done on local files.\n")
            sys.exit(0)

        if output_context.is_remote():
            sys.stdout.write("The output directory must be local.\n")
            sys.exit(0)

        image_manager = self._create_image_manager()

        progress_manager = working_context.get_progress_manager()
        builder = self._create_workunit_builder(working_context,
                                                output_context,
                                                progress_manager)

        workunit_provider = WorkUnitProvider(self.input_suffix,
                                             working_context,
                                             progress_manager, builder,
                                             randomize=self.should_randomize_workunits)

        prefetching_workunit_provider = PreFetchingWorkUnitProvider(workunit_provider,
                                                                    config.read("PREFETCH.NUMBER"),
                                                                    image_manager)

        if working_context.is_remote():
            synchronization_manager = SynchronizationManager(working_context)
        else:
            synchronization_manager = None

        model = TransAckValidationModel(prefetching_workunit_provider,
                                        image_manager,
                                        synchronization_manager)
        logger.debug("Created model.")

        view = self._create_view(model, debug=debug)

        logger.debug("Created view.")
        model.start_work()

        self.model = model
        self.view = view
        self.controller = view.controller

        self.controller.display_current_image()

        if not synchronization_manager:
            self.view.disable_sync_menu()

        self.view.show()
Example #3
    def mark_apertures(self, cutout):
        logger.info("marking apertures on cutout.")
        x, y = cutout.pixel_source_point
        try:
            radii = (cutout.apcor.aperture,
                     cutout.apcor.sky,
                     cutout.apcor.swidth + cutout.apcor.sky)
        except AttributeError:
            # No aperture-correction data for this cutout; fall back to
            # default radii.
            radii = (4, 15, 30)
        self._displayables_by_cutout[cutout].place_annulus(x, y, radii, colour='r')
Example #4
    def show(self):
        self.mainframe.Show()

        if self.debug:
            logger.info("Launching view in debug mode.")
            wx.lib.inspection.InspectionTool().Show()

        self.wx_app.MainLoop()
Example #5
    def refresh_certificate(self, username, password):
        download_certificate(username, password)

        logger.info("Downloaded new CADC certificate")

        model = self.app.get_model()
        model.refresh_vos_client()

        self.app.get_view().show_image_loading_dialog()
        model.start_loading_images()
Example #6
File: app.py Project: R136a1-/MOP
def preload_iraf():
    logger.info("Preloading IRAF")

    # NOTE: Force expensive loading of libraries up front.  These are
    # libraries that the reals task needs but the candidates task
    # doesn't.  To make sure the candidates task doesn't load them, we
    # import them directly in the functions/methods where they are used.
    # TODO: find out what the best practice is for handling this sort of
    # situation and refactor.
    from pyraf import iraf
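The comment above describes the inverse pattern used elsewhere in the project: deferring an import into the function that needs it, so tasks that never call it never pay the load cost. A hypothetical sketch of that deferred-import style (the function is illustrative, not from MOP):

def measure_with_iraf(filename):
    # Deferred import: only code paths that reach this function pay
    # IRAF's expensive load cost.
    from pyraf import iraf
    logger.info("IRAF loaded to measure %s" % filename)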
Example #7
File: ssos.py Project: R136a1-/MOP
    def query_ssos(self, mpc_observations, lunation_count=None):
        """
        Query the SSOS service for images covering the given observations.

        We observe roughly a week on either side of new moon, but we don't
        know when in the dark run the discovery happened, so be generous
        with the search boundaries and add an extra two weeks.  The current
        date just has to be the night of the triplet.

        :param mpc_observations: a list of mpc.Observation objects
        :param lunation_count: how many dark runs (+ and -) to search into
        :return: an SSOSData object
        """
        if lunation_count is None:
            search_start_date = Time('2013-02-08', scale='utc')
            search_end_date = Time(datetime.datetime.now().strftime('%Y-%m-%d'),
                                   scale='utc')
        else:
            search_start_date = Time(mpc_observations[0].date.jd -
                                     (self._nights_per_darkrun +
                                      lunation_count * self._nights_separating_darkruns),
                                     format='jd', scale='utc')
            search_end_date = Time(mpc_observations[-1].date.jd +
                                   (self._nights_per_darkrun +
                                    lunation_count * self._nights_separating_darkruns),
                                   format='jd', scale='utc')

        query = Query(mpc_observations,
                      search_start_date=search_start_date,
                      search_end_date=search_end_date)
        tracks_data = self.ssos_parser.parse(query.get())

        tracks_data.mpc_observations = {}
        for mpc_observation in mpc_observations:
            # attach the input observations to the SSOS query result.
            assert isinstance(mpc_observation, mpc.Observation)
            tracks_data.mpc_observations[mpc_observation.comment.frame] = mpc_observation

        for source in tracks_data.get_sources():
            astrom_observations = tracks_data.observations
            source_readings = source.get_readings()
            for idx in range(len(source_readings)):
                source_reading = source_readings[idx]
                astrom_observation = astrom_observations[idx]
                logger.info("About to call orbfit predict")
                self.orbit.predict(astrom_observation.header['MJD_OBS_CENTER'])
                logger.info("Finished predict")
                source_reading.pa = self.orbit.pa
                # why are these being recorded just in pixels?
                source_reading.dra = self.orbit.dra / astrom_observation.header['SCALE']
                source_reading.ddec = self.orbit.ddec / astrom_observation.header['SCALE']

                frame = astrom_observation.rawname
                if frame in tracks_data.mpc_observations:
                    source_reading.discovery = tracks_data.mpc_observations[frame].discovery

        return tracks_data  # an SSOSData with .sources and .observations only
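The search window above stretches one dark run plus the requested number of lunations beyond each end of the observation arc. A standalone sketch of that arithmetic, with assumed values for the two spacing constants (the real ones live on the class as _nights_per_darkrun and _nights_separating_darkruns):

from astropy.time import Time

nights_per_darkrun = 18          # assumed value
nights_separating_darkruns = 30  # assumed value, roughly one lunation

first_obs_jd = Time('2013-04-09', scale='utc').jd
lunation_count = 1

search_start = Time(first_obs_jd - (nights_per_darkrun +
                                    lunation_count * nights_separating_darkruns),
                    format='jd', scale='utc')
print(search_start.iso)  # 48 nights before the first observation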
Example #8
    def get_workunit(self, ignore_list=None):
        """
        Gets a new unit of work.

        Args:
          ignore_list: list(str)
            A list of filenames which should be ignored.  Defaults to None.

        Returns:
          new_workunit: WorkUnit
            A new unit of work that has not yet been processed.  A lock on
            it has been acquired.

        Raises:
          NoAvailableWorkException
            There is no more work available.
        """
        if ignore_list is None:
            ignore_list = []

        potential_files = self.get_potential_files(ignore_list)

        while len(potential_files) > 0:
            potential_file = self.select_potential_file(potential_files)
            potential_files.remove(potential_file)

            if self._filter(potential_file):
                continue

            if self.directory_context.get_file_size(potential_file) == 0:
                continue

            if self.progress_manager.is_done(potential_file):
                self._done.append(potential_file)
                continue
            else:
                try:
                    self.progress_manager.lock(potential_file)
                except FileLockedException:
                    continue

                self._already_fetched.append(potential_file)

                return self.builder.build_workunit(
                    self.directory_context.get_full_path(potential_file))

        logger.info("No eligible workunits remain to be fetched.")

        raise NoAvailableWorkException()
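A minimal consumer sketch for this provider; the provider instance and the processing step are hypothetical, while NoAvailableWorkException is the documented termination signal:

processed = []
try:
    while True:
        workunit = provider.get_workunit(ignore_list=processed)
        processed.append(workunit.get_filename())
        # ... validate or process the locked workunit here ...
except NoAvailableWorkException:
    logger.info("All available work has been processed.")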
Example #9
    def _do_prefetch_workunit(self):
        try:
            workunit = self.workunit_provider.get_workunit(
                ignore_list=self.fetched_files)
            filename = workunit.get_filename()

            # 2 or more threads created back to back could end up
            # retrieving the same workunit.  Only keep one of them.
            if filename not in self.fetched_files:
                self.fetched_files.append(filename)
                self.workunits.append(workunit)

                logger.info("%s was prefetched." % filename)

        except NoAvailableWorkException:
            self._all_fetched = True
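The duplicate-filename guard exists because prefetches can run on separate threads. A sketch of how such a prefetch might be launched (the wrapper is an assumption, not the project's actual trigger):

import threading

def trigger_prefetch(provider):
    # Two threads started back to back can race to fetch the same
    # workunit; the filename check in _do_prefetch_workunit keeps one.
    thread = threading.Thread(target=provider._do_prefetch_workunit)
    thread.start()
    return thread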
Example #10
File: app.py Project: R136a1-/MOP
def create_application(taskname, working_directory, output_directory,
                       dry_run=False, debug=False):
    logger.info("Starting %s task." % taskname)

    if taskname == tasks.CANDS_TASK:
        ProcessCandidatesApplication(working_directory, output_directory,
                                     dry_run=dry_run, debug=debug)
    elif taskname == tasks.REALS_TASK:
        ProcessRealsApplication(working_directory, output_directory,
                                dry_run=dry_run, debug=debug)
    elif taskname == tasks.TRACK_TASK:
        ProcessTracksApplication(working_directory, output_directory,
                                 dry_run=dry_run, debug=debug)
    else:
        error_message = "Unknown task: %s" % taskname
        logger.critical(error_message)
        raise ValueError(error_message)
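A hedged invocation sketch; the directory paths are placeholders, while the task constant comes from the project's tasks module:

create_application(tasks.REALS_TASK,
                   "/data/ossos/working",   # hypothetical path
                   "/data/ossos/output",    # hypothetical path
                   dry_run=True,
                   debug=False)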
Example #11
    def get_workunit(self, ignore_list=None):
        """
        Gets a new unit of work.

        Args:
          ignore_list: list(str)
            A list of filenames which should be ignored.  Defaults to None.

        Returns:
          new_workunit: WorkUnit
            A new unit of work that has not yet been processed.  A lock on
            it has been acquired.

        Raises:
          NoAvailableWorkException
            There is no more work available.
        """
        if ignore_list is None:
            ignore_list = []

        potential_files = self.get_potential_files(ignore_list)

        while len(potential_files) > 0:
            potential_file = self.select_potential_file(potential_files)
            potential_files.remove(potential_file)

            if self.directory_context.get_file_size(potential_file) == 0:
                continue

            if self.progress_manager.is_done(potential_file):
                self._done.append(potential_file)
                continue
            else:
                try:
                    self.progress_manager.lock(potential_file)
                except FileLockedException:
                    continue

                self._already_fetched.append(potential_file)

                return self.builder.build_workunit(
                    self.directory_context.get_full_path(potential_file))

        logger.info("No eligible workunits remain to be fetched.")

        raise NoAvailableWorkException()
Example #12
    def _do_prefetch_workunit(self):
        try:
            workunit = self.workunit_provider.get_workunit(
                ignore_list=self.fetched_files)
            filename = workunit.get_filename()

            # 2 or more threads created back to back could end up
            # retrieving the same workunit.  Only keep one of them.
            if filename not in self.fetched_files:
                self.fetched_files.append(filename)
                self.workunits.append(workunit)
                self.image_manager.download_singlets_for_workunit(workunit)

                logger.info("%s was prefetched." % filename)

        except NoAvailableWorkException:
            self._all_fetched = True
Example #13
    def retrieve_comparison_image(self, downloader):
        """
        Search the DB for a comparison image for this cutout.
        """
        # selecting comparator when on a comparator should load a new one.

        ref_wcs = wcs.WCS(self.fits_header)
        try:
            ref_x = self.fits_header['NAXIS1'] / 2.0
            ref_y = self.fits_header['NAXIS2'] / 2.0
            (ref_ra, ref_dec) = ref_wcs.xy2sky(ref_x, ref_y)
        except Exception as e:
            logger.info(str(e))
            logger.info(str(self.fits_header))
            return None

        dra = self.fits_header['CD1_1'] * self.fits_header['NAXIS1'] / 2.0
        ddec = self.fits_header['CD2_2'] * self.fits_header['NAXIS2'] / 2.0
        radius = max(dra, ddec)

        logger.info("BOX({} {} {} {})".format(ref_ra, ref_dec, dra, ddec))

        query_result = storage.cone_search(ref_ra, ref_dec, dra, ddec)  # returns an astropy.table.table.Table

        comparison = None
        if len(query_result['collectionID']) > 0:  # are there any comparison images even available on that sky?
            for collectionID in query_result['collectionID']:
                if collectionID not in self._bad_comparison_images:
                    comparison = collectionID
                    self._bad_comparison_images.append(comparison)
                    break
            if comparison is None:
                logger.critical(str(self.fits_header))
                self._comparison_image = None
                return
        else:
            query_result.pprint()
            logger.info("No comparison images available for this piece of sky.")
            print "No comparison images available for this piece of sky."
            self._comparison_image = None
            return

        base_url = "https://www.cadc-ccda.hia-iha.nrc-cnrc.gc.ca/vospace/nodes/OSSOS/dbimages/{}/{}p.fits".format(
            comparison, comparison)
        cutout = 'CIRCLE ICRS {} {} {}'.format(ref_ra, ref_dec, radius)
        url = base_url + "?" + urllib.urlencode({'view': 'cutout', 'cutout': cutout})

        hdu_list = downloader.download_hdulist(uri=None, URL=url)

        comp_wcs = wcs.WCS(hdu_list[-1].header)
        (x, y) = comp_wcs.sky2xy(ref_ra, ref_dec)
        obs = Observation(str(comparison), 'p', ccdnum=str(hdu_list[-1].header.get('EXTVER', 0)))
        reading = SourceReading(x, y, ref_x, ref_y, ref_ra, ref_dec, ref_x, ref_y, obs, is_inverted=False)
        self._comparison_image = SourceCutout(reading, hdu_list, CoordinateConverter(0, 0))
Example #14
def create_application(task_name,
                       working_directory,
                       output_directory,
                       dry_run=False,
                       debug=False,
                       name_filter=None,
                       user_id=None,
                       skip_previous=False):
    logger.info("Starting %s task." % task_name)

    if task_name == tasks.CANDS_TASK:
        ProcessCandidatesApplication(working_directory,
                                     output_directory,
                                     dry_run=dry_run,
                                     debug=debug,
                                     name_filter=name_filter,
                                     user_id=user_id)
    elif task_name == tasks.REALS_TASK:
        ProcessRealsApplication(working_directory,
                                output_directory,
                                dry_run=dry_run,
                                debug=debug,
                                name_filter=name_filter,
                                user_id=user_id)
    elif task_name == tasks.TRACK_TASK:
        ProcessTracksApplication(working_directory,
                                 output_directory,
                                 dry_run=dry_run,
                                 debug=debug,
                                 name_filter=name_filter,
                                 skip_previous=skip_previous,
                                 user_id=user_id)
    else:
        error_message = "Unknown task: %s" % task_name
        logger.critical(error_message)
        raise ValueError(error_message)
Example #15
    def enable_synchronization(self):
        if self.synchronization_manager:
            self.synchronization_manager.enable_sync()
            logger.info("Synchronization enabled")
Example #16
    def do_synchronize(self, local_path):
        remote_uri = self.get_remote_uri(local_path)
        logger.info("Syncing %s to %s." % (local_path, remote_uri))
        storage.copy(local_path, remote_uri)
Example #17
    def disable_synchronization(self):
        if self.synchronization_manager:
            self.synchronization_manager.disable_sync()
            logger.info("Synchronization disabled")
Example #18
    def use_singlets(self):
        logger.info("Model set to use image singlets.")
        self.image_state = SingletState(self)
        self.image_state.enter_state()
Example #19
    def use_triplets(self):
        logger.info("Model set to use image triplets.")
        self.image_state = TripletState(self)
        self.image_state.enter_state()
Example #20
    def __init__(self,
                 working_directory,
                 output_directory,
                 dry_run=False,
                 debug=False,
                 name_filter=None,
                 user_id=None):
        self.dry_run = dry_run
        self.user_id = user_id
        logger.info("Input directory set to: %s" % working_directory)
        logger.info("Output directory set to: %s" % output_directory)

        working_context = context.get_context(working_directory,
                                              userid=self.user_id)
        output_context = context.get_context(output_directory,
                                             userid=self.user_id)

        if dry_run and working_context.is_remote():
            sys.stdout.write("A dry run can only be done on local files.\n")
            sys.exit(0)

        if output_context.is_remote():
            sys.stdout.write("The output directory must be local.\n")
            sys.exit(0)

        image_manager = self._create_image_manager()

        progress_manager = working_context.get_progress_manager()
        builder = self._create_workunit_builder(working_context,
                                                output_context,
                                                progress_manager)

        workunit_provider = WorkUnitProvider(
            self.input_suffix,
            working_context,
            progress_manager,
            builder,
            randomize=self.should_randomize_workunits,
            name_filter=name_filter)

        prefetching_workunit_provider = PreFetchingWorkUnitProvider(
            workunit_provider, config.read("PREFETCH.NUMBER"), image_manager)

        if working_context.is_remote():
            synchronization_manager = SynchronizationManager(working_context,
                                                             sync_enabled=True)
        else:
            synchronization_manager = None

        model = TransAckValidationModel(prefetching_workunit_provider,
                                        image_manager, synchronization_manager)
        logger.debug("Created model.")

        view = self._create_view(model, debug=debug)

        logger.debug("Created view.")
        model.start_work()

        self.model = model
        self.view = view
        self.controller = view.controller

        self.controller.display_current_image()

        if not synchronization_manager:
            self.view.disable_sync_menu()

        self.view.show()
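How a concrete application built on this constructor might be launched; ProcessRealsApplication appears in create_application above, and the paths and user id here are hypothetical:

ProcessRealsApplication("/data/ossos/working",
                        "/data/ossos/output",
                        dry_run=True,      # dry runs require local input
                        debug=False,
                        name_filter=None,
                        user_id="jdoe")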
Example #21
def preload_iraf():
    logger.info("Preloading IRAF")
Example #22
    def download_cutout(self, reading, focus=None, needs_apcor=False):
        """
        Downloads a cutout of the FITS image for a given source reading.

        Args:
          reading: ossos.astrom.SourceReading
            The reading which will be the focus of the downloaded image.
          focus: tuple(int, int)
            The x, y coordinates that should be the focus of the downloaded
            image.  These coordinates should be in terms of the
            reading parameter's coordinate system.
            Default value is None, in which case the source reading's x, y
            position is used as the focus.
          needs_apcor: bool
            If True, the apcor file with data needed for photometry
            calculations is downloaded in addition to the image.
            Defaults to False.

        Returns:
          cutout: ossos.downloads.data.SourceCutout
        """
        if focus is None:
            focus = reading.source_point

        cutout_str, converter = self.cutout_calculator.build_cutout_str(
            reading.get_extension(),
            focus,
            reading.get_original_image_size(),
            dx=reading.dx,
            dy=reading.dy,
            inverted=reading.is_inverted())

        image_uri = reading.get_image_uri()
        cutout = re.findall(r'(\d+)', cutout_str)
        y2 = int(cutout[-1])
        y1 = int(cutout[-2])
        logger.info("Calculated cutout: %s for %s"
                     % (cutout_str, image_uri))

        hdulist = self.download_hdulist(image_uri, view="cutout",
                                        cutout=cutout_str)
        # Modify the DATASEC to account for a possible flip/flop and for
        # changes in the dimensions of the image.
        (NAXIS1, NAXIS2) = reading.get_original_image_size()
        DATASEC = hdulist[0].header.get('DATASEC', None)
        if DATASEC is not None:
            datasec = re.findall(r'(\d+)', DATASEC)
            if y2 < y1:
                x2 = int(NAXIS1) - int(datasec[0]) + 1
                x1 = int(NAXIS1) - int(datasec[1]) + 1
                y2 = int(NAXIS2) - int(datasec[2]) + 1
                y1 = int(NAXIS2) - int(datasec[3]) + 1
                logger.info("Flip/Flopped DATASEC from {} to [{}:{}:{}:{}]".format(DATASEC, x1, x2, y1, y2))
                datasec = (x1, x2, y1, y2)
            (x1, y1) = converter.convert((int(datasec[0]), int(datasec[2])))
            x1 = max(1, x1)
            y1 = max(1, y1)
            (x2, y2) = converter.convert((int(datasec[1]), int(datasec[3])))
            x2 = min(x2, int(hdulist[0].header['NAXIS1']))
            y2 = min(y2, int(hdulist[0].header['NAXIS2']))
            datasec = "[{}:{},{}:{}]".format(x1, x2, y1, y2)
            logger.info("Trimmed and offset DATASEC from {} to {}".format(DATASEC, datasec))

            hdulist[0].header['DATASEC'] = datasec

        apcor = None
        if needs_apcor:
            try:
                apcor = self.download_apcor(reading.get_apcor_uri())
            except Exception:
                # No aperture-correction data available for this reading.
                apcor = None

        zmag = None
        try:
            zmag = self.download_zmag(reading.get_zmag_uri())
        except Exception as e:
            logger.debug(str(e))

        return SourceCutout(reading, hdulist, converter, apcor, zmag=zmag)
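The flip/flop branch above mirrors the DATASEC corners through the image dimensions when the cutout arrives inverted. A standalone check of that arithmetic, with an assumed MegaCam-like chip:

import re

NAXIS1, NAXIS2 = 2112, 4644  # assumed dimensions
datasec = [int(v) for v in re.findall(r'(\d+)', '[33:2080,1:4612]')]

x2 = NAXIS1 - datasec[0] + 1  # 2080
x1 = NAXIS1 - datasec[1] + 1  # 33
y2 = NAXIS2 - datasec[2] + 1  # 4644
y1 = NAXIS2 - datasec[3] + 1  # 33
print("[{}:{},{}:{}]".format(x1, x2, y1, y2))  # [33:2080,33:4644]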
Example #23
File: ssos.py Project: drusk/MOP
    def parse(self, ssos_result_filename_or_lines):
        """
        Given the SSOS result table, create 'source' objects.

        :param ssos_result_filename_or_lines: the SSOS result filename,
            or the lines of the result table itself
        """
        table_reader = ascii.get_reader(Reader=ascii.Basic)
        table_reader.inconsistent_handler = self._skip_missing_data
        table_reader.header.splitter.delimiter = '\t'
        table_reader.data.splitter.delimiter = '\t'
        table = table_reader.read(ssos_result_filename_or_lines)

        sources = []
        observations = []
        source_readings = []

        ref_pvwcs = None
        downloader = Downloader()
        warnings.filterwarnings('ignore')

        for row in table:
            # check if a dbimages object exists
            ccd = int(row['Ext']) - 1
            expnum = row['Image'].rstrip('p')

            # ADDING THIS TEMPORARILY TO GET THE NON-OSSOS DATA OUT OF THE WAY WHILE DEBUGGING
            if (row['Telescope_Insturment'] != 'CFHT/MegaCam') or (row['Filter'] != 'r.MP9601'):
                continue

            # it's fine for OSSOS, go get the image
            image_uri = storage.dbimages_uri(expnum=expnum,
                                             ccd=None,
                                             version='p',
                                             ext='.fits',
                                             subdir=None)
            logger.info('Trying to access %s\n%s' % (row.data, image_uri))

            if not storage.exists(image_uri, force=False):
                logger.warning('Image not in dbimages? Trying subdir.')
                image_uri = storage.dbimages_uri(expnum=expnum,
                                                 ccd=ccd,
                                                 version='p')

                if not storage.exists(image_uri, force=False):
                    logger.warning("Image doesn't exist in ccd subdir. %s" % image_uri)
                    continue

            if row['X'] == -9999 or row['Y'] == -9999:
                logger.warning("Skipping %s as x/y not resolved." % row['Image'])
                continue

            mopheader_uri = storage.dbimages_uri(expnum=expnum,
                                                 ccd=ccd,
                                                 version='p',
                                                 ext='.mopheader')

            if mopheader_uri not in mopheaders:
                if not storage.exists(mopheader_uri, force=False):
                    logger.warning('mopheader missing, but image exists')
                    continue

                # raise flag if no MOPHEADER
                mopheader_fpt = cStringIO.StringIO(storage.open_vos_or_local(mopheader_uri).read())
                mopheader = fits.open(mopheader_fpt)
                mopheaders[mopheader_uri] = mopheader
            mopheader = mopheaders[mopheader_uri]
            
            # Build astrom.Observation
            observation = astrom.Observation(expnum=str(expnum),
                                             ftype='p',
                                             ccdnum=str(ccd),
                                             fk="")

            observation.rawname = os.path.splitext(os.path.basename(image_uri))[0] + str(ccd).zfill(2)

            observation.header = mopheader[0].header
            MJD_OBS_CENTER = mpc.Time(observation.header['MJD-OBSC'],
                                      format='mjd',
                                      scale='utc',
                                      precision=5).replicate(format='mpc')
            observation.header['MJD_OBS_CENTER'] = str(MJD_OBS_CENTER)
            observation.header['MAXCOUNT'] = MAXCOUNT
            observation.header['SCALE'] = observation.header['PIXSCALE']
            #observation.header['CHIP'] = str(observation.header['CHIPNUM']).zfill(2)
            observation.header['NAX1'] = observation.header['NAXIS1']
            observation.header['NAX2'] = observation.header['NAXIS2']
            observation.header['MOPversion'] = observation.header['MOP_VER']
            observation.header['FWHM'] = 4

            # Download a 1x1 pixel cutout of this image to compute
            # positional offsets with.
            x_cen = int(min(max(1, row['X']), observation.header['NAX1']))
            y_cen = int(min(max(1, row['Y']), observation.header['NAX2']))
            if image_uri not in astheaders:
                hdulist = downloader.download_hdulist(
                    uri=image_uri,
                    view='cutout',
                    cutout='[{}][{}:{},{}:{}]'.format(ccd + 1, x_cen, x_cen, y_cen, y_cen))
                astheaders[image_uri] = hdulist
            hdulist = astheaders[image_uri]

            pvwcs = wcs.WCS(hdulist[0].header)
            (ra, dec) = pvwcs.xy2sky(x_cen, y_cen)
            if ref_pvwcs is None:
                ref_pvwcs = pvwcs
                xref = row['X']
                yref = row['Y']
            (x0, y0) = ref_pvwcs.sky2xy(ra, dec)
            x0 += row['X'] - x_cen
            y0 += row['Y'] - y_cen

            # Build astrom.SourceReading
            observations.append(observation)

            from_input_file = observation.rawname in self.input_rawnames
            null_observation = observation.rawname in self.null_observations

            print observation.rawname, observation.header['MJD_OBS_CENTER'], null_observation, from_input_file

            source_reading = astrom.SourceReading(x=row['X'], y=row['Y'],
                                                  xref=xref, yref=yref,
                                                  x0=x0, y0=y0,
                                                  ra=row['Object_RA'], dec=row['Object_Dec'],
                                                  obs=observation,
                                                  ssos=True,
                                                  from_input_file=from_input_file,
                                                  null_observation=null_observation)
            #if observation.rawname in  self.input_rawnames:
            #    source_readings.insert(0, source_reading)
            #else:
            source_readings.append(source_reading)
        # build our array of SourceReading objects
        sources.append(source_readings)

        warnings.filterwarnings('once')

        return SSOSData(observations, sources, self.provisional_name)
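parse() relies on names that never appear in the snippet: mopheaders and astheaders cache downloads across rows, and MAXCOUNT is written into each header. A sketch of the module-level state this code appears to assume (the value is illustrative, not taken from the project):

mopheaders = {}   # mopheader_uri -> opened mopheader HDU list
astheaders = {}   # image_uri -> single-pixel cutout HDU list
MAXCOUNT = 30000  # assumed saturation threshold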
Example #24
    args = parser.parse_args()

    astrom.DATASET_ROOT = args.dbimages
    storage.DBIMAGES = args.dbimages
    storage.MEASURE3 = args.measure3

    ## only measure if we have completed 'reals' on this file.
    reals_filepath = (args.reals is not None and args.reals) or args.measure3
    cands_filepath = args.measure3
    cands_filelist = storage.my_glob(cands_filepath + "/fk*.cands.astrom")

    eff_file_list = []
    for cands in cands_filelist:
        cands_filename = os.path.basename(cands)
        if os.path.exists(cands_filename + ".eff"):
            logger.info("{}.eff exists, skipping".format(cands_filename))
            continue
        reals_filename = reals_filepath + cands_filename.replace('cands', 'reals')
        if not ((reals_filename[0:4] == 'vos:' and storage.exists(reals_filename + ".DONE"))
                or os.access(reals_filename + ".DONE", os.R_OK)):
            # skipping incomplete field/ccd combo
            continue
        sys.stderr.write("Getting list of .mpc files for input " + reals_filename + " ...")
        # find and load any .mpc files associated with this candidate file.
        mpc_list = storage.my_glob(reals_filename + ".*.mpc")
        sys.stderr.write(" got {} detections \n".format(len(mpc_list)))
        if len(mpc_list) == 0:
            continue
        measures = {}
        for mpc_fname in mpc_list:
            provisional = mpc_fname.split(".")[-2]
            measures[provisional] = []