Example 1
def _update_spatial_wcs(part, chunk, headers, obs_id):
    logging.debug(f'Begin _update_spatial_wcs for {obs_id}')
    if part.name == '1':
        # DB/NC 22-07-20
        #
        # Use the NAXIS* values from the 'SCI' extension.
        #
        # CD*:  That should give approximately correct pixel scales along the
        # axes in degrees/pixel.
        #
        # The hard coded values for CRPIX assume the RA/DEC values are at the
        # centre of the 60/62 array of pixels in the final product.  The
        # primary header CRPIX values would have referred to the original
        # ~2000 x ~2000 pixel detector array I’m guessing.

        header = headers[0]
        idx = mc.to_int(part.name)
        header['NAXIS1'] = headers[idx].get('NAXIS1')
        header['NAXIS2'] = headers[idx].get('NAXIS2')
        header['CD1_1'] = 3.0 / (header.get('NAXIS1') * 3600.0)
        header['CD2_2'] = 3.0 / (header.get('NAXIS2') * 3600.0)
        header['CD1_2'] = 0.0
        header['CD2_1'] = 0.0
        header['CRPIX1'] = header.get('NAXIS1') / 2.0
        header['CRPIX2'] = header.get('NAXIS2') / 2.0
        wcs_parser = WcsParser(header, obs_id, 0)
        wcs_parser.augment_position(chunk)
        chunk.position_axis_1 = 1
        chunk.position_axis_2 = 2
    logging.debug('End _update_spatial_wcs')
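
A minimal sketch of the header patch above, assuming a hypothetical 60 x 62 'SCI' array (the size mentioned in the comment); astropy's fits.Header is used only as a container to write into, not as the pipeline's actual header source.

from astropy.io import fits

header = fits.Header()
header['NAXIS1'] = 60                    # copied from the 'SCI' extension
header['NAXIS2'] = 62
header['CD1_1'] = 3.0 / (60 * 3600.0)    # ~1.39e-5 deg/pixel
header['CD2_2'] = 3.0 / (62 * 3600.0)    # ~1.34e-5 deg/pixel
header['CD1_2'] = 0.0
header['CD2_1'] = 0.0
header['CRPIX1'] = 60 / 2.0              # reference pixel at the array centre
header['CRPIX2'] = 62 / 2.0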
Example 2
def get_version(entry):
    """The parameter may be a URI, or just the file name."""
    # file name looks like:
    # 'VLASS1.2.ql.T20t12.J092604+383000.10.2048.v2.I.iter1.image.'
    # 'pbcor.tt0.rms.subim.fits'
    file_name = entry
    if '/' in entry:
        file_name = mc.CaomName(entry).file_name
    bits = file_name.split('.')
    version_str = bits[7].replace('v', '')
    return mc.to_int(version_str)
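
A hypothetical check against the sample file name from the comment above (caom2pipe's mc module is assumed importable): the version token is the eighth dot-separated field, 'v2', so 2 is returned.

name = (
    'VLASS1.2.ql.T20t12.J092604+383000.10.2048.v2.I.iter1.image.'
    'pbcor.tt0.rms.subim.fits'
)
print(name.split('.')[7])    # 'v2'
print(get_version(name))     # 2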
Example 3
    def _try_to_append_to_cache(self, run_id):
        if run_id is None or run_id == '':
            # the case of no value for run_id
            return

        sem = CFHTCache.semester(run_id)
        sem_int = mc.to_int(sem[:-1])
        updated_content = False
        if len(sem) < 3 or sem[0] not in ['0', '1', '2'] or sem_int < 9:
            # the URL here only works from 2009B on
            return
        base_url = 'http://www.cfht.hawaii.edu/en/science/QSO/'
        semester_url = f'{base_url}20{sem}/'
        self._logger.info(
            f'Checking for semester information at {semester_url}')
        response = mc.query_endpoint(semester_url)
        soup = BeautifulSoup(response.text, features='lxml')
        response.close()

        html_table = soup.find('table')
        rows = html_table.find_all('a', string=re.compile('\\.html'))
        for row in rows:
            inst_url = row.get('href')
            if not isinstance(inst_url, str) or 'qso_prog_' not in inst_url:
                continue
            if not inst_url.startswith('http'):
                inst_url = f'{semester_url}/{inst_url}'

            self._logger.info(
                f'Querying {inst_url} for new project information.')
            inst_response = mc.query_endpoint(inst_url)
            inst_soup = BeautifulSoup(inst_response.text, features='lxml')
            inst_response.close()
            table_rows = inst_soup.find_all('tr')
            for table_row in table_rows:
                tds = table_row.find_all('td')
                count = 0
                for td in tds:
                    if count == 0:
                        program_id = td.text
                    if count == 5:
                        title = td.text
                        updated_content = True
                        self._project_titles[program_id] = \
                            CFHTCache.clean(title)
                        break
                    count += 1
        if updated_content:
            self.save()
        self._cached_semesters.append(CFHTCache.semester(run_id))
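
The nested loop above distils to a standard BeautifulSoup pattern: walk the table rows, read the program id from the first cell and the title from the sixth. A self-contained sketch against invented inline HTML (no network access; the markup and values are hypothetical):

from bs4 import BeautifulSoup

html = '''
<table>
  <tr>
    <td>20AP01</td><td>-</td><td>-</td><td>-</td><td>-</td>
    <td>A hypothetical program title</td>
  </tr>
</table>
'''
soup = BeautifulSoup(html, features='lxml')
titles = {}
for table_row in soup.find_all('tr'):
    tds = table_row.find_all('td')
    if len(tds) >= 6:
        # column 0 is the program id, column 5 is the title
        titles[tds[0].text] = tds[5].text
print(titles)    # {'20AP01': 'A hypothetical program title'}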
Example 4
    def _rebin_factor(self, a, new_shape):
        """
        Re-bin an array to a new shape.

        :param new_shape: each dimension must evenly divide the
            corresponding dimension of a.shape.
        """
        assert len(a.shape) == len(new_shape)
        assert not np.any(np.mod(a.shape, new_shape))

        slices = [
            slice(None, None, mc.to_int(old / new))
            for old, new in zip(a.shape, new_shape)
        ]
        self._logger.debug(slices)
        return a[tuple(slices)]
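
Hypothetical usage of the slicing trick above (with // standing in for mc.to_int(old / new)): the "re-binning" subsamples with a stride rather than averaging, so a 4x4 array re-binned to (2, 2) keeps every second row and column.

import numpy as np

a = np.arange(16).reshape(4, 4)
slices = tuple(
    slice(None, None, old // new) for old, new in zip(a.shape, (2, 2))
)
print(a[slices])
# [[ 0  2]
#  [ 8 10]]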
Example 5
def update(observation, **kwargs):
    """Called to fill multiple CAOM model elements and/or attributes, must
    have this signature for import_module loading and execution.

    :param observation A CAOM Observation model instance.
    :param **kwargs Everything else."""
    logging.debug('Begin update.')
    mc.check_param(observation, Observation)

    headers = kwargs.get('headers')
    fqn = kwargs.get('fqn')

    try:
        for plane in observation.planes.values():
            for artifact in plane.artifacts.values():
                temp_parts = TypedOrderedDict(Part, )
                # need to rename the BINARY TABLE extensions, which have
                # different telemetry, and remove their chunks
                for part_key in ['1', '2', '3', '4', '5']:
                    if part_key in artifact.parts:
                        hdu_count = mc.to_int(part_key)
                        temp = artifact.parts.pop(part_key)
                        temp.product_type = ProductType.AUXILIARY
                        temp.name = headers[hdu_count].get('EXTNAME')
                        while len(temp.chunks) > 0:
                            temp.chunks.pop()
                        temp_parts.add(temp)
                for part in artifact.parts.values():
                    if part.name == '0':
                        part.product_type = artifact.product_type
                        for chunk in part.chunks:
                            chunk.product_type = artifact.product_type
                            _build_chunk_energy(chunk, headers)
                            _build_chunk_position(chunk, headers,
                                                  observation.observation_id)
                            chunk.time_axis = None
                for part in temp_parts.values():
                    artifact.parts.add(part)
        logging.debug('Done update.')
    except Exception as e:
        logging.error(e)
        logging.debug(traceback.format_exc())
        observation = None
    return observation
Example 6
def rename_parts(observation, headers):
    """
    By default, the values for part.name are extension numbers. Replace those
    with the value of the EXTNAME keyword. The part.name is the key value in
    the TypedOrderedDict, so this is done as a pop/push.

    :param observation Observation instance with parts that may have the
        wrong names
    :param headers astropy FITS Header list
    """
    part_keys = [str(ii) for ii in range(1, headers[0].get('NEXTEND') + 1)]
    for plane in observation.planes.values():
        for artifact in plane.artifacts.values():
            temp_parts = TypedOrderedDict(Part, )
            for part_key in part_keys:
                if part_key in artifact.parts:
                    hdu_count = mc.to_int(part_key)
                    temp = artifact.parts.pop(part_key)
                    temp.name = headers[hdu_count].get('EXTNAME')
                    temp_parts.add(temp)
            for part in temp_parts.values():
                artifact.parts.add(part)
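
A minimal sketch of the pop/push re-keying that rename_parts performs, with plain dicts standing in for artifact.parts and the FITS headers (the real code uses caom2's TypedOrderedDict, and the EXTNAME values here are invented):

parts = {'1': 'part one', '2': 'part two'}    # keyed by extension number
extnames = {1: 'SCI', 2: 'DQ'}                # EXTNAME per extension

renamed = {}
for key in list(parts):
    value = parts.pop(key)
    renamed[extnames[int(key)]] = value       # re-key by EXTNAME
parts.update(renamed)
print(parts)    # {'SCI': 'part one', 'DQ': 'part two'}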
Example 7
def get_position_axis_function_naxis2(header):
    result = mc.to_int(header.get('NAXIS2'))
    if result is not None:
        result = result / 2.0
    return result
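
Hypothetical usage, with a plain dict standing in for the FITS header; caom2pipe's mc.to_int is assumed to pass None through, which is why the None check above exists.

print(get_position_axis_function_naxis2({'NAXIS2': 2048}))    # 1024.0
print(get_position_axis_function_naxis2({}))                  # None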
Example 8
    def _do_sci(self, hdu_list, header, storage_name, science_fqn, preview_fqn,
                thumb_fqn):
        logging.debug(f'Do science preview augmentation with {science_fqn}')
        count = 0
        detector = header.get('DETECTOR')
        instrument = header.get('INSTRUME')
        if detector in [
                'SITe-4', 'UBC-1', 'SITe-2', 'SITe-5', 'E2V-1', 'E2V-4'
        ]:
            # unprocessed CCD data
            if detector == 'SITe-4':
                axis = 'NAXIS2'
                naxis1 = mc.to_int(header.get(axis))
                xc = naxis1 / 2
                xs = 512
                xoffset = xc - xs / 2
                rotate = '90.0'
                geometry = '256x' + str(xs) + '+1+' + str(xoffset)
                resize1 = 'x1024'
                resize2 = 'x256'
            else:
                axis = 'NAXIS1'
                naxis1 = mc.to_int(header.get(axis))
                xc = naxis1 / 2
                xs = 512
                xoffset = xc - xs / 2
                rotate = '0.0'
                geometry = str(xs) + 'x256+' + str(xoffset) + '+1'
                resize1 = '1024x1024'
                resize2 = '256'

            if 'Imager' in instrument:
                mc.exec_cmd(f'convert -resize 1024x1024 '
                            f'-normalize -negate {science_fqn} {preview_fqn}')
                mc.exec_cmd(f'convert -resize 256x256 -normalize -negate '
                            f'{science_fqn} {thumb_fqn}')
            else:
                mc.exec_cmd(f'convert -resize {resize1} -rotate {rotate} '
                            f'-normalize -negate {science_fqn} {preview_fqn}')
                mc.exec_cmd(f'convert -crop {geometry} -resize {resize2} '
                            f'-rotate {rotate} -normalize '
                            f'-negate {science_fqn} {thumb_fqn}')
            count = 2
        else:
            # unprocessed RETICON spectrum
            object_type = header.get('OBJECT')
            if object_type is not None:
                naxis1 = header.get('NAXIS1')
                logging.info(f'Object: {object_type}')

                signal = hdu_list[0].data[0]
                baseline = hdu_list[0].data[1]
                flux = np.subtract(signal, baseline)
                # pixel coordinates 1..NAXIS1 for the x axis
                wln = np.arange(1, naxis1 + 1)
                self._write_files_to_disk(
                    wln, flux, 'Pixel',
                    f'{storage_name.file_id}: {object_type}', thumb_fqn,
                    preview_fqn)
                count = 2
        return count
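
A worked example of the crop-geometry arithmetic for the non-SITe-4 branch, assuming a hypothetical NAXIS1 of 2048; note the offset stays a float, exactly as the code above builds the string for ImageMagick's convert.

naxis1 = 2048
xc = naxis1 / 2                                     # 1024.0
xs = 512
xoffset = xc - xs / 2                               # 768.0
geometry = str(xs) + 'x256+' + str(xoffset) + '+1'
print(geometry)                                     # 512x256+768.0+1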
Example 9
def update(observation, **kwargs):
    """Called to fill multiple CAOM model elements and/or attributes (an n:n
    relationship between TDM attributes and CAOM attributes). Must have this
    signature for import_module loading and execution.

    :param observation A CAOM Observation model instance.
    :param **kwargs Everything else."""
    logging.debug('Begin update.')
    mc.check_param(observation, Observation)

    headers = kwargs.get('headers')
    fqn = kwargs.get('fqn')
    uri = kwargs.get('uri')
    gem_proc_name = None
    # ok not to use builder here, since the obs_id value is never used later
    if uri is not None:
        temp = mc.CaomName(uri).file_name
        gem_proc_name = builder.GemProcName(entry=temp)
    if fqn is not None:
        gem_proc_name = builder.GemProcName(entry=fqn)
    if gem_proc_name is None:
        raise mc.CadcException(f'Need one of fqn or uri defined for '
                               f'{observation.observation_id}')

    for plane in observation.planes.values():
        if plane.product_id != gem_proc_name.product_id:
            continue

        for artifact in plane.artifacts.values():
            for part in artifact.parts.values():
                idx = mc.to_int(part.name)
                header = headers[idx]
                extname = header.get('EXTNAME')
                # DB 22-07-20
                # There are a few other EXTNAME values to look at for
                # part.ProductType.   MDF values would be ‘AUXILIARY’.  The
                # ones currently called “CAL” are likely best set to ‘INFO’
                # since it contains info about datasets used to produce the
                # product.
                #
                # DB 07-08-20
                # EXTNAME  in (‘DQ’, ‘VAR’) should both have
                # ProductType.NOISE.   ‘CAL’ should no longer exist - it’s now
                # BPM. Default type is 'AUXILIARY', 'SCI' is type 'SCIENCE'
                if extname == 'SCI':
                    part.product_type = ProductType.SCIENCE
                elif extname in ['DQ', 'VAR']:
                    part.product_type = ProductType.NOISE
                else:
                    part.product_type = ProductType.AUXILIARY

                if part.product_type in [
                        ProductType.SCIENCE,
                        ProductType.INFO,
                ]:
                    for chunk in part.chunks:
                        filter_name = headers[0].get('FILTER').split('_')[0]
                        _update_energy(
                            chunk,
                            headers[idx],
                            filter_name,
                            observation.observation_id,
                        )
                        _update_time(part, chunk, headers[0],
                                     observation.observation_id)
                        if part.product_type == ProductType.SCIENCE:
                            _update_spatial_wcs(
                                part,
                                chunk,
                                headers,
                                observation.observation_id,
                            )
                            chunk.naxis = header.get('NAXIS')
                            if (chunk.position is None
                                    and chunk.naxis is not None):
                                chunk.naxis = None

                        if (chunk.time is not None
                                and chunk.time.axis is not None
                                and chunk.time.axis.function is not None
                                and chunk.time.axis.function.delta == 1.0):
                            # these are the default values, and they make
                            # the time range start in 1858
                            chunk.time = None
                else:
                    # DB 21-07-20
                    # ignore WCS information unless product type == SCIENCE
                    while len(part.chunks) > 0:
                        del part.chunks[-1]

    if (observation.proposal is not None
            and observation.proposal.id is not None
            and observation.proposal.pi_name is None):
        program = program_metadata.get_pi_metadata(observation.proposal.id)
        if program is not None:
            observation.proposal.pi_name = program.get('pi_name')
            observation.proposal.title = program.get('title')

    if isinstance(observation, SimpleObservation):
        # undo the observation-level metadata modifications for updated
        # Gemini records
        observation.algorithm = Algorithm(name='exposure')
    else:
        cc.update_observation_members(observation)
    logging.debug('Done update.')
    return observation
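
The EXTNAME handling above reduces to a small lookup, sketched here as a standalone helper (the function name is hypothetical; caom2's ProductType is assumed importable), matching the DB 07-08-20 comment: 'SCI' is SCIENCE, 'DQ' and 'VAR' are NOISE, everything else defaults to AUXILIARY.

from caom2 import ProductType


def product_type_for_extname(extname):
    # map a FITS EXTNAME to the part-level ProductType used above
    if extname == 'SCI':
        return ProductType.SCIENCE
    if extname in ('DQ', 'VAR'):
        return ProductType.NOISE
    return ProductType.AUXILIARY


assert product_type_for_extname('SCI') is ProductType.SCIENCE
assert product_type_for_extname('VAR') is ProductType.NOISE
assert product_type_for_extname('BPM') is ProductType.AUXILIARY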