def perform_settlement_clustering(self):
        self.logger.debug("Performing Level 1 clustering")

        settlement_clustering = Regions()
        self.regions = settlement_clustering.detect_regions(self.dataset)
        self.base_stations = settlement_clustering.locate_base_stations_proximity()
    def _process_partial_data(self, partial_requested_data):
        '''
        This function is always executed after handling the response of each URL of this Data Source.
        The data is converted from the external structure into a DataFrame.

        Parameters
        ----------
        partial_requested_data : pd.DataFrame
            the requested CSV of one URL, already read into a DataFrame.

        Returns
        -------
        pd.DataFrame
            a DataFrame with daily [Date] as row indexer and [Region, Data Item] as column multiindexer.
        '''
        region_codes_ine = Regions._get_property(self.regions, self.__class__.REGION_REPRESENTATION)
        codesine_regions_dict = dict(zip(region_codes_ine,self.regions))
        
        df = partial_requested_data[
            (partial_requested_data['cod_ine_ambito'].isin(region_codes_ine))
            & (partial_requested_data['nombre_sexo'] == 'todos')
            & (partial_requested_data['nombre_gedad'] == 'todos')
        ].copy()  # copy to avoid SettingWithCopyWarning on the assignments below
        df.cod_ine_ambito = df.cod_ine_ambito.astype(int).astype(str).apply(
            lambda x: x.zfill(2)).replace(codesine_regions_dict)
        df.rename(columns={'cod_ine_ambito':'Region'},inplace=True)
        df.drop(['ambito','cod_ambito','nombre_ambito','cod_sexo','nombre_sexo','cod_gedad','nombre_gedad'],axis='columns',errors='ignore',inplace=True)
        df = df[['Region','fecha_defuncion']+self.data_items]
        df.set_index(['Region'],inplace=True)
        df = df.pivot_table(index='fecha_defuncion',columns='Region').swaplevel(i=0,j=1,axis='columns')
        df.columns.rename("Item",level=1,inplace=True)
        df.set_index(pd.to_datetime(df.index, format="%Y-%m-%d"),inplace=True)  
        return df
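
# The pivot/swaplevel pair above is what produces the promised shape: a daily
# Date index with a (Region, Item) column MultiIndex. A minimal standalone
# sketch with toy data (region and column names are illustrative, not taken
# from the real feed):
import pandas as pd

toy = pd.DataFrame({
    'Region': ['Madrid', 'Madrid', 'Cataluña', 'Cataluña'],
    'fecha_defuncion': ['2020-03-01', '2020-03-02', '2020-03-01', '2020-03-02'],
    'defunciones_observadas': [10, 12, 20, 25],
})
wide = toy.pivot_table(index='fecha_defuncion', columns='Region')
wide = wide.swaplevel(0, 1, axis='columns')           # -> (Region, Item) column order
wide.columns = wide.columns.rename('Item', level=1)
wide.index = pd.to_datetime(wide.index, format="%Y-%m-%d")
# wide.columns is now a MultiIndex with entries such as ('Madrid', 'defunciones_observadas')
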
    def __init__(self,
                 elements_layout,
                 bounds_domain,
                 curvature=0.,
                 info=None,
                 name=None,
                 pre_No=None):
        SW = (bounds_domain[0][0], bounds_domain[1][0])
        SE = (bounds_domain[0][1], bounds_domain[1][0])
        NW = (bounds_domain[0][0], bounds_domain[1][1])
        NE = (bounds_domain[0][1], bounds_domain[1][1])
        regions = {
            'R_crazy': (SW, SE, NW, NE),
        }
        region_type = {}
        boundaries_dict = {
            'South': ('0S', ),
            'North': ('0N', ),
            'West': ('0W', ),
            'East': ('0E', )
        }
        REGIONS = (regions, region_type, boundaries_dict)
        R = Regions(REGIONS)

        super().__init__(2, R, elements_layout, info, name, pre_No)

        if curvature > 0.316:
            warnings.warn(
                " <MESH> <CRAZY> : curvature={} > 0.316 (normally curvature in [0, 0.3]), grid may go outside domain somewhere!"
                .format(curvature))
        self._curvature = curvature

        (self._x_bounds, self._y_bounds) = bounds_domain
        self._bounds_domain = bounds_domain
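
# For reference, the corner tuples above assume bounds_domain is laid out as
# ((x_min, x_max), (y_min, y_max)). A quick standalone check with illustrative
# bounds (plain Python, no mesh class involved):
bounds_domain = ((0.0, 1.0), (0.0, 1.0))
SW = (bounds_domain[0][0], bounds_domain[1][0])   # (x_min, y_min)
SE = (bounds_domain[0][1], bounds_domain[1][0])   # (x_max, y_min)
NW = (bounds_domain[0][0], bounds_domain[1][1])   # (x_min, y_max)
NE = (bounds_domain[0][1], bounds_domain[1][1])   # (x_max, y_max)
print(SW, SE, NW, NE)   # (0.0, 0.0) (1.0, 0.0) (0.0, 1.0) (1.0, 1.0)
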
Example #4
    def __get_stations_by_regions(self):
        """
        Gets the aemet stations per region

        Returns
        -------
        dict {str : str}
            a dictionary with the instance regions as keys and a comma-separated string of AEMET station codes as values.

        Notes
        -----
        * It is used for completing 'idema' argument in the AEMET API function (https://opendata.aemet.es/dist/index.html?#!/valores-climatologicos/Climatolog%C3%ADas_diarias)
        """
        aemet_stations = Regions._get_property(
            self.regions, self.__class__.REGION_REPRESENTATION)
        str_aemet_stations = {}

        pos_region = 0
        for stations in aemet_stations:
            str_aemet_stations[self.regions[pos_region]] = ''
            for station in stations:
                str_aemet_stations[
                    self.regions[pos_region]] = str_aemet_stations[
                        self.regions[pos_region]] + "," + station
            str_aemet_stations[self.regions[pos_region]] = str_aemet_stations[
                self.regions[pos_region]][1:]
            pos_region = pos_region + 1

        return str_aemet_stations
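
# The loop above builds each value by prepending "," and then slicing it off;
# an equivalent, more compact sketch uses str.join. The region names and
# station codes below are illustrative, and the two lists are assumed to be
# index-aligned as in the method above:
regions = ["Madrid", "Cataluña"]
aemet_stations = [["3195", "3200"], ["0076", "0200E"]]
str_aemet_stations = {
    region: ",".join(stations)
    for region, stations in zip(regions, aemet_stations)
}
print(str_aemet_stations)   # {'Madrid': '3195,3200', 'Cataluña': '0076,0200E'}
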
Example #5
    def from_regions(cls, regions, **kwargs):
        """Create region geom from list of regions

        The regions are combined with union to a compound region.

        Parameters
        ----------
        regions : list of `~regions.SkyRegion` or str
            Regions
        **kwargs: dict
            Keyword arguments forwarded to `RegionGeom`

        Returns
        -------
        geom : `RegionGeom`
            Region map geometry
        """
        if isinstance(regions, str):
            regions = Regions.parse(data=regions, format="ds9")
        elif isinstance(regions, SkyRegion):
            regions = [regions]
        elif isinstance(regions, SkyCoord):
            regions = [PointSkyRegion(center=regions)]

        if regions:
            regions = regions_to_compound_region(regions)

        return cls(region=regions, **kwargs)
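
# A short usage sketch, assuming this classmethod is RegionGeom.from_regions
# from gammapy.maps; the coordinates are illustrative:
from astropy.coordinates import SkyCoord
from gammapy.maps import RegionGeom

# from a DS9 string: parsed via Regions.parse(..., format="ds9") as above
geom = RegionGeom.from_regions("icrs;circle(83.63,22.01,0.5)")

# from a bare SkyCoord: wrapped into a PointSkyRegion before the union step
geom_point = RegionGeom.from_regions(SkyCoord(83.63, 22.01, unit="deg"))
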
Example #6
def compound_region_to_regions(region):
    """Create list of regions from compound regions.

    Parameters
    ----------
    region : `~regions.CompoundSkyRegion` or `~regions.SkyRegion`
        Compound sky region

    Returns
    -------
    regions : `~regions.Regions`
        List of regions.
    """
    regions = Regions([])

    if isinstance(region, CompoundSkyRegion):
        if region.operator is operator.or_:
            regions_1 = compound_region_to_regions(region.region1)
            regions.extend(regions_1)

            regions_2 = compound_region_to_regions(region.region2)
            regions.extend(regions_2)
        else:
            raise ValueError("Only union operator supported")
    else:
        return Regions([region])

    return regions
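
# Round-trip sketch pairing this helper with regions_to_compound_region from
# the same module (assumed to be in scope); the two circles are illustrative:
from regions import Regions

circles = Regions.parse("galactic;circle(10,10,1);circle(20,20,1)", format="ds9")
compound = regions_to_compound_region(circles)   # union of the two circles
flat = compound_region_to_regions(compound)      # back to a flat Regions list
assert len(flat) == 2
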
def get_dataframe_by_definition(def_dict):
    """Return dataframe corresponding to definition dict."""
    file_path = os.path.join(def_dict['folder'], def_dict['filename'])
    if 'anchor' in def_dict.keys():
        gen = read_sheet(file_path, def_dict['sheet'], def_dict['anchor'])
    else:
        gen = read_sheet(file_path, def_dict['sheet'])
    try:
        df = get_dataframe(gen)[Regions.names()]
    except Exception as exc:
        raise ValueError(file_path) from exc
    df.insert(0, 'varname', def_dict['varname'])
    return df
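
# A sketch of the definition dict this function expects; only the keys are
# taken from the code above, every value is illustrative:
def_dict = {
    'varname': 'INVESTMENT',          # label inserted as the first column
    'folder': 'data',                 # joined with 'filename' via os.path.join
    'filename': 'regional_data.xls',  # hypothetical workbook name
    'sheet': 'Sheet1',                # worksheet passed to read_sheet
    'anchor': 'A2',                   # optional extra argument passed to read_sheet
}
# df = get_dataframe_by_definition(def_dict)
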
Example #9
def test_compound_region_center():
    regions_ds9 = ("galactic;"
                   "circle(1,1,0.1);"
                   "circle(-1,1,0.1);"
                   "circle(1,-1,0.1);"
                   "circle(-1,-1,0.1);")

    regions = Regions.parse(regions_ds9, format="ds9")

    region = regions_to_compound_region(regions)

    center = compound_region_center(region)

    assert_allclose(center.galactic.l.wrap_at("180d"), 0 * u.deg, atol=1e-6)
    assert_allclose(center.galactic.b, 0 * u.deg, atol=1e-6)
Example #10
    def _to_region_table(self):
        """Export region to a FITS region table."""
        if self.region is None:
            raise ValueError("Region definition required.")

        # TODO: make this a to_hdulist() method
        region_list = compound_region_to_regions(self.region)
        pixel_region_list = []
        for reg in region_list:
            pixel_region_list.append(reg.to_pixel(self.wcs))
        table = Regions(pixel_region_list).serialize(format="fits")

        header = WcsGeom(wcs=self.wcs, npix=self.wcs.array_shape).to_header()
        table.meta.update(header)
        return table
Example #11
    def from_hdulist(cls, hdulist, format="ogip", hdu=None):
        """Read region table and convert it to region list.

        Parameters
        ----------
        hdulist : `~astropy.io.fits.HDUList`
            HDU list
        format : {"ogip", "ogip-arf", "gadf"}
            HDU format

        Returns
        -------
        geom : `RegionGeom`
            Region map geometry

        """
        region_hdu = "REGION"

        if format == "gadf" and hdu:
            region_hdu = hdu + "_" + region_hdu

        if region_hdu in hdulist:
            region_table = Table.read(hdulist[region_hdu])
            wcs = WcsGeom.from_header(region_table.meta).wcs

            regions = []

            for reg in Regions.parse(data=region_table, format="fits"):
                # TODO: remove workaround once regions issue with fits serialization is sorted out
                # see https://github.com/astropy/regions/issues/400
                reg.meta["include"] = True
                regions.append(reg.to_sky(wcs))
            region = regions_to_compound_region(regions)
        else:
            region, wcs = None, None

        if format == "ogip":
            hdu_bands = "EBOUNDS"
        elif format == "ogip-arf":
            hdu_bands = "SPECRESP"
        elif format == "gadf":
            hdu_bands = hdu + "_BANDS"
        else:
            raise ValueError(f"Unknown format {format}")

        axes = MapAxes.from_table_hdu(hdulist[hdu_bands], format=format)
        return cls(region=region, wcs=wcs, axes=axes)
Example #12
def catalog_to_regions(catalog,
                       ra='RA',
                       dec='DEC',
                       majax='Maj',
                       minax='Min',
                       PA='PA'):
    '''
    Convert catalog to a list of regions

    Keyword arguments:
    catalog -- Input catalog
    ra, dec, majax, minax, PA -- Column names containing the required variables
    '''
    regions = Regions([
        EllipseSkyRegion(center=SkyCoord(source[ra], source[dec], unit='deg'),
                         height=source[majax] * u.deg,
                         width=source[minax] * u.deg,
                         angle=source[PA] * u.deg) for source in catalog
    ])
    return regions
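
# Usage sketch with a toy astropy Table standing in for the catalog; the
# column names follow the defaults above and the values are illustrative:
from astropy.table import Table

toy_catalog = Table({
    'RA': [150.1, 150.3],    # deg
    'DEC': [2.2, 2.4],       # deg
    'Maj': [0.010, 0.020],   # major axis, deg
    'Min': [0.005, 0.010],   # minor axis, deg
    'PA': [30.0, 45.0],      # position angle, deg
})
toy_regions = catalog_to_regions(toy_catalog)
print(len(toy_regions))   # 2
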
Example #13
    def __get_regions_by_stations(self):
        """
        Gets the region per aemet station.

        Returns
        -------
        dict {str : str}
            a dictionary with aemet stations as keys, and instance regions as values.
        """
        aemet_stations = Regions._get_property(
            self.regions, self.__class__.REGION_REPRESENTATION)
        region_by_aemet_station = {}

        pos_region = 0
        for stations in aemet_stations:
            for station in stations:
                region_by_aemet_station[station] = self.regions[pos_region]
            pos_region = pos_region + 1

        return region_by_aemet_station
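
# The same inversion written as a standalone dict comprehension; station codes
# are illustrative and the two lists are assumed index-aligned as above:
regions = ["Madrid", "Cataluña"]
aemet_stations = [["3195", "3200"], ["0076"]]
region_by_aemet_station = {
    station: region
    for region, stations in zip(regions, aemet_stations)
    for station in stations
}
print(region_by_aemet_station)   # {'3195': 'Madrid', '3200': 'Madrid', '0076': 'Cataluña'}
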
Example #14
    def inject_references(obj):
        if isinstance(obj, dict):
            if 'file' in obj:
                filename = pjoin(src_path, obj['file'])
                if filename.endswith('csv'):
                    df = pd.read_csv(filename)
                    obj.pop('file')
                    obj.update(df.to_dict(orient='list'))
                elif filename.endswith('.png'):
                    return base64.b64encode(open(filename, 'rb').read())
                elif filename.endswith('xml'):
                    with open(filename, 'rb') as fid:
                        payload = fid.read()
                    return payload
                elif filename.endswith('reg'):
                    return Regions.read(filename).serialize(format='ds9')
                elif filename.endswith('h5') or filename.endswith('hdf5'):
                    payload = Table.read(filename).to_pandas().to_dict(
                        orient='list')
                    return payload
                else:
                    raise NotImplementedError(
                        f'{filename}: Only CSV, PNG, xml, reg, and hdf5 files currently supported for extending individual objects'
                    )

            for k, v in obj.items():
                obj[k] = inject_references(v)
            return obj
        elif isinstance(obj, str) and obj.startswith('='):
            try:
                return references[obj[1:]]
            except KeyError:
                print(
                    f'\nReference {obj[1:]} not found while posting to {endpoint}; skipping'
                )
                raise
        elif isinstance(obj, list):
            return [inject_references(item) for item in obj]
        else:
            return obj
Example #15
    def _process_partial_data(self, partial_requested_data):
        '''
        This function is always executed after handling the response of each URL of this Data Source.
        The data is converted from the external structure into a DataFrame.

        Parameters
        ----------
        partial_requested_data : json
            the requested JSON of one URL, already parsed.

        Returns
        -------
        pd.DataFrame
            a DataFrame with [Region] as row indexer and [Data Item] as column indexer.
        '''

        if self.__class__.DATA_ITEMS_INFO[self.data_items[
                self.processed_urls]]['funcion'] == 'DATOS_TABLA':

            df = pd.json_normalize(partial_requested_data, 'Data', ['Nombre'])
            df.Nombre = df.Nombre.replace(
                dict(
                    zip(
                        Regions._get_property(
                            self.regions,
                            self.__class__.REGION_REPRESENTATION),
                        self.regions)),
                regex=True
            )  # some region names contain commas; fix them via the configuration file

            # the JSON field 'Nombre' is split into Region, SubItem and Item columns
            # SubItem is a subdivision of the sought Item

            df[['Region', 'SubItem',
                'Item']] = df['Nombre'].str.split(", ", n=2, expand=True)

            # if the word 'sexo' appears in Region, swap the Region and SubItem columns
            if any(df.Region.str.contains(pat='sexo', case=False, regex=True)):
                df[['Region', 'SubItem']] = df[['SubItem', 'Region']]
Example #16
    def load_static_regions_from_file(self, region_file, region_format='ds9', prefix='region',
                                      max_num_regions=20, **kwargs):
        """Load regions defined in the given file.
        See :ref:`regions:regions_io` for supported file formats.

        Parameters
        ----------
        region_file : str
            Path to region file.

        region_format : {'crtf', 'ds9', 'fits'}
            See :meth:`regions.Regions.get_formats`.

        prefix : str
            Prefix for the Subset names generated by loaded regions.
            Names will have the format of ``<prefix>_<i>``, where ``i``
            is the index in the load order.

        max_num_regions : int
            Maximum number of regions to read from the file, starting
            from top of the file, invalid regions included.

        kwargs : dict
            See :meth:`load_static_regions`.

        Returns
        -------
        bad_regions : dict
            See :meth:`load_static_regions`.

        """
        from regions import Regions

        raw_regs = Regions.read(region_file, format=region_format)
        my_regions = {f'{prefix}_{i}': reg
                      for i, reg in enumerate(raw_regs[:max_num_regions])}
        return self.load_static_regions(my_regions, **kwargs)
Example #17
def test_gcnevents_observations(driver, user, super_admin_token,
                                upload_data_token, view_only_token,
                                ztf_camera):

    datafile = f'{os.path.dirname(__file__)}/../data/GW190425_initial.xml'
    with open(datafile, 'rb') as fid:
        payload = fid.read()
    data = {'xml': payload}

    status, data = api('POST', 'gcn_event', data=data, token=super_admin_token)
    assert status == 200
    assert data['status'] == 'success'

    telescope_name = str(uuid.uuid4())
    status, data = api(
        'POST',
        'telescope',
        data={
            'name': telescope_name,
            'nickname': telescope_name,
            'lat': 0.0,
            'lon': 0.0,
            'elevation': 0.0,
            'diameter': 10.0,
        },
        token=super_admin_token,
    )
    assert status == 200
    assert data['status'] == 'success'
    telescope_id = data['data']['id']

    fielddatafile = f'{os.path.dirname(__file__)}/../../../data/ZTF_Fields.csv'
    regionsdatafile = f'{os.path.dirname(__file__)}/../../../data/ZTF_Region.reg'

    instrument_name = str(uuid.uuid4())
    status, data = api(
        'POST',
        'instrument',
        data={
            'name': instrument_name,
            'type': 'imager',
            'band': 'Optical',
            'filters': ['ztfr'],
            'telescope_id': telescope_id,
            'field_data':
            pd.read_csv(fielddatafile)[:5].to_dict(orient='list'),
            'field_region':
            Regions.read(regionsdatafile).serialize(format='ds9'),
        },
        token=super_admin_token,
    )
    assert status == 200
    assert data['status'] == 'success'
    instrument_id = data['data']['id']

    params = {'includeGeoJSON': True}

    # wait for the fields to populate
    nretries = 0
    fields_loaded = False
    while not fields_loaded and nretries < 5:
        try:
            status, data = api(
                'GET',
                f'instrument/{instrument_id}',
                params=params,
                token=super_admin_token,
            )
            assert status == 200
            assert data['status'] == 'success'
            assert data['data']['band'] == 'Optical'

            assert len(data['data']['fields']) == 5
            fields_loaded = True
        except AssertionError:
            nretries = nretries + 1
            time.sleep(3)

    datafile = f'{os.path.dirname(__file__)}/../../../data/sample_observation_data.csv'
    data = {
        'telescopeName': telescope_name,
        'instrumentName': instrument_name,
        'observationData': pd.read_csv(datafile).to_dict(orient='list'),
    }

    status, data = api('POST',
                       'observation',
                       data=data,
                       token=super_admin_token)

    assert status == 200
    assert data['status'] == 'success'

    # wait for the executed observations to populate
    params = {
        'telescopeName': telescope_name,
        'instrumentName': instrument_name,
        'startDate': "2019-04-25 08:18:05",
        'endDate': "2019-04-28 08:18:05",
    }
    nretries = 0
    observations_loaded = False
    while not observations_loaded and nretries < 5:
        try:
            status, data = api('GET',
                               'observation',
                               params=params,
                               token=super_admin_token)
            assert status == 200
            data = data["data"]
            assert len(data['observations']) == 10
            observations_loaded = True
        except AssertionError:
            nretries = nretries + 1
            time.sleep(3)

    driver.get(f'/become_user/{user.id}')
    driver.get('/gcn_events/2019-04-25T08:18:05')

    driver.wait_for_xpath('//*[text()="190425 08:18:05"]')
    driver.wait_for_xpath('//*[text()="LVC"]')
    driver.wait_for_xpath('//*[text()="BNS"]')

    # test modify sources form
    driver.wait_for_xpath('//*[@id="root_startDate"]').send_keys('04/24/2019')
    driver.wait_for_xpath('//*[@id="root_startDate"]').send_keys(Keys.TAB)
    driver.wait_for_xpath('//*[@id="root_startDate"]').send_keys('01:01')
    driver.wait_for_xpath('//*[@id="root_startDate"]').send_keys('P')
    driver.wait_for_xpath('//*[@id="root_endDate"]').send_keys('04/30/2019')
    driver.wait_for_xpath('//*[@id="root_endDate"]').send_keys(Keys.TAB)
    driver.wait_for_xpath('//*[@id="root_endDate"]').send_keys('01:01')
    driver.wait_for_xpath('//*[@id="root_endDate"]').send_keys('P')
    driver.wait_for_xpath('//*[@id="root_localizationCumprob"]').clear()
    driver.wait_for_xpath('//*[@id="root_localizationCumprob"]').send_keys(
        '1.01')
    driver.wait_for_xpath('//*[@id="root_localizationName"]')
    driver.click_xpath('//*[@id="root_localizationName"]')
    driver.wait_for_xpath('//li[contains(text(), "bayestar.fits.gz")]')
    driver.click_xpath('//li[contains(text(), "bayestar.fits.gz")]')

    submit_button_xpath = (
        '//div[@data-testid="gcnsource-selection-form"]//button[@type="submit"]'
    )
    driver.wait_for_xpath(submit_button_xpath)
    driver.click_xpath(submit_button_xpath)

    # check that the executed observation table appears
    driver.wait_for_xpath('//*[text()="84434604"]')
    driver.wait_for_xpath('//*[text()="ztfr"]')
    driver.wait_for_xpath('//*[text()="1.57415"]')
    driver.wait_for_xpath('//*[text()="20.40705"]')
Example #18
def test_token_user_post_get_instrument(super_admin_token):
    name = str(uuid.uuid4())
    status, data = api(
        'POST',
        'telescope',
        data={
            'name': name,
            'nickname': name,
            'lat': 0.0,
            'lon': 0.0,
            'elevation': 0.0,
            'diameter': 10.0,
        },
        token=super_admin_token,
    )
    assert status == 200
    assert data['status'] == 'success'
    telescope_id = data['data']['id']

    fielddatafile = f'{os.path.dirname(__file__)}/../../../data/ZTF_Fields.csv'
    regionsdatafile = f'{os.path.dirname(__file__)}/../../../data/ZTF_Region.reg'

    instrument_name = str(uuid.uuid4())
    status, data = api(
        'POST',
        'instrument',
        data={
            'name': instrument_name,
            'type': 'imager',
            'band': 'NIR',
            'filters': ['f110w'],
            'telescope_id': telescope_id,
            'field_data': pd.read_csv(fielddatafile)[:5].to_dict(orient='list'),
            'field_region': Regions.read(regionsdatafile).serialize(format='ds9'),
        },
        token=super_admin_token,
    )
    assert status == 200
    assert data['status'] == 'success'
    instrument_id = data['data']['id']

    params = {'includeGeoJSON': True}

    # wait for the fields to populate
    nretries = 0
    fields_loaded = False
    while not fields_loaded and nretries < 5:
        try:
            status, data = api(
                'GET',
                f'instrument/{instrument_id}',
                params=params,
                token=super_admin_token,
            )
            assert status == 200
            assert data['status'] == 'success'
            assert data['data']['band'] == 'NIR'
            assert len(data['data']['fields']) == 5
            fields_loaded = True
        except AssertionError:
            nretries = nretries + 1
            time.sleep(3)

    params = {'includeGeoJSON': True}

    instrument_id = data['data']['id']
    status, data = api(
        'GET', f'instrument/{instrument_id}', params=params, token=super_admin_token
    )
    assert status == 200
    assert data['status'] == 'success'
    assert data['data']['band'] == 'NIR'

    assert len(data['data']['fields']) == 5

    assert any(
        [
            d['field_id'] == 1
            and d['contour']['features'][0]['geometry']['coordinates'][0][0]
            == [110.84791974982103, -87.01522999646508]
            for d in data['data']['fields']
        ]
    )

    params = {'includeGeoJSONSummary': True}

    instrument_id = data['data']['id']
    status, data = api(
        'GET', f'instrument/{instrument_id}', params=params, token=super_admin_token
    )
    assert status == 200
    assert data['status'] == 'success'
    assert data['data']['band'] == 'NIR'

    assert len(data['data']['fields']) == 5

    assert any(
        [
            d['field_id'] == 1
            and d['contour_summary']['features'][0]['geometry']['coordinates'][0]
            == [1.0238351746164418, -89.93777511600825]
            for d in data['data']['fields']
        ]
    )
from datetime import date
import pandas as pd
import os

from xls_read import read_sheet, read_by_definition, yearmon
from regions import Regions

filter_region_name = Regions.filter_region_name
reference_region_names = Regions.names()
rf_name = Regions.rf_name()
district_names = Regions.district_names()
summable_regions = Regions.summable_regions()

def get_dataframe(datapoints_stream):
    """Return dataframe corresponding to datapoints stream."""        
    list_of_dicts = [{'val':x[0], 'region':x[1], 'dates':x[2]} for x in datapoints_stream]
    df = pd.DataFrame(list_of_dicts)
    df = df.pivot(columns='region', values='val', index='dates')[reference_region_names]
    df.index = pd.DatetimeIndex(df.index)
    return df
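
# A quick illustration of the datapoints stream get_dataframe expects: an
# iterable of (value, region, date) tuples. The names below are illustrative,
# and the Regions.names() column filter is omitted so the sketch stays
# standalone:
toy_stream = [
    (100.0, 'Region A', '2015-01-31'),
    (200.0, 'Region B', '2015-01-31'),
    (110.0, 'Region A', '2015-02-28'),
]
toy_rows = [{'val': x[0], 'region': x[1], 'dates': x[2]} for x in toy_stream]
toy_df = pd.DataFrame(toy_rows).pivot(columns='region', values='val', index='dates')
toy_df.index = pd.DatetimeIndex(toy_df.index)
print(toy_df)   # one column per region, DatetimeIndex of report dates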

def get_dataframe_by_definition(def_dict):
    """Return dataframe corresponding to definition dict."""   
    file_path = os.path.join(def_dict['folder'], def_dict['filename'])
    if 'anchor' in def_dict.keys():
        gen = read_sheet(file_path, def_dict['sheet'], def_dict['anchor']) 
    else:
        gen = read_sheet(file_path, def_dict['sheet'])
    try:
        df = get_dataframe(gen)[Regions.names()]
    except Exception as exc:
        raise ValueError(file_path) from exc
    df.insert(0, 'varname', def_dict['varname'])
    return df

 dfs = import_xl_data()
 
 # this import is faster, but series is alphabetic
 # dfs = import_csv_data()
 
 # output 1: save all dataframes to xls by sheet - one df per sheet    
 if '1' in jobs:
     to_xl_book(dfs, tag = 'by_sheet')    
 
 # output 2: concat all dataframes to one xls sheet
 if '2' in jobs:
     r = pd.concat(dfs)
     to_xl_sheet(r, tag = 'one_page', sheet = "regions")
 
 # output 3: make Russia file (1 sheet)    
 rf = Regions.rf_name()
 if '3' in jobs:               
     # note: must have pandas 0.17 or higher for 'rename'
     df_rf = pd.concat([d[rf].rename(d['varname'][0]) for d in dfs], axis = 1)
     to_xl_sheet(df_rf, tag = 'rf', sheet = 'rf')          
     
 # output 4: make fed districts file (num_var sheets)
 if '4' in jobs:
     cols = ['varname'] + [Regions.rf_name()] + Regions.district_names() 
     dfs2 = [d.reindex(columns=cols) for d in dfs]
     to_xl_book(dfs2, tag = 'districts')        
 
 # output 5: make regions only file (num_var sheets)  
 if '5' in jobs:    
     cols = ['varname'] + [Regions.rf_name()] + Regions.summable_regions()
     dfs3 = [d.reindex(columns=cols) for d in dfs] 
Example #21
    def post(self):
        # See bottom of this file for redoc docstring -- moved it there so that
        # it could be made an f-string.

        data = self.get_json()
        telescope_id = data.get('telescope_id')
        telescope = Telescope.get_if_accessible_by(
            telescope_id, self.current_user, raise_if_none=True, mode="read"
        )

        sensitivity_data = data.get("sensitivity_data", None)
        if sensitivity_data:
            filters = data.get("filters", [])
            if not set(sensitivity_data.keys()).issubset(filters):
                return self.error(
                    'Sensitivity_data filters must be a subset of the instrument filters'
                )

        field_data = data.pop("field_data", None)
        field_region = data.pop("field_region", None)

        field_fov_type = data.pop("field_fov_type", None)
        field_fov_attributes = data.pop("field_fov_attributes", None)

        if (field_region is not None) and (field_fov_type is not None):
            return self.error('must supply only one of field_region or field_fov_type')

        if field_region is not None:
            regions = Regions.parse(field_region, format='ds9')
            data['region'] = regions.serialize(format='ds9')

        if field_fov_type is not None:
            if field_fov_attributes is None:
                return self.error(
                    'field_fov_attributes required if field_fov_type supplied'
                )
            if not field_fov_type.lower() in ["circle", "rectangle"]:
                return self.error('field_fov_type must be circle or rectangle')
            if isinstance(field_fov_attributes, list):
                field_fov_attributes = [float(x) for x in field_fov_attributes]
            else:
                field_fov_attributes = [float(field_fov_attributes)]

            center = SkyCoord(0.0, 0.0, unit='deg', frame='icrs')
            if field_fov_type.lower() == "circle":
                if not len(field_fov_attributes) == 1:
                    return self.error(
                        'If field_fov_type is circle, then should supply only radius for field_fov_attributes'
                    )
                radius = field_fov_attributes[0]
                regions = CircleSkyRegion(center=center, radius=radius * u.deg)
            elif field_fov_type.lower() == "rectangle":
                if not len(field_fov_attributes) == 2:
                    return self.error(
                        'If field_fov_type is rectangle, then should supply width and height for field_fov_attributes'
                    )
                width, height = field_fov_attributes
                regions = RectangleSkyRegion(
                    center=center, width=width * u.deg, height=height * u.deg
                )
            data['region'] = regions.serialize(format='ds9')

        schema = Instrument.__schema__()
        try:
            instrument = schema.load(data)
        except ValidationError as exc:
            return self.error(
                'Invalid/missing parameters: ' f'{exc.normalized_messages()}'
            )

        existing_instrument = (
            Instrument.query_records_accessible_by(
                self.current_user,
            )
            .filter(
                Instrument.name == data.get('name'),
                Instrument.telescope_id == telescope_id,
            )
            .first()
        )
        if existing_instrument is None:
            instrument.telescope = telescope
            DBSession().add(instrument)
            DBSession().commit()
        else:
            instrument = existing_instrument

        if field_data is not None:
            if (field_region is None) and (field_fov_type is None):
                return self.error(
                    'field_region or field_fov_type is required with field_data'
                )

            if type(field_data) is str:
                field_data = pd.read_table(StringIO(field_data), sep=",").to_dict(
                    orient='list'
                )

            if not {'ID', 'RA', 'Dec'}.issubset(field_data):
                return self.error("ID, RA, and Dec required in field_data.")

            log(f"Started generating fields for instrument {instrument.id}")
            # run async
            IOLoop.current().run_in_executor(
                None,
                lambda: add_tiles(instrument.id, instrument.name, regions, field_data),
            )

        self.push_all(action="skyportal/REFRESH_INSTRUMENTS")
        return self.success(data={"id": instrument.id})
Example #22
from regions import Regions
z = Regions.summable_regions()
print(z)

for r in Regions.district_names():
    w = Regions.region_by_district(r)
    print('\n', r)
    print(w)

# passes now
# assert set(Regions.region_by_district("Уральский федеральный округ")) == set (['Курганская область', 'Свердловская область', 'Ханты-Мансийский авт. округ - Югра', 'Ямало-Ненецкий авт. округ', 'Тюменская область без авт. округов', 'Челябинская область'])

# must pass:
assert set(Regions.region_by_district("Уральский федеральный округ")) == set([
    'Курганская область',
    'Свердловская область',
    #'Ханты-Мансийский авт. округ - Югра',
    #'Ямало-Ненецкий авт. округ',
    #'Тюменская область без авт. округов',
    'Тюменская область',
    'Челябинская область'
])

print(Regions.region_by_district("Северо-Западный федеральный округ"))

# Regions.region_by_district("Северо-Западный федеральный округ") must return:
[
    'Республика Карелия',
    'Республика Коми',
    #'Ненецкий авт. округ',
    #'Архангельская область без авт. округа',
Example #23
    def close(self):
        self.mainWindows.setCentralWidget(Regions(self.mainWindows))
Example #24
def test_compound_region_center_single():
    region = Regions.parse("galactic;circle(1,1,0.1)", format="ds9")[0]
    center = compound_region_center(region)

    assert_allclose(center.galactic.l.wrap_at("180d"), 1 * u.deg, atol=1e-6)
    assert_allclose(center.galactic.b, 1 * u.deg, atol=1e-6)
Example #25
    def put(self, instrument_id):
        """
        ---
        description: Update instrument
        tags:
          - instruments
        parameters:
          - in: path
            name: instrument_id
            required: true
            schema:
              type: integer
        requestBody:
          content:
            application/json:
              schema: InstrumentNoID
        responses:
          200:
            content:
              application/json:
                schema: Success
          400:
            content:
              application/json:
                schema: Error
        """
        data = self.get_json()
        data['id'] = int(instrument_id)

        # permission check
        instrument = Instrument.get_if_accessible_by(
            int(instrument_id), self.current_user, mode='update'
        )
        if instrument is None:
            return self.error(f'Missing instrument with ID {instrument_id}')

        filters = instrument.filters
        sensitivity_data = data.get('sensitivity_data', None)
        if sensitivity_data:
            if not set(sensitivity_data.keys()).issubset(filters):
                return self.error(
                    'Filter names must be present in both sensitivity_data property and filters property'
                )

        field_data = data.pop("field_data", None)
        field_region = data.pop("field_region", None)

        field_fov_type = data.pop("field_fov_type", None)
        field_fov_attributes = data.pop("field_fov_attributes", None)

        if (field_region is not None) and (field_fov_type is not None):
            return self.error('must supply only one of field_region or field_fov_type')

        if field_region is not None:
            regions = Regions.parse(field_region, format='ds9')
            data['region'] = regions.serialize(format='ds9')

        if field_fov_type is not None:
            if field_fov_attributes is None:
                return self.error(
                    'field_fov_attributes required if field_fov_type supplied'
                )
            if not field_fov_type.lower() in ["circle", "rectangle"]:
                return self.error('field_fov_type must be circle or rectangle')
            if isinstance(field_fov_attributes, list):
                field_fov_attributes = [float(x) for x in field_fov_attributes]
            else:
                field_fov_attributes = [float(field_fov_attributes)]

            center = SkyCoord(0.0, 0.0, unit='deg', frame='icrs')
            if field_fov_type.lower() == "circle":
                if not len(field_fov_attributes) == 1:
                    return self.error(
                        'If field_fov_type is circle, then should supply only radius for field_fov_attributes'
                    )
                radius = field_fov_attributes[0]
                regions = CircleSkyRegion(center=center, radius=radius * u.deg)
            elif field_fov_type.lower() == "rectangle":
                if not len(field_fov_attributes) == 2:
                    return self.error(
                        'If field_fov_type is rectangle, then should supply width and height for field_fov_attributes'
                    )
                width, height = field_fov_attributes
                regions = RectangleSkyRegion(
                    center=center, width=width * u.deg, height=height * u.deg
                )
            data['region'] = regions.serialize(format='ds9')

        schema = Instrument.__schema__()
        try:
            schema.load(data, partial=True)
        except ValidationError as exc:
            return self.error(
                'Invalid/missing parameters: ' f'{exc.normalized_messages()}'
            )
        self.verify_and_commit()

        if field_data is not None:
            if (field_region is None) and (field_fov_type is None):
                return self.error(
                    'field_region or field_fov_type is required with field_data'
                )

            if type(field_data) is str:
                field_data = pd.read_table(StringIO(field_data), sep=",").to_dict(
                    orient='list'
                )

            if not {'ID', 'RA', 'Dec'}.issubset(field_data):
                return self.error("ID, RA, and Dec required in field_data.")

            log(f"Started generating fields for instrument {instrument.id}")
            # run async
            IOLoop.current().run_in_executor(
                None,
                lambda: add_tiles(instrument.id, instrument.name, regions, field_data),
            )

        self.push_all(action="skyportal/REFRESH_INSTRUMENTS")
        return self.success()
Example #26
def test_observationplan_request(driver, user, super_admin_token,
                                 public_group):

    datafile = f'{os.path.dirname(__file__)}/../data/GW190425_initial.xml'
    with open(datafile, 'rb') as fid:
        payload = fid.read()
    data = {'xml': payload}

    status, data = api('POST', 'gcn_event', data=data, token=super_admin_token)
    assert status == 200
    assert data['status'] == 'success'

    telescope_name = str(uuid.uuid4())
    status, data = api(
        'POST',
        'telescope',
        data={
            'name': telescope_name,
            'nickname': telescope_name,
            'lat': 0.0,
            'lon': 0.0,
            'elevation': 0.0,
            'diameter': 10.0,
        },
        token=super_admin_token,
    )
    assert status == 200
    assert data['status'] == 'success'
    telescope_id = data['data']['id']

    fielddatafile = f'{os.path.dirname(__file__)}/../../../data/ZTF_Fields.csv'
    regionsdatafile = f'{os.path.dirname(__file__)}/../../../data/ZTF_Region.reg'

    instrument_name = str(uuid.uuid4())
    status, data = api(
        'POST',
        'instrument',
        data={
            'name': instrument_name,
            'type': 'imager',
            'band': 'NIR',
            'filters': ['f110w'],
            'telescope_id': telescope_id,
            "api_classname_obsplan": "ZTFMMAAPI",
            'field_data':
            pd.read_csv(fielddatafile)[:5].to_dict(orient='list'),
            'field_region':
            Regions.read(regionsdatafile).serialize(format='ds9'),
        },
        token=super_admin_token,
    )
    assert status == 200
    assert data['status'] == 'success'
    instrument_id = data['data']['id']

    params = {'includeGeoJSON': True}

    # wait for the fields to populate
    nretries = 0
    fields_loaded = False
    while not fields_loaded and nretries < 5:
        try:
            status, data = api(
                'GET',
                f'instrument/{instrument_id}',
                token=super_admin_token,
                params=params,
            )
            assert status == 200
            assert data['status'] == 'success'
            assert data['data']['band'] == 'NIR'

            assert len(data['data']['fields']) == 5
            fields_loaded = True
        except AssertionError:
            nretries = nretries + 1
            time.sleep(3)

    status, data = api(
        "POST",
        "allocation",
        data={
            "group_id": public_group.id,
            "instrument_id": instrument_id,
            "hours_allocated": 100,
            "pi": "Ed Hubble",
            '_altdata': '{"access_token": "testtoken"}',
        },
        token=super_admin_token,
    )
    assert status == 200
    assert data["status"] == "success"

    driver.get(f'/become_user/{user.id}')
    driver.get('/gcn_events/2019-04-25T08:18:05')

    driver.wait_for_xpath('//*[text()="190425 08:18:05"]')
    driver.wait_for_xpath('//*[text()="LVC"]')
    driver.wait_for_xpath('//*[text()="BNS"]')

    submit_button_xpath = (
        '//div[@data-testid="observationplan-request-form"]//button[@type="submit"]'
    )
    driver.wait_for_xpath(submit_button_xpath)

    select_box = driver.find_element_by_id(
        "mui-component-select-followupRequestAllocationSelect")
    select_box.click()

    driver.click_xpath(
        f'//li[contains(text(), "{instrument_name}")][contains(text(), "{public_group.name}")]',
        scroll_parent=True,
    )

    # Click somewhere outside to remove focus from instrument select
    driver.click_xpath("//header")

    driver.click_xpath(submit_button_xpath)

    driver.wait_for_xpath(
        f"//div[@data-testid='{instrument_name}-requests-header']", timeout=15)
    driver.click_xpath(
        f"//div[@data-testid='{instrument_name}-requests-header']")
    driver.wait_for_xpath(
        f'//div[contains(@data-testid, "{instrument_name}_observationplanRequestsTable")]//div[contains(., "g,r,i")]',
        timeout=15,
    )
    driver.wait_for_xpath(
        f'''//div[contains(@data-testid, "{instrument_name}_observationplanRequestsTable")]//div[contains(., "complete")]''',
        timeout=15,
    )

    status, data = api("GET", "observation_plan", token=super_admin_token)
    assert status == 200

    observation_plan_request_id = data['data'][-1]['observation_plans'][0][
        'observation_plan_request_id']
    driver.click_xpath(
        f'//a[contains(@data-testid, "gcnRequest_{observation_plan_request_id}")]',
        scroll_parent=True,
    )
    driver.click_xpath(
        f'//button[contains(@data-testid, "treasuremapRequest_{observation_plan_request_id}")]',
        scroll_parent=True,
    )
    driver.click_xpath(
        f'//a[contains(@data-testid, "downloadRequest_{observation_plan_request_id}")]',
        scroll_parent=True,
    )
    driver.click_xpath(
        f'//button[contains(@data-testid, "sendRequest_{observation_plan_request_id}")]',
        scroll_parent=True,
    )
    driver.wait_for_xpath(
        f'''//div[contains(@data-testid, "{instrument_name}_observationplanRequestsTable")]//div[contains(., "submitted to telescope queue")]''',
        timeout=10,
    )
    driver.click_xpath(
        f'//button[contains(@data-testid, "removeRequest_{observation_plan_request_id}")]',
        scroll_parent=True,
    )
    driver.wait_for_xpath(
        f'''//div[contains(@data-testid, "{instrument_name}_observationplanRequestsTable")]//div[contains(., "deleted from telescope queue")]''',
        timeout=10,
    )
Example #27
def test_gcn_request(driver, user, super_admin_token, public_group):

    datafile = f'{os.path.dirname(__file__)}/../data/GW190425_initial.xml'
    with open(datafile, 'rb') as fid:
        payload = fid.read()
    data = {'xml': payload}

    status, data = api('POST', 'gcn_event', data=data, token=super_admin_token)
    assert status == 200
    assert data['status'] == 'success'

    telescope_name = str(uuid.uuid4())
    status, data = api(
        'POST',
        'telescope',
        data={
            'name': telescope_name,
            'nickname': telescope_name,
            'lat': 0.0,
            'lon': 0.0,
            'elevation': 0.0,
            'diameter': 10.0,
        },
        token=super_admin_token,
    )
    assert status == 200
    assert data['status'] == 'success'
    telescope_id = data['data']['id']

    fielddatafile = f'{os.path.dirname(__file__)}/../../../data/ZTF_Fields.csv'
    regionsdatafile = f'{os.path.dirname(__file__)}/../../../data/ZTF_Region.reg'

    instrument_name = str(uuid.uuid4())
    status, data = api(
        'POST',
        'instrument',
        data={
            'name': instrument_name,
            'type': 'imager',
            'band': 'NIR',
            'filters': ['ztfr'],
            'telescope_id': telescope_id,
            "api_classname_obsplan": "ZTFMMAAPI",
            'field_data':
            pd.read_csv(fielddatafile)[:5].to_dict(orient='list'),
            'field_region':
            Regions.read(regionsdatafile).serialize(format='ds9'),
        },
        token=super_admin_token,
    )
    assert status == 200
    assert data['status'] == 'success'
    instrument_id = data['data']['id']

    params = {'includeGeoJSON': True}

    # wait for the fields to populate
    nretries = 0
    fields_loaded = False
    while not fields_loaded and nretries < 5:
        try:
            status, data = api(
                'GET',
                f'instrument/{instrument_id}',
                params=params,
                token=super_admin_token,
            )
            assert status == 200
            assert data['status'] == 'success'
            assert data['data']['band'] == 'NIR'

            print(data['data'])

            assert len(data['data']['fields']) == 5
            fields_loaded = True
        except AssertionError:
            nretries = nretries + 1
            time.sleep(3)

    datafile = f'{os.path.dirname(__file__)}/../../../data/sample_observation_data.csv'
    data = {
        'telescopeName': telescope_name,
        'instrumentName': instrument_name,
        'observationData': pd.read_csv(datafile).to_dict(orient='list'),
    }

    status, data = api('POST',
                       'observation',
                       data=data,
                       token=super_admin_token)

    assert status == 200
    assert data['status'] == 'success'

    params = {
        'telescopeName': telescope_name,
        'instrumentName': instrument_name,
        'startDate': "2019-04-25 08:18:05",
        'endDate': "2019-04-28 08:18:05",
        'localizationDateobs': "2019-04-25T08:18:05",
        'localizationName': "bayestar.fits.gz",
        'localizationCumprob': 1.01,
        'returnStatistics': True,
    }

    # wait for the executed observations to populate
    nretries = 0
    observations_loaded = False
    while not observations_loaded and nretries < 5:
        try:
            status, data = api('GET',
                               'observation',
                               params=params,
                               token=super_admin_token)
            assert status == 200
            data = data["data"]
            assert len(data['observations']) == 10
            observations_loaded = True
        except AssertionError:
            nretries = nretries + 1
            time.sleep(3)

    driver.get(f'/become_user/{user.id}')
    driver.get('/gcn_events/2019-04-25T08:18:05')

    driver.wait_for_xpath('//*[text()="190425 08:18:05"]')
    driver.wait_for_xpath('//*[text()="LVC"]')
    driver.wait_for_xpath('//*[text()="BNS"]')

    driver.wait_for_xpath('//*[@id="root_localizationName"]')
    driver.click_xpath('//*[@id="root_localizationName"]')
    driver.wait_for_xpath('//li[contains(text(), "bayestar.fits.gz")]')
    driver.click_xpath('//li[contains(text(), "bayestar.fits.gz")]')
    driver.wait_for_xpath('//*[@id="root_localizationCumprob"]').clear()
    driver.wait_for_xpath('//*[@id="root_localizationCumprob"]').send_keys(
        1.01)

    submit_button_xpath = '//button[@type="submit"]'
    driver.wait_for_xpath(submit_button_xpath)
    driver.click_xpath(submit_button_xpath)

    select_box = driver.find_element_by_id(
        "mui-component-select-followupRequestInstrumentSelect")
    select_box.click()

    driver.click_xpath(
        f'//li[contains(text(), "{telescope_name}")][contains(text(), "{instrument_name}")]',
        scroll_parent=True,
    )

    driver.click_xpath(
        f'//a[contains(@data-testid, "observationGcn_{instrument_id}")]',
        scroll_parent=True,
    )
Example #28
def test_observation_plan_galaxy(
    user, super_admin_token, upload_data_token, view_only_token, public_group
):
    catalog_name = 'test_galaxy_catalog'

    # in case the catalog already exists, delete it.
    status, data = api(
        'DELETE', f'galaxy_catalog/{catalog_name}', token=super_admin_token
    )

    datafile = f'{os.path.dirname(__file__)}/../../../data/GW190814.xml'
    with open(datafile, 'rb') as fid:
        payload = fid.read()
    data = {'xml': payload}

    status, data = api('POST', 'gcn_event', data=data, token=super_admin_token)
    assert status == 200
    assert data['status'] == 'success'
    gcnevent_id = data['data']['gcnevent_id']

    # wait for event to load
    for n_times in range(26):
        status, data = api(
            'GET', "gcn_event/2019-08-14T21:10:39", token=super_admin_token
        )
        if data['status'] == 'success':
            break
        time.sleep(2)
    assert n_times < 25

    # wait for the localization to load
    params = {"include2DMap": True}
    for n_times_2 in range(26):
        status, data = api(
            'GET',
            'localization/2019-08-14T21:10:39/name/LALInference.v1.fits.gz',
            token=super_admin_token,
            params=params,
        )

        if data['status'] == 'success':
            data = data["data"]
            assert data["dateobs"] == "2019-08-14T21:10:39"
            assert data["localization_name"] == "LALInference.v1.fits.gz"
            assert np.isclose(np.sum(data["flat_2d"]), 1)
            break
        else:
            time.sleep(2)
    assert n_times_2 < 25
    localization_id = data['id']

    name = str(uuid.uuid4())
    status, data = api(
        'POST',
        'telescope',
        data={
            'name': name,
            'nickname': name,
            'lat': 0.0,
            'lon': 0.0,
            'elevation': 0.0,
            'diameter': 10.0,
        },
        token=super_admin_token,
    )
    assert status == 200
    assert data['status'] == 'success'
    telescope_id = data['data']['id']

    fielddatafile = f'{os.path.dirname(__file__)}/../../../data/ZTF_Fields.csv'
    regionsdatafile = f'{os.path.dirname(__file__)}/../../../data/ZTF_Region.reg'

    instrument_name = str(uuid.uuid4())
    status, data = api(
        'POST',
        'instrument',
        data={
            'name': instrument_name,
            'type': 'imager',
            'band': 'Optical',
            'filters': ['ztfr'],
            'telescope_id': telescope_id,
            'api_classname': 'ZTFAPI',
            'api_classname_obsplan': 'ZTFMMAAPI',
            'field_data': pd.read_csv(fielddatafile)[:5].to_dict(orient='list'),
            'field_region': Regions.read(regionsdatafile).serialize(format='ds9'),
        },
        token=super_admin_token,
    )
    assert status == 200
    assert data['status'] == 'success'
    instrument_id = data['data']['id']

    # wait for the fields to populate
    nretries = 0
    fields_loaded = False
    while not fields_loaded and nretries < 5:
        try:
            status, data = api(
                'GET',
                f'instrument/{instrument_id}',
                token=super_admin_token,
            )
            assert status == 200
            assert data['status'] == 'success'
            assert data['data']['band'] == 'Optical'

            assert len(data['data']['fields']) == 5
            fields_loaded = True
        except AssertionError:
            nretries = nretries + 1
            time.sleep(3)

    datafile = f'{os.path.dirname(__file__)}/../../../data/CLU_mini.hdf5'
    data = {
        'catalog_name': catalog_name,
        'catalog_data': Table.read(datafile).to_pandas().to_dict(orient='list'),
    }

    status, data = api('POST', 'galaxy_catalog', data=data, token=super_admin_token)
    assert status == 200
    assert data['status'] == 'success'

    params = {'catalog_name': catalog_name}

    nretries = 0
    galaxies_loaded = False
    while nretries < 10:
        status, data = api(
            'GET', 'galaxy_catalog', token=view_only_token, params=params
        )
        assert status == 200
        data = data["data"]["galaxies"]
        if len(data) == 92 and any(
            [
                d['name'] == '6dFgs gJ0001313-055904'
                and d['mstar'] == 336.60756522868667
                for d in data
            ]
        ):
            galaxies_loaded = True
            break
        nretries = nretries + 1
        time.sleep(5)

    assert nretries < 10
    assert galaxies_loaded

    request_data = {
        'group_id': public_group.id,
        'instrument_id': instrument_id,
        'pi': 'Shri Kulkarni',
        'hours_allocated': 200,
        'start_date': '3021-02-27T00:00:00',
        'end_date': '3021-07-20T00:00:00',
        'proposal_id': 'COO-2020A-P01',
    }

    status, data = api('POST', 'allocation', data=request_data, token=super_admin_token)
    assert status == 200
    assert data['status'] == 'success'
    allocation_id = data['data']['id']

    queue_name = str(uuid.uuid4())
    request_data = {
        'allocation_id': allocation_id,
        'gcnevent_id': gcnevent_id,
        'localization_id': localization_id,
        'payload': {
            'start_date': '2019-04-25 01:01:01',
            'end_date': '2019-04-27 01:01:01',
            'filter_strategy': 'block',
            'schedule_strategy': 'galaxy',
            'galaxy_catalog': catalog_name,
            'schedule_type': 'greedy_slew',
            'exposure_time': 300,
            'filters': 'ztfg',
            'maximum_airmass': 2.0,
            'integrated_probability': 100,
            'minimum_time_difference': 30,
            'queue_name': queue_name,
            'program_id': 'Partnership',
            'subprogram_name': 'GRB',
        },
    }

    status, data = api(
        'POST', 'observation_plan', data=request_data, token=super_admin_token
    )
    assert status == 200
    assert data['status'] == 'success'
    id = data['data']['ids'][0]

    # wait for the observation plan to populate
    nretries = 0
    observation_plan_loaded = False
    while not observation_plan_loaded and nretries < 5:
        try:
            status, data = api(
                'GET',
                f'observation_plan/{id}',
                params={"includePlannedObservations": "true"},
                token=super_admin_token,
            )

            assert status == 200
            assert data['status'] == 'success'

            assert data["data"]["gcnevent_id"] == gcnevent_id
            assert data["data"]["allocation_id"] == allocation_id
            assert data["data"]["payload"] == request_data["payload"]

            assert len(data["data"]["observation_plans"]) == 1
            observation_plan = data["data"]["observation_plans"][0]

            assert (
                observation_plan['plan_name'] == request_data["payload"]['queue_name']
            )
            assert observation_plan['validity_window_start'] == request_data["payload"][
                'start_date'
            ].replace(" ", "T")
            assert observation_plan['validity_window_end'] == request_data["payload"][
                'end_date'
            ].replace(" ", "T")

            planned_observations = observation_plan['planned_observations']
            assert len(planned_observations) > 0

            observation_plan_loaded = True

        except AssertionError:
            nretries = nretries + 1
            time.sleep(10)

    assert len(planned_observations) == 29
    assert all(
        [
            obs['filt'] == request_data["payload"]['filters']
            for obs in planned_observations
        ]
    )
    assert all(
        [
            obs['exposure_time'] == int(request_data["payload"]['exposure_time'])
            for obs in planned_observations
        ]
    )
Example #29
def test_observation_plan_tiling(
    user, super_admin_token, upload_data_token, view_only_token, public_group
):

    datafile = f'{os.path.dirname(__file__)}/../data/GW190425_initial.xml'
    with open(datafile, 'rb') as fid:
        payload = fid.read()
    data = {'xml': payload}

    status, data = api('POST', 'gcn_event', data=data, token=super_admin_token)
    assert status == 200
    assert data['status'] == 'success'
    gcnevent_id = data['data']['gcnevent_id']

    dateobs = "2019-04-25 08:18:05"
    skymap = "bayestar.fits.gz"
    status, data = api(
        'GET',
        f'localization/{dateobs}/name/{skymap}',
        token=super_admin_token,
    )
    assert status == 200
    assert data['status'] == 'success'
    localization_id = data['data']['id']

    name = str(uuid.uuid4())
    status, data = api(
        'POST',
        'telescope',
        data={
            'name': name,
            'nickname': name,
            'lat': 0.0,
            'lon': 0.0,
            'elevation': 0.0,
            'diameter': 10.0,
        },
        token=super_admin_token,
    )
    assert status == 200
    assert data['status'] == 'success'
    telescope_id = data['data']['id']

    fielddatafile = f'{os.path.dirname(__file__)}/../../../data/ZTF_Fields.csv'
    regionsdatafile = f'{os.path.dirname(__file__)}/../../../data/ZTF_Region.reg'

    instrument_name = str(uuid.uuid4())
    status, data = api(
        'POST',
        'instrument',
        data={
            'name': instrument_name,
            'type': 'imager',
            'band': 'Optical',
            'filters': ['ztfr'],
            'telescope_id': telescope_id,
            'api_classname': 'ZTFAPI',
            'api_classname_obsplan': 'ZTFMMAAPI',
            'field_data': pd.read_csv(fielddatafile)[:5].to_dict(orient='list'),
            'field_region': Regions.read(regionsdatafile).serialize(format='ds9'),
            'sensitivity_data': {
                'ztfr': {
                    'limiting_magnitude': 20.3,
                    'magsys': 'ab',
                    'exposure_time': 30,
                    'zeropoint': 26.3,
                }
            },
        },
        token=super_admin_token,
    )
    assert status == 200
    assert data['status'] == 'success'
    instrument_id = data['data']['id']

    # wait for the fields to populate
    time.sleep(15)

    request_data = {
        'group_id': public_group.id,
        'instrument_id': instrument_id,
        'pi': 'Shri Kulkarni',
        'hours_allocated': 200,
        'start_date': '3021-02-27T00:00:00',
        'end_date': '3021-07-20T00:00:00',
        'proposal_id': 'COO-2020A-P01',
    }

    status, data = api('POST', 'allocation', data=request_data, token=super_admin_token)
    assert status == 200
    assert data['status'] == 'success'
    allocation_id = data['data']['id']

    queue_name = str(uuid.uuid4())
    request_data = {
        'allocation_id': allocation_id,
        'gcnevent_id': gcnevent_id,
        'localization_id': localization_id,
        'payload': {
            'start_date': '2019-04-25 01:01:01',
            'end_date': '2019-04-27 01:01:01',
            'filter_strategy': 'block',
            'schedule_strategy': 'tiling',
            'schedule_type': 'greedy_slew',
            'exposure_time': 300,
            'filters': 'ztfr',
            'maximum_airmass': 2.0,
            'integrated_probability': 100,
            'minimum_time_difference': 30,
            'queue_name': queue_name,
            'program_id': 'Partnership',
            'subprogram_name': 'GRB',
        },
    }

    status, data = api(
        'POST', 'observation_plan', data=request_data, token=super_admin_token
    )
    assert status == 200
    assert data['status'] == 'success'
    id = data['data']['ids'][0]

    # wait for the observation plan to finish
    time.sleep(15)

    status, data = api(
        'GET',
        f'observation_plan/{id}',
        params={"includePlannedObservations": "true"},
        token=super_admin_token,
    )
    assert status == 200
    assert data['status'] == 'success'

    assert data["data"]["gcnevent_id"] == gcnevent_id
    assert data["data"]["allocation_id"] == allocation_id
    assert data["data"]["payload"] == request_data["payload"]

    assert len(data["data"]["observation_plans"]) == 1
    observation_plan = data["data"]["observation_plans"][0]

    assert observation_plan['plan_name'] == request_data["payload"]['queue_name']
    assert observation_plan['validity_window_start'] == request_data["payload"][
        'start_date'
    ].replace(" ", "T")
    assert observation_plan['validity_window_end'] == request_data["payload"][
        'end_date'
    ].replace(" ", "T")

    planned_observations = observation_plan['planned_observations']

    assert len(planned_observations) == 5
    assert all(
        [
            obs['filt'] == request_data["payload"]['filters']
            for obs in planned_observations
        ]
    )
    assert all(
        [
            obs['exposure_time'] == int(request_data["payload"]['exposure_time'])
            for obs in planned_observations
        ]
    )

    status, data = api(
        'GET', f'observation_plan/{id}/simsurvey', token=super_admin_token
    )
    assert status == 200
import os
import sys
import pandas as pd

from getter import get_dataframe_by_definition

from regions import Regions
filter_region_name = Regions.filter_region_name
reference_region_names = Regions.names()

from definitions import definitions

# -------------------------------------------------------------------------
# Definitions
# -------------------------------------------------------------------------

from config import XL_SAMPLE_FOLDER

source_def_sample = {
    'varname': 'PPI_PROM_ytd',
    'folder': XL_SAMPLE_FOLDER,
    'filename': 'industrial_prices.xls',
    'sheet': 'пром.товаров',
    'anchor': 'B5',
    'anchor_value': 96.6
}

def_dict_2 = {
    'varname': 'SHIPMENTS',
    'folder': XL_SAMPLE_FOLDER,
    'filename': 'shipment.xls',
Example #31
    def regions(self):
        self.setCentralWidget(Regions(self))
        self.enableTools()
Example #32
    def get_data_items(cls,
                       data_items='all',
                       regions='ES',
                       start_date=None,
                       end_date=None,
                       language='ES',
                       errors='ignore'):
        """
        Collects the required Data Items from associated Data Sources

        Parameters
        ----------
        data_items : list of str
            list of data item names. By default, 'all' are collected.
        regions : list of str
            list of region names. By default, 'ES' refers to all Spanish regions.
        start_date : pd.datetime
            first day to be considered in TEMPORAL data items. By default, None is established.
        end_date : pd.datetime
            last day to be considered in TEMPORAL data items. By default, None is established.
        language : str
            language of the returned data. 
                'ES' for Spanish (default value),
                'EN' for English.
        errors : str
            action to be taken when errors occur.
                'ignore' tries to get all possible data items even if some can't be collected,
                'raise' throws an exception and the execution is aborted upon detection of any error. 

        Returns
        -------
        pd.DataFrame 
            a DataFrame with the required information.

        Notes
        -----
        If dates are passed, then it is assumed that TEMPORAL data items are required. Otherwise, a GEOGRAPHICAL retrieval is assumed.
        A TEMPORAL retrieval produces a DataFrame with daily [Date] as row indexer and [Region, Data Item] as column multiindexer.
        A GEOGRAPHICAL retrieval produces a DataFrame with [Region] as row indexer and [Data Item] as column indexer.
        """

        # if data sources are not initialized, lets read configurations
        if not cls.__DATA_SOURCES_INITIALIZED:
            cls.__init_data_sources()

        ##### check of parameters #####

        if data_items == 'all':
            data_items = cls.get_data_items_names(data_type=None,
                                                  language=language)
        else:
            ## check if items are implemented ##

            # get all implemented items
            implemented_data_sources = cls.get_data_items_names(
                data_type=None, language=language)
            implemented_data_items = []
            for implemented_data_source in list(
                    implemented_data_sources.keys()):
                implemented_data_items = implemented_data_items + implemented_data_sources[
                    implemented_data_source]

            # check
            successful_data_items = []
            for data_item in data_items:
                if data_item not in implemented_data_items:
                    print(f'WARNING: Item {data_item} is not implemented')
                else:
                    successful_data_items.append(data_item)

            if not successful_data_items:
                print(
                    'WARNING: No result found for the specified data items and conditions'
                )
                return None

            data_items = successful_data_items

        if regions == 'ES':
            regions = Regions.get_regions('ES')

        if start_date is None or end_date is None:
            assumed_data_type = DataType.GEOGRAPHICAL
        else:
            assumed_data_type = DataType.TEMPORAL

        print("Assumed a " + str(assumed_data_type.name) +
              " data retrieval...")

        if assumed_data_type is DataType.TEMPORAL:
            if start_date > end_date:
                print('ERROR: start_date (' + str(start_date) +
                      ') should be earlier than or equal to end_date (' +
                      str(end_date) + ')')
                return None

            if end_date > pd.to_datetime('today').date():
                print('ERROR: end_date (' + str(end_date) +
                      ') should not refer to the future')
                return None

        ### change data items (display names) to internal representation ###

        internalname_displayname_dict = cls._get_internal_names_mapping(
            assumed_data_type, data_items, language=language
        )  # internal name -> display name dict (used later to rename back)
        if internalname_displayname_dict is None:
            return None
        # change data_items to internal representation
        data_items = list(internalname_displayname_dict.keys())

        ### group data items by data source in dictionary ###

        # existing items for assumed data type
        items_by_source = cls.get_data_items_names(
            data_type=assumed_data_type,
            language=language)  # dict with : source -> [item1, item2]
        items_by_assumed_data_type = []
        for items in items_by_source.values():
            items_by_assumed_data_type = items_by_assumed_data_type + items

        # group requested items by data sources
        requested_items_by_source = defaultdict(
            list
        )  # dict  datasource : [requested item 1, requested item 2, ...]
        for data_item in data_items:
            source_class_found = False
            source = 0
            while source < len(
                    cls.__DATA_SOURCE_CLASSES) and not source_class_found:
                source_class_found = cls.__DATA_SOURCE_CLASSES[
                    source].data_item_exists(data_item)
                source = source + 1
            if source_class_found:
                requested_items_by_source[cls.__DATA_SOURCE_CLASSES[
                    source - 1]].append(data_item)
            else:
                # never should get there
                print('WARNING: Data source not found for item \'' +
                      str(data_item) + '\'')

        ##### data retrieval #####
        df_all_data_sources = None

        ## get data by data source ##
        for DATA_SOURCE_CLASS in requested_items_by_source.keys():

            df_data_source = None

            data_items = requested_items_by_source[DATA_SOURCE_CLASS]

            # for temporal data type
            if assumed_data_type is DataType.TEMPORAL:

                df_data_source = DATA_SOURCE_CLASS(data_items, regions,
                                                   start_date,
                                                   end_date).get_data(errors)

                if df_data_source is not None:
                    df_data_source = cls.__complete_dates(
                        df_data_source, start_date, end_date
                    )  # complete with nan values those days without info

            # for geographical data type
            elif assumed_data_type is DataType.GEOGRAPHICAL:
                df_data_source = DATA_SOURCE_CLASS(data_items,
                                                   regions).get_data(errors)
            else:
                # never should get here
                return None

            # continuous joining of data from diverse data sources
            if df_data_source is not None:
                if df_all_data_sources is None:
                    df_all_data_sources = df_data_source.sort_index(axis=1)
                else:
                    df_all_data_sources = pd.concat(
                        [df_all_data_sources, df_data_source],
                        axis='columns').sort_index(axis=1)

        ## END: get data by data source ##

        if df_all_data_sources is None:
            print(
                'WARNING: No result found for the specified data items and conditions'
            )
            return None

        def rename_with_regex(col_name):
            for internal_name in list(internalname_displayname_dict.keys()):
                if re.match(f"^{internal_name}$|^{internal_name} \(",
                            col_name):
                    return re.sub(
                        pattern=internal_name,
                        repl=internalname_displayname_dict[internal_name],
                        string=col_name)
            return 'None'

        df_all_data_sources.rename(columns=rename_with_regex,
                                   level='Item',
                                   inplace=True)

        ### filter retrieved data to match the specific query determined by data_items, regions and dates ###

        # filter requested data_items (some data sources return more data items in the same query than the ones requested)
        df_all_data_sources = df_all_data_sources.loc[
            :, df_all_data_sources.columns.get_level_values('Item') != 'None']

        if assumed_data_type is DataType.TEMPORAL:
            # filter requested dates (indexes)
            df_all_data_sources = df_all_data_sources[
                (df_all_data_sources.index >= start_date)
                & (df_all_data_sources.index <= end_date)]
            # filter requested regions (columns)
            df_all_data_sources = df_all_data_sources.loc[
                :, df_all_data_sources.columns.get_level_values('Region').isin(regions)]
        else:
            # filter requested regions (indexes)
            df_all_data_sources = df_all_data_sources[
                df_all_data_sources.index.isin(regions)]

        # drop duplicated columns
        df_all_data_sources = df_all_data_sources.loc[
            :, ~df_all_data_sources.columns.duplicated()]
        return df_all_data_sources
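A minimal usage sketch for the method above, assuming it is exposed as a classmethod of the library's collection class (written here as COnVIDa; the class name, data item names and region names are placeholders). Passing both dates triggers the TEMPORAL retrieval described in the docstring, so the result is a Date-indexed DataFrame with a [Region, Data Item] column MultiIndex:

import pandas as pd

df = COnVIDa.get_data_items(
    data_items=['new_cases'],          # placeholder data item name
    regions=['Madrid', 'Cataluña'],    # placeholder region names
    start_date=pd.to_datetime('2021-01-01'),
    end_date=pd.to_datetime('2021-01-31'),
    language='EN',
    errors='ignore',
)
if df is not None:
    # rows: daily dates; columns: (Region, Data Item)
    print(df.head())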
Example #33
def generate_plan(observation_plan_id, request_id, user_id):
    """Use gwemopt to construct observing plan."""

    from ..models import DBSession
    from skyportal.handlers.api.instrument import add_tiles

    Session = scoped_session(
        sessionmaker(bind=DBSession.session_factory.kw["bind"]))

    import gwemopt
    import gwemopt.utils
    import gwemopt.segments
    import gwemopt.skyportal

    from ..models import (
        EventObservationPlan,
        Galaxy,
        InstrumentField,
        ObservationPlanRequest,
        PlannedObservation,
        User,
    )

    session = Session()
    try:
        plan = session.query(EventObservationPlan).get(observation_plan_id)
        request = session.query(ObservationPlanRequest).get(request_id)
        user = session.query(User).get(user_id)

        event_time = Time(request.gcnevent.dateobs,
                          format='datetime',
                          scale='utc')
        start_time = Time(request.payload["start_date"],
                          format='iso',
                          scale='utc')
        end_time = Time(request.payload["end_date"], format='iso', scale='utc')

        params = {
            'config': {
                request.instrument.name: {
                    # field list from skyportal
                    'tesselation': request.instrument.fields,
                    # telescope longitude [deg]
                    'longitude': request.instrument.telescope.lon,
                    # telescope latitude [deg]
                    'latitude': request.instrument.telescope.lat,
                    # telescope elevation [m]
                    'elevation': request.instrument.telescope.elevation,
                    # telescope name
                    'telescope': request.instrument.name,
                    # telescope horizon
                    'horizon': -12.0,
                    # time in seconds to change the filter
                    'filt_change_time': 0.0,
                    # extra overhead in seconds
                    'overhead_per_exposure': 0.0,
                    # slew rate for the telescope [deg/s]
                    'slew_rate': 2.6,
                    # camera readout time
                    'readout': 0.0,
                    # telescope field of view
                    'FOV': 0.0,
                    # exposure time for the given limiting magnitude
                    'exposuretime': 1.0,
                    # limiting magnitude given telescope time
                    'magnitude': 0.0,
                },
            },
            # gwemopt filter strategy
            # options: block (blocks of single filters), integrated (series of alternating filters)
            'doAlternativeFilters': request.payload["filter_strategy"] == "block",
            # flag to indicate fields come from DB
            'doDatabase': True,
            # only keep tiles within powerlaw_cl
            'doMinimalTiling': True,
            # single set of scheduled observations
            'doSingleExposure': True,
            # gwemopt scheduling algorithms
            # options: greedy, greedy_slew, sear, airmass_weighted
            'scheduleType': request.payload["schedule_type"],
            # list of filters to use for observations
            'filters': request.payload["filters"].split(","),
            # GPS time for event
            'gpstime': event_time.gps,
            # Healpix nside for the skymap
            'nside': 512,
            # maximum integrated probability of the skymap to consider
            'powerlaw_cl': request.payload["integrated_probability"],
            'telescopes': [request.instrument.name],
            # minimum difference between observations of the same field
            'mindiff': request.payload["minimum_time_difference"],
            # maximum airmass with which to observe
            'airmass': request.payload["maximum_airmass"],
            # array of exposure times (same length as filter array)
            'exposuretimes': np.array(
                [int(request.payload["exposure_time"])]
                * len(request.payload["filters"].split(","))
            ),
        }

        if request.payload["schedule_strategy"] == "galaxy":
            params = {
                **params,
                'tilesType': 'galaxy',
                'galaxy_catalog': request.payload["galaxy_catalog"],
                'galaxy_grade': 'S',
                'writeCatalog': False,
                'catalog_n': 1.0,
                'powerlaw_dist_exp': 1.0,
            }
        elif request.payload["schedule_strategy"] == "tiling":
            params = {**params, 'tilesType': 'moc'}
        else:
            raise AttributeError(
                'scheduling_strategy should be tiling or galaxy')

        params = gwemopt.utils.params_checker(params)
        params = gwemopt.segments.get_telescope_segments(params)

        params["Tobs"] = [
            start_time.mjd - event_time.mjd,
            end_time.mjd - event_time.mjd,
        ]

        params['map_struct'] = dict(
            zip(['prob', 'distmu', 'distsigma', 'distnorm'],
                request.localization.flat))

        params['is3D'] = request.localization.is_3d

        # Function to read maps
        map_struct = gwemopt.utils.read_skymap(params,
                                               is3D=params["do3D"],
                                               map_struct=params['map_struct'])

        if params["tilesType"] == "galaxy":
            query = Galaxy.query_records_accessible_by(user, mode="read")
            query = query.filter(
                Galaxy.catalog_name == params["galaxy_catalog"])
            galaxies = query.all()
            catalog_struct = {}
            catalog_struct["ra"] = np.array([g.ra for g in galaxies])
            catalog_struct["dec"] = np.array([g.dec for g in galaxies])
            catalog_struct["S"] = np.array([1.0 for g in galaxies])
            catalog_struct["Sloc"] = np.array([1.0 for g in galaxies])
            catalog_struct["Smass"] = np.array([1.0 for g in galaxies])

        if params["tilesType"] == "moc":
            moc_structs = gwemopt.skyportal.create_moc_from_skyportal(
                params, map_struct=map_struct)
            tile_structs = gwemopt.tiles.moc(params, map_struct, moc_structs)
        elif params["tilesType"] == "galaxy":
            if request.instrument.region is None:
                raise ValueError(
                    'Must define the instrument region in the case of galaxy requests'
                )
            regions = Regions.parse(request.instrument.region, format='ds9')
            tile_structs = gwemopt.skyportal.create_galaxy_from_skyportal(
                params, map_struct, catalog_struct, regions=regions)

        tile_structs, coverage_struct = gwemopt.coverage.timeallocation(
            params, map_struct, tile_structs)

        # if the fields do not yet exist, we need to add them
        if params["tilesType"] == "galaxy":
            regions = Regions.parse(request.instrument.region, format='ds9')
            data = {
                'RA': coverage_struct["data"][:, 0],
                'Dec': coverage_struct["data"][:, 1],
            }
            field_data = pd.DataFrame.from_dict(data)
            field_ids = add_tiles(
                request.instrument.id,
                request.instrument.name,
                regions,
                field_data,
                session=session,
            )

        planned_observations = []
        for ii in range(len(coverage_struct["ipix"])):
            data = coverage_struct["data"][ii, :]
            filt = coverage_struct["filters"][ii]
            mjd = data[2]
            tt = Time(mjd, format='mjd')

            overhead_per_exposure = params["config"][
                request.instrument.name]["overhead_per_exposure"]

            exposure_time, prob = data[4], data[6]
            if params["tilesType"] == "galaxy":
                field_id = field_ids[ii]
            else:
                field_id = data[5]

            field = InstrumentField.query.filter(
                InstrumentField.instrument_id == request.instrument.id,
                InstrumentField.field_id == field_id,
            ).first()
            if field is None:
                return log(f"Missing field {field_id} from list")

            planned_observation = PlannedObservation(
                obstime=tt.datetime,
                dateobs=request.gcnevent.dateobs,
                field_id=field.id,
                exposure_time=exposure_time,
                weight=prob,
                filt=filt,
                instrument_id=request.instrument.id,
                planned_observation_id=ii,
                observation_plan_id=plan.id,
                overhead_per_exposure=overhead_per_exposure,
            )
            planned_observations.append(planned_observation)

        session.add_all(planned_observations)
        plan.status = 'complete'
        session.merge(plan)
        session.commit()

        request.status = 'complete'
        session.merge(request)
        session.commit()

        flow = Flow()
        flow.push(
            '*',
            "skyportal/REFRESH_GCNEVENT",
            payload={"gcnEvent_dateobs": request.gcnevent.dateobs},
        )

        return log(
            f"Generated plan for observation plan {observation_plan_id}")

    except Exception as e:
        return log(
            f"Unable to generate plan for observation plan {observation_plan_id}: {e}"
        )
    finally:
        Session.remove()
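A hedged sketch of how this helper might be dispatched once the plan, request and user rows exist; offloading to a Tornado executor is one common way to keep the request thread free, and the plan, request and user objects below are placeholders rather than code from the source:

from tornado.ioloop import IOLoop

# plan, request and user are assumed to be already-committed DB rows
IOLoop.current().run_in_executor(
    None,  # default thread pool
    lambda: generate_plan(
        observation_plan_id=plan.id,
        request_id=request.id,
        user_id=user.id,
    ),
)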
from regions import Regions
z = Regions.summable_regions()
print(z)

for r in Regions.district_names():
    w = Regions.region_by_district(r)
    print('\n', r)
    print(w)
   
# passes now   
# assert set(Regions.region_by_district("Уральский федеральный округ")) == set (['Курганская область', 'Свердловская область', 'Ханты-Мансийский авт. округ - Югра', 'Ямало-Ненецкий авт. округ', 'Тюменская область без авт. округов', 'Челябинская область'])

# must pass:
assert set(Regions.region_by_district("Уральский федеральный округ")) == set (['Курганская область', 
      'Свердловская область', 
      #'Ханты-Мансийский авт. округ - Югра', 
      #'Ямало-Ненецкий авт. округ', 
      #'Тюменская область без авт. округов', 
      'Тюменская область',
      'Челябинская область'])

print(Regions.region_by_district("Северо-Западный федеральный округ"))

# Regions.region_by_district("Северо-Западный федеральный округ") must return:
['Республика Карелия', 
'Республика Коми', 
 #'Ненецкий авт. округ', 
 #'Архангельская область без авт. округа',
'Архангельская область',  
'Вологодская область', 
'Калининградская область',