Code example #1 (score: 0)
 def test_vector_hierarchy_dimension(self):
     """Round-trip a two-level vector hierarchy dimension through the API."""
     level_dims = [
         data_marts_api.create_vector_dimension(name)
         for name in ('ncl_adm0', 'ncl_adm1')
     ]
     VectorHierarchyDimension('ncl_adm', level_dims).create_dimension()
     reloaded = data_marts_api.get_dimension('ncl_adm')
     self.assertIsInstance(reloaded, VectorHierarchyDimension)
     self.assertEqual(reloaded.levels, ['ncl_adm0', 'ncl_adm1'])
     reloaded.populate_from_publisher()
     # TODO: Test values
     reloaded.get_cubes_dict()
     reloaded.get_cubes_joins()
     reloaded.get_cubes_mappings()
Code example #2 (score: 0)
File: views.py — project: CheckFly/niamoto-portal
def get_occurrence_location_cuts(selected_entity, invert=False):
    """Build cubes cuts selecting occurrences within a geometry.

    :param selected_entity: dict whose ``'value'`` key holds a WKT geometry
        used to filter the ``occurrence_location`` dimension.
    :param invert: when True, the returned SetCut excludes (rather than
        selects) the matching locations.
    :return: a list with a single cut suitable for a ``Cell``.
    """
    wkt = selected_entity['value']
    dim = get_dimension('occurrence_location')
    df = dim.get_values(wkt_filter=wkt)
    idx = list(df.index.values)
    if idx:  # truthiness instead of len(...) > 0
        # One single-level path per matching location id.
        cuts = [
            SetCut(
                'occurrence_location',
                [[int(i)] for i in idx],
                hierarchy='default',
                invert=invert
            )
        ]
    else:
        # No location matched: cut on an impossible id (-1) so the cell
        # selects nothing.
        # NOTE(review): `invert` is not applied in this branch — an inverted
        # cut with no matches should arguably select everything; confirm.
        cuts = [
            PointCut('occurrence_location', [-1])
        ]
    return cuts
Code example #3 (score: 0)
File: views.py — project: CheckFly/niamoto-portal
def process(request):
    """Compute occurrence and richness statistics for a selected entity.

    Reads from ``request.POST``:
      - ``selected_entity``: JSON dict with ``type`` and ``value`` keys
        (``type == 'draw'`` means a free-hand WKT geometry, otherwise a
        dimension name whose member ``value`` is selected);
      - optional ``rainfall_filter`` / ``elevation_filter`` values.

    Builds cuts for the ``taxon_observed_occurrences`` cube and returns a
    Response containing a summary, per-taxon records, column metadata and
    the selected entity's area.
    """
    # Aggregates evaluated by the cubes browser.
    agg = [
        {
            "name": "occurrence_sum",
            "label": "Nombre d'occurrences observées",
            "function": "sum",
            "measure": "occurrence_count",
        },
        {
            "name": "richness",
            "label": "Nombre d'espèces observées",
            "measure": "taxon_dimension_id",
            "function": "count_distinct",
        },
    ]
    dm = get_dimensional_model(
        'taxon_observed_occurrences',
        agg,
    )
    workspace = dm.get_cubes_workspace()
    cube = workspace.cube('taxon_observed_occurrences')
    browser = workspace.browser(cube)
    selected_entity = json.loads(
        request.POST.get('selected_entity', None)
    )
    # Three cut sets: the selection itself, the selection with the location
    # inverted (taxa found elsewhere), and the selection with the
    # environmental filters inverted.
    cuts = []
    invert_location_cuts = []
    invert_env_cuts = []
    invert_env = False
    if selected_entity['type'] == 'draw':
        # Free-hand drawn geometry: filter by occurrence location.
        location_cuts = get_occurrence_location_cuts(selected_entity)
        cuts += location_cuts
        invert_env_cuts += location_cuts
        invert_location_cuts += get_occurrence_location_cuts(
            selected_entity,
            invert=True
        )
        area = 0  # no precomputed area for a drawn shape
    else:
        # Named entity: cut on the corresponding dimension member.
        entity_cut = PointCut(
            selected_entity['type'],
            [selected_entity['value']]
        )
        cuts += [entity_cut]
        invert_env_cuts += [entity_cut]
        invert_location_cuts += [
            PointCut(
                selected_entity['type'],
                [selected_entity['value']],
                invert=True
            ),
        ]
        dim = get_dimension(selected_entity['type'])
        area = dim.get_value(selected_entity['value'], ["area"])[0]
    # Update cuts with rainfall filter
    rainfall_filter = request.POST.get('rainfall_filter', None)
    if rainfall_filter is not None and rainfall_filter != '':
        cuts += [
            PointCut('rainfall', [rainfall_filter])
        ]
        invert_env_cuts += [
            PointCut('rainfall', [rainfall_filter], invert=True)
        ]
        invert_env = True
    # Update cuts with elevation filter
    elevation_filter = request.POST.get('elevation_filter', None)
    if elevation_filter is not None and elevation_filter != '':
        cuts += [
            PointCut('elevation', [elevation_filter])
        ]
        invert_env_cuts += [
            PointCut('elevation', [elevation_filter], invert=True)
        ]
        invert_env = True
    df = pd.DataFrame(list(browser.facts(cell=Cell(cube, cuts))))
    summary = {'occurrence_sum': 0, 'richness': 0}
    records = []
    taxa_ids = pd.Index([])
    if len(df) > 0:
        # Init summary with occurrence_sum
        summary['occurrence_sum'] = df['occurrence_count'].sum()
        # Filter occurrences identified at species level for richness
        df_species = df[df['taxon_dimension.species'] != 'NS']
        # Init records with occurrence sum
        records = pd.DataFrame(
            df_species.groupby(
                ['taxon_dimension.familia', 'taxon_dimension.genus',
                 'taxon_dimension.species']
            )['occurrence_count'].sum(),
            columns=['occurrence_count']
        ).rename(
            columns={'occurrence_count': 'occurrence_sum'},
        )
        records['richness'] = 1
        if len(df_species) > 0:
            taxa_ids = pd.Index(df_species['taxon_dimension_id'].unique())
            # Update summary with richness
            summary['richness'] = df_species['taxon_dimension_id'].nunique()
            # Records to dict
            records = records.reset_index().to_dict(orient='index').values()
    # Compute unique taxa in selected location indicator
    invert_loc_cell = Cell(cube, invert_location_cuts)
    invert_loc_df = pd.DataFrame(list(browser.facts(cell=invert_loc_cell)))
    invert_loc_taxa_ids = pd.Index([])
    if len(invert_loc_df) > 0:
        invert_loc_taxa_ids = pd.Index(
            invert_loc_df['taxon_dimension_id'].unique()
        )
    # Taxa found in the selection but nowhere else.
    diff = taxa_ids.difference(invert_loc_taxa_ids)
    if invert_env:  # boolean truthiness instead of `> 0`
        invert_env_cell = Cell(cube, invert_env_cuts)
        # Single facts query (the original ran it twice and discarded the
        # first result).
        invert_env_df = pd.DataFrame(list(browser.facts(cell=invert_env_cell)))
        if len(invert_env_df) > 0:
            invert_env_taxa_ids = pd.Index(
                invert_env_df['taxon_dimension_id'].unique()
            )
            diff = diff.difference(invert_env_taxa_ids)
    summary['unique_taxa_in_entity'] = len(diff)
    # Extract table attributes
    attributes = [
        'taxon_dimension.familia',
        'taxon_dimension.genus',
        'taxon_dimension.species',
    ]
    attributes_names = []
    for i in attributes:
        attributes_names.append((i, cube.attribute(i).label))
    aggregates_names = [(i.name, i.label) for i in cube.aggregates]
    return Response({
        'summary': summary,
        'records': records,
        'columns': attributes_names + aggregates_names,
        'area': area,
    })
Code example #4 (score: 0)
File: views.py — project: CheckFly/niamoto-portal
 def list(self, request):
     """Return every value of this view's dimension as JSON (cached)."""
     cached = self.data
     if cached is None:
         cached = get_dimension(self.dimension_name).get_values()
         self.data = cached
     return Response(cached.to_json())
Code example #5 (score: 0)
File: views.py — project: CheckFly/niamoto-portal
 def retrieve(self, request, pk=None):
     """Return the single dimension row whose index equals ``pk`` as JSON."""
     if self.data is None:
         self.data = get_dimension(self.dimension_name).get_values()
     row = self.data[self.data.index == int(pk)]
     return Response(row.to_json())
Code example #6 (score: 0)
File: views.py — project: CheckFly/niamoto-portal
def get_elevation_filters():
    """Return the elevation dimension's second cut entry (``cuts[1]``)."""
    return get_dimension('elevation').cuts[1]
Code example #7 (score: 0)
File: views.py — project: CheckFly/niamoto-portal
def get_rainfall_filters():
    """Return the rainfall dimension's second cut entry (``cuts[1]``)."""
    return get_dimension('rainfall').cuts[1]
Code example #8 (score: 0)
File: views.py — project: CheckFly/niamoto-portal
def get_commune_levels():
    """Return ``(id, label)`` pairs for every commune, ids as strings."""
    labels_by_id = get_dimension('communes').get_labels().to_dict()
    return [(str(commune_id), label)
            for commune_id, label in labels_by_id.items()]
Code example #9 (score: 0)
File: views.py — project: CheckFly/niamoto-portal
def get_province_levels():
    """Return ``(id, label)`` pairs for every province, ids as strings."""
    labels_by_id = get_dimension('provinces').get_labels().to_dict()
    return [(str(province_id), label)
            for province_id, label in labels_by_id.items()]
Code example #10 (score: 0)
 def test_get_dimension(self):
     """A freshly created vector dimension can be fetched back by name."""
     data_marts_api.create_vector_dimension("ncl_adm1")
     fetched = data_marts_api.get_dimension("ncl_adm1")
     self.assertIsInstance(fetched, VectorDimension)
Code example #11 (score: 0)
 def _process(self, *args, **kwargs):
     """Return a frame exposing the taxon dimension index as two columns."""
     values = data_marts_api.get_dimension('taxon_dim').get_values()
     # Duplicate the index into both output columns.
     values['n'] = values.index
     values['taxon_dim_id'] = values.index
     return values[['taxon_dim_id', 'n']]