    def get(self, request):
        filters = request.GET.dict()
        filters['validated'] = ''
        search = CollectionSearch(filters)
        page = int(filters.get('page', 1))
        collection_records = search.process_search()
        collection_records_ids = collection_records.values_list(
            'id', flat=True
        )
        collection_records_site_ids = collection_records.distinct(
            'site'
        ).values_list(
            'site__id', flat=True
        )
        self.location_sites = LocationSite.objects.filter(
            id__in=list(collection_records_site_ids)
        ).order_by('site_code')

        paginator = Paginator(self.location_sites, MAX_RESULTS_PER_PAGE)
        # Clamp the requested page to the last available page
        if page > paginator.num_pages:
            page = paginator.num_pages
        current_page = paginator.page(page)
        self.location_sites = current_page.object_list

        self.site_visit_taxa = SiteVisitTaxon.objects.filter(
            id__in=list(collection_records_ids),
            site__in=list(self.location_sites.values_list('id', flat=True))
        )
        sass_score_chart_data = self.get_sass_score_chart_data()
        taxa_per_biotope_data = self.get_taxa_per_biotope_data()
        biotope_ratings_chart_data = self.get_biotope_ratings_chart_data(
            sass_ids=sass_score_chart_data['sass_ids']
        )
        coordinates = []
        ecological_chart_data, unique_ecoregions = (
            self.get_ecological_chart_data()
        )
        for location_site in self.location_sites:
            coordinates.append({
                'x': location_site.get_centroid().x,
                'y': location_site.get_centroid().y
            })

        collection_with_references = self.site_visit_taxa.exclude(
            source_reference__isnull=True
        ).distinct('source_reference')

        source_references = collection_with_references.source_references()

        return Response({
            'total_pages': paginator.num_pages,
            'current_page': page,
            'sass_score_chart_data': sass_score_chart_data,
            'taxa_per_biotope_data': taxa_per_biotope_data,
            'biotope_ratings_chart_data': biotope_ratings_chart_data,
            'ecological_chart_data': ecological_chart_data,
            'unique_ecoregions': unique_ecoregions,
            'coordinates': coordinates,
            'source_references': source_references
        })
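
Every example in this collection follows the same core pattern: copy the GET parameters into a plain dict, hand them to CollectionSearch, and call process_search() to obtain a queryset of collection records that the view then paginates, aggregates, or serializes. A minimal sketch of that shared pattern (the view class and the response shape are illustrative assumptions; CollectionSearch itself is imported from bims.api_views.search, as Example #6 below shows):

# Minimal sketch of the CollectionSearch pattern shared by the examples
# on this page. The view class and response shape are hypothetical.
from rest_framework.response import Response
from rest_framework.views import APIView

from bims.api_views.search import CollectionSearch


class CollectionCountView(APIView):  # hypothetical example view
    def get(self, request):
        filters = request.GET.dict()
        filters['validated'] = ''  # include unvalidated records, as above
        search = CollectionSearch(filters)
        collection_records = search.process_search()  # returns a queryset
        return Response({'count': collection_records.count()})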
Example #2
def download_sass_summary_data(request):
    """
    Download SASS summary data.
    """
    filters = request.GET
    search = CollectionSearch(filters)
    collection_records = search.process_search()

    # Get SASS data
    site_visit_taxa = SiteVisitTaxon.objects.filter(
        id__in=list(collection_records.values_list('id', flat=True)),
        taxonomy__taxongroup__category=(
            TaxonomicGroupCategory.SASS_TAXON_GROUP.name))
    if not site_visit_taxa:
        response_message = 'No SASS data for this site'
        return JsonResponse(get_response(FAILED_STATUS, response_message))

    # Filename
    search_uri = request.build_absolute_uri()
    path_file, filename = get_filename(search_uri, site_visit_taxa.count())

    if os.path.exists(path_file):
        return JsonResponse(get_response(SUCCESS_STATUS, filename))

    download_sass_summary_data_task.delay(filename, filters, path_file)

    return JsonResponse(get_response(PROCESSING_STATUS, filename))
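
This view implements a poll-until-ready contract: if the CSV already exists on disk it responds with SUCCESS_STATUS and the filename, otherwise it queues download_sass_summary_data_task on Celery and responds with PROCESSING_STATUS, so the client is expected to call the same URL again until the file is ready. A hedged client-side sketch of that loop (the endpoint URL and the exact JSON keys returned by get_response() are assumptions):

# Hedged sketch of a client polling the download endpoint above until
# the background task finishes. The URL and the 'status' key in the
# payload are assumptions for illustration.
import time

import requests


def wait_for_download(url, params, interval=5, timeout=300):
    deadline = time.time() + timeout
    while time.time() < deadline:
        payload = requests.get(url, params=params).json()
        if payload.get('status') != 'processing':  # assumed status value
            return payload
        time.sleep(interval)
    raise TimeoutError('download did not finish within %s seconds' % timeout)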
Example #3
    def get_site_visit_taxon(self):
        filters = self.request.GET.dict()
        filters['validated'] = ''
        search = CollectionSearch(filters)
        collection_records = search.process_search()
        self.site_visit_taxa = SiteVisitTaxon.objects.filter(
            id__in=collection_records,
            taxonomy__taxongroup__category=(
                TaxonomicGroupCategory.SASS_TAXON_GROUP.name
            )
        )
Example #4
    def get(self, request):
        site_id = request.GET.get('siteId')
        filters = request.GET

        # Search collection
        search = CollectionSearch(filters)
        collection_results = search.process_search()
        context = {'collection_results': collection_results}
        location_site = self.get_object(site_id)
        serializer = LocationSiteDetailSerializer(location_site,
                                                  context=context)
        return Response(serializer.data)
Example #5
    def get(self, request):
        filters = request.GET
        search = CollectionSearch(filters)

        # Search collection
        collection_results = search.process_search()

        try:
            serializer = BioCollectionSerializer(collection_results, many=True)
            return Response(serializer.data)
        except BiologicalCollectionRecord.DoesNotExist:
            return HttpResponse('Object Does Not Exist',
                                status=status.HTTP_400_BAD_REQUEST)
Example #6
def search_task(parameters, search_process_id, background=True):
    from bims.utils.celery import memcache_lock
    from bims.api_views.search import CollectionSearch
    from bims.models.search_process import (
        SearchProcess,
        SEARCH_PROCESSING,
        SEARCH_FINISHED,
        SEARCH_FAILED
    )

    try:
        search_process = SearchProcess.objects.get(id=search_process_id)
    except SearchProcess.DoesNotExist:
        return

    if background:
        lock_id = '{0}-lock-{1}'.format(
            search_process.file_path,
            search_process.process_id
        )
        oid = '{0}'.format(search_process.process_id)
        with memcache_lock(lock_id, oid) as acquired:
            if acquired:
                search_process.set_status(SEARCH_PROCESSING)

                search = CollectionSearch(parameters)
                search_results = search.get_summary_data()
                if search_results:
                    search_process.set_search_raw_query(
                        search.location_sites_raw_query
                    )
                    search_process.create_view()
                    search_process.set_status(SEARCH_FINISHED, False)
                    search_results['status'] = SEARCH_FINISHED
                    search_results['extent'] = search.extent()
                    search_process.save_to_file(search_results)
                else:
                    search_process.set_status(SEARCH_FAILED)
                return
        logger.info(
            'Search %s is already being processed by another worker',
            search_process.process_id)
    else:
        search = CollectionSearch(parameters)
        search_results = search.get_summary_data()
        if search_results:
            search_process.set_search_raw_query(
                search.location_sites_raw_query
            )
            search_process.create_view()
            search_process.set_status(SEARCH_FINISHED, False)
            search_results['status'] = SEARCH_FINISHED
            search_results['extent'] = search.extent()
            search_process.save_to_file(search_results)
        else:
            search_process.set_status(SEARCH_FAILED)
        return search_results
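
The background branch serializes concurrent workers with memcache_lock: only the worker whose with block yields acquired == True runs the search, while duplicates fall through to the log message. The real context manager lives in bims.utils.celery; a typical cache.add-based implementation of this pattern, sketched from the standard Celery cookbook recipe (an assumption, not necessarily the project's actual code):

# Sketch of a cache.add-based memcache_lock, following the well-known
# Celery cookbook recipe. The project's real version in bims.utils.celery
# may differ in details.
import time
from contextlib import contextmanager

from django.core.cache import cache

LOCK_EXPIRE = 60 * 10  # let a stale lock expire after ten minutes


@contextmanager
def memcache_lock(lock_id, oid):
    timeout_at = time.monotonic() + LOCK_EXPIRE - 3
    # cache.add is atomic: it succeeds only if the key is not set yet,
    # so exactly one worker "wins" the lock.
    status = cache.add(lock_id, oid, LOCK_EXPIRE)
    try:
        yield status
    finally:
        if time.monotonic() < timeout_at and status:
            # Release only a lock we own that has not already expired.
            cache.delete(lock_id)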
Example #7
    def get(self, request):
        filters = request.GET
        search = CollectionSearch(filters)
        file_type = request.GET.get('fileType') or 'csv'
        site_results = None

        collection_results = search.process_search()

        if file_type == 'csv':
            return self.convert_to_cvs(
                collection_results,
                site_results,
                BioCollectionOneRowSerializer)
        elif file_type == 'geojson':
            return self.convert_to_geojson(
                collection_results,
                BiologicalCollectionRecord,
                BioCollectionGeojsonSerializer)
        else:
            return Response([])
Example #8
def download_sass_data_site_task(filename, filters, path_file):
    from bims.utils.celery import memcache_lock

    lock_id = '{0}-lock-{1}'.format(filename, len(filters))
    oid = '{0}'.format(filename)

    with memcache_lock(lock_id, oid) as acquired:
        if acquired:
            search = CollectionSearch(filters)
            context = {'filters': filters}
            collection_records = search.process_search()
            site_visit_taxon = SiteVisitTaxon.objects.filter(
                id__in=collection_records).order_by(
                    'site_visit__site_visit_date')
            serializer = SassDataSerializer(site_visit_taxon,
                                            many=True,
                                            context=context)
            headers = serializer.data[0].keys()
            rows = serializer.data

            formatted_headers = []
            # Rename headers
            for header in headers:
                formatted_headers.append(header.replace('_', ' ').capitalize())

            with open(path_file, 'w') as csv_file:
                writer = csv.DictWriter(csv_file, fieldnames=formatted_headers)
                writer.writeheader()
                writer.fieldnames = headers
                for row in rows:
                    try:
                        writer.writerow(row)
                    except ValueError:
                        writer.fieldnames = row.keys()
                        writer.writerow(row)
            return
    logger.info('Csv %s is already being processed by another worker',
                filename)
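
Note the fieldnames swap around writeheader(): the DictWriter is constructed with the human-readable column titles so the header row looks right, then fieldnames is reset to the raw serializer keys so writerow() can look each value up under its original name. A tiny standalone demonstration of the trick:

# Standalone demonstration of the DictWriter header-renaming trick used
# above: pretty titles in the header row, raw keys for the data rows.
import csv
import io

rows = [{'site_code': 'A1', 'sass_score': 92}]
raw_headers = list(rows[0].keys())
pretty_headers = [h.replace('_', ' ').capitalize() for h in raw_headers]

buffer = io.StringIO()
writer = csv.DictWriter(buffer, fieldnames=pretty_headers)
writer.writeheader()              # writes "Site code,Sass score"
writer.fieldnames = raw_headers   # rows are keyed by the raw names
writer.writerow(rows[0])
print(buffer.getvalue())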
Example #9
    def get_queryset(self):
        """
        Add GET requests filters
        """
        search_filters = self.request.GET.dict()

        # Remove page in filters
        if 'page' in search_filters:
            del search_filters['page']

        # Base queryset
        qs = super(SiteVisitListView, self).get_queryset()

        if search_filters:
            search = CollectionSearch(search_filters)
            self.collection_results = search.process_search()
            qs = qs.filter(
                id__in=self.collection_results.values('survey')).annotate(
                    total=Count('biological_collection_record'))
        else:
            self.collection_results = BiologicalCollectionRecord.objects.all()

        return qs.order_by('-date')
Example #10
def download_sass_summary_data_task(filename, filters, path_file):
    from bims.utils.celery import memcache_lock
    import random

    lock_id = '{0}-lock-{1}'.format(filename, len(filters))
    oid = random.randint(1, 101)
    with memcache_lock(lock_id, oid) as acquired:
        if acquired:
            search = CollectionSearch(filters)
            context = {'filters': filters}

            collection_records = search.process_search()
            collection_ids = list(
                collection_records.values_list('id', flat=True))
            # Get SASS data
            site_visit_taxa = SiteVisitTaxon.objects.filter(
                id__in=collection_ids)
            summary = site_visit_taxa.annotate(
                date=F('collection_date'), ).values('date').annotate(
                    sampling_date=F('site_visit__site_visit_date'),
                    full_name=Concat('site_visit__owner__first_name',
                                     Value(' '),
                                     'site_visit__owner__last_name',
                                     output_field=CharField())
                ).values('sampling_date', 'full_name').annotate(
                    count=Count('sass_taxon'),
                    sass_score=Sum(
                        Case(When(condition=Q(
                            site_visit__sass_version=5,
                            sass_taxon__sass_5_score__isnull=False),
                                  then='sass_taxon__sass_5_score'),
                             default='sass_taxon__score')),
                    sass_id=F('site_visit__id'),
                    FBIS_site_code=Case(
                        When(
                            site_visit__location_site__site_code__isnull=False,
                            then='site_visit__location_site__site_code'),
                        default='site_visit__location_site__name'),
                    site_id=F('site_visit__location_site__id'),
                    sass_version=F('site_visit__sass_version'),
                    site_description=F(
                        'site_visit__location_site__site_description'),
                    river_name=Case(When(
                        site_visit__location_site__river__isnull=False,
                        then='site_visit__location_site__river__name'),
                                    default=Value('-')),
                    latitude=F('site_visit__location_site__latitude'),
                    longitude=F('site_visit__location_site__longitude'),
                    source_reference=F('source_reference'),
                    ecological_category=F(
                        'site_visit__'
                        'sitevisitecologicalcondition__'
                        'ecological_condition__category')).annotate(
                            aspt=Cast(F('sass_score'), FloatField()) /
                            Cast(F('count'),
                                 FloatField()), ).order_by('sampling_date')
            context['location_contexts'] = LocationContext.objects.filter(
                site__in=site_visit_taxa.values('site_visit__location_site'))

            serializer = SassSummaryDataSerializer(summary,
                                                   many=True,
                                                   context=context)
            headers = serializer.data[0].keys()
            rows = serializer.data
            formatted_headers = []

            # Rename headers
            for header in headers:
                header_split = [
                    word[0].upper() + word[1:] for word in header.split('_')
                ]
                header = ' '.join(header_split)
                formatted_headers.append(header)

            with open(path_file, 'w') as csv_file:
                writer = csv.DictWriter(csv_file, fieldnames=formatted_headers)
                writer.writeheader()
                writer.fieldnames = headers
                for row in rows:
                    try:
                        writer.writerow(row)
                    except ValueError:
                        writer.fieldnames = row.keys()
                        writer.writerow(row)
            return
    logger.info('Csv %s is already being processed by another worker',
                filename)
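
The aspt annotation divides two integer aggregates, so both sides are wrapped in Cast(..., FloatField()) to force floating-point division in SQL; without the casts most backends would perform integer division. The same idiom in isolation (the Book model and its fields are hypothetical):

# Generic sketch of database-side float division with Cast, the idiom
# behind the ASPT column above. Book, total_pages and chapters are
# hypothetical names.
from django.db.models import F, FloatField
from django.db.models.functions import Cast

from myapp.models import Book  # hypothetical model

books = Book.objects.annotate(
    pages_per_chapter=(
        Cast(F('total_pages'), FloatField()) /
        Cast(F('chapters'), FloatField())
    )
)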
Example #11
    def generate(self, filters, search_process):
        search = CollectionSearch(filters)
        collection_results = search.process_search()
        site_id = filters['siteId']

        self.iucn_category = dict(IUCNStatus.CATEGORY_CHOICES)

        self.origin_name_list = dict(Taxonomy.CATEGORY_CHOICES)

        taxa_occurrence = self.site_taxa_occurrences_per_year(
            collection_results)

        category_summary = collection_results.exclude(category='').annotate(
            origin=F('category')).values_list('origin').annotate(
                count=Count('category'))
        is_multi_sites = False
        is_sass_exists = False

        if site_id:
            site_details = self.get_site_details(site_id)
            site_details['Species and Occurrences'] = (
                self.get_number_of_records_and_taxa(collection_results))
        else:
            is_multi_sites = True
            site_details = self.multiple_site_details(collection_results)
            is_sass_exists = collection_results.filter(
                notes__icontains='sass').exists()
        search_process.set_search_raw_query(search.location_sites_raw_query)
        search_process.set_status(SEARCH_FINISHED, False)
        search_process.create_view()

        biodiversity_data = self.get_biodiversity_data(collection_results)
        site_images = []
        if not is_multi_sites:
            site_image_objects = SiteImage.objects.filter(
                Q(survey__in=list(
                    collection_results.distinct('survey').values_list(
                        'survey__id', flat=True)))
                | Q(site_id=int(site_id))).values_list('image', flat=True)
            for site_image in site_image_objects:
                site_images.append(
                    get_thumbnail(site_image,
                                  'x500',
                                  crop='center',
                                  quality=99).url)

        # Check module
        modules = []
        if 'modules' in filters:
            modules = list(
                TaxonGroup.objects.filter(
                    category=TaxonomicGroupCategory.SPECIES_MODULE.name,
                    id=filters['modules']).values_list('name', flat=True))

        # - Survey
        survey_list = []
        surveys = Survey.objects.filter(
            id__in=collection_results.values('survey')).order_by('-date')
        for survey in surveys[:5]:
            survey_list.append({
                'date': str(survey.date),
                'site': str(survey.site),
                'id': survey.id,
                'records': BiologicalCollectionRecord.objects.filter(
                    survey=survey).count()
            })

        # - Source references
        collection_with_references = collection_results.exclude(
            source_reference__isnull=True).distinct('source_reference')

        source_references = collection_with_references.source_references()

        # - Chemical data
        list_chems = {}
        chem_exist = False
        if site_id:
            list_chems_code = ['COND', 'TEMP', 'PH', 'DO']
            chems = ChemicalRecord.objects.filter(
                Q(location_site_id=site_id) | Q(survey__site_id=site_id))
            x_label = []
            if chems.count() > 0:
                chem_exist = True
            for chem in list_chems_code:
                chem_name = chem.lower().replace('-n', '').upper()
                qs = chems.filter(chem__chem_code=chem).order_by('date')
                if not qs:
                    continue
                value = ChemicalRecordsSerializer(qs, many=True)
                # Get chemical unit
                try:
                    chem_unit = ChemUnit[qs[0].chem.chem_unit].value
                except KeyError:
                    chem_unit = qs[0].chem.chem_unit
                data = {
                    'unit': chem_unit,
                    'name': qs[0].chem.chem_description,
                    'values': value.data
                }
                for val in value.data:
                    if val['str_date'] not in x_label:
                        x_label.append(val['str_date'])
                try:
                    list_chems[chem_name].append({chem: data})
                except KeyError:
                    list_chems[chem_name] = [{chem: data}]
            list_chems['x_label'] = x_label

        try:
            dashboard_configuration = json.loads(
                DashboardConfiguration.objects.get(
                    module_group__id=filters['modules']).additional_data)
        except (DashboardConfiguration.DoesNotExist, KeyError, ValueError):
            dashboard_configuration = {}

        response_data = {
            self.TOTAL_RECORDS: collection_results.count(),
            self.SITE_DETAILS: dict(site_details),
            self.TAXA_OCCURRENCE: dict(taxa_occurrence),
            self.CATEGORY_SUMMARY: dict(category_summary),
            self.OCCURRENCE_DATA: self.occurrence_data(collection_results),
            self.IUCN_NAME_LIST: self.iucn_category,
            self.ORIGIN_NAME_LIST: self.origin_name_list,
            self.BIODIVERSITY_DATA: dict(biodiversity_data),
            self.SOURCE_REFERENCES: source_references,
            self.CHEMICAL_RECORDS: list_chems,
            self.SURVEY: survey_list,
            'modules': modules,
            'site_images': list(site_images),
            'process': search_process.process_id,
            'extent': search.extent(),
            'sites_raw_query': search_process.process_id,
            'is_multi_sites': is_multi_sites,
            'is_sass_exists': is_sass_exists,
            'is_chem_exists': chem_exist,
            'total_survey': surveys.count(),
            'dashboard_configuration': dashboard_configuration
        }
        create_search_process_file(response_data,
                                   search_process,
                                   file_path=None,
                                   finished=True)
        return response_data
Example #12
    def get(self, request):
        filters = request.GET
        search = CollectionSearch(filters)

        search_process, created = get_or_create_search_process(
            TAXON_SUMMARY, query=request.build_absolute_uri())

        if search_process.file_path:
            if os.path.exists(search_process.file_path):
                try:
                    with open(search_process.file_path) as raw_data:
                        return Response(json.load(raw_data))
                except ValueError:
                    pass

        collection_results = search.process_search()
        response_data = {}

        if not collection_results:
            return HttpResponse('Object Does Not Exist',
                                status=status.HTTP_400_BAD_REQUEST)
        records_over_time = collection_results.annotate(
            year=ExtractYear('collection_date')).values('year').annotate(
                count=Count('year')).order_by('year')
        records_per_area = collection_results.annotate(
            site_name=F('site__name')).values('site_name').annotate(
                count=Count('site_name'),
                site_code=F('site__site_code'),
                site_id=F('site__id'),
                river=F('site__river__name'))

        taxonomy = collection_results[0].taxonomy

        search_process.set_search_raw_query(search.location_sites_raw_query)
        search_process.create_view()
        endemic = None
        if taxonomy.endemism:
            endemic = taxonomy.endemism.name
        iucn_status = None
        if taxonomy.iucn_status:
            iucn_status = taxonomy.iucn_status.category
        response_data['iucn_id'] = taxonomy.iucn_redlist_id
        response_data['taxon'] = taxonomy.scientific_name
        response_data['gbif_id'] = taxonomy.gbif_key
        response_data['total_records'] = len(collection_results)
        response_data['conservation_status'] = iucn_status
        response_data['origin'] = collection_results[0].category
        response_data['endemism'] = endemic
        response_data['records_over_time_labels'] = list(
            records_over_time.values_list('year', flat=True))
        response_data['records_over_time_data'] = list(
            records_over_time.values_list('count', flat=True))
        response_data['records_per_area'] = list(records_per_area)
        response_data['sites_raw_query'] = search_process.process_id
        response_data['process_id'] = search_process.process_id
        response_data['extent'] = search.extent()
        response_data['origin_choices_list'] = (
            BiologicalCollectionRecord.CATEGORY_CHOICES)
        response_data['iucn_choice_list'] = IUCNStatus.CATEGORY_CHOICES

        taxonomy_rank = {taxonomy.rank: taxonomy.scientific_name}
        taxonomy_parent = taxonomy.parent
        while taxonomy_parent:
            taxonomy_rank[taxonomy_parent.rank] = (
                taxonomy_parent.canonical_name)
            taxonomy_parent = taxonomy_parent.parent
        response_data['taxonomy_rank'] = taxonomy_rank

        # Common name: prefer an English vernacular name if available
        common_names = []
        if taxonomy.vernacular_names.filter(language='eng').exists():
            common_names = list(taxonomy.vernacular_names.filter(
                language='eng').values())
        elif taxonomy.vernacular_names.exists():
            common_names = list(taxonomy.vernacular_names.all().values())
        if len(common_names) == 0:
            response_data['common_name'] = 'Unknown'
        else:
            response_data['common_name'] = str(
                common_names[0]['name']).capitalize()

        # Source references
        collection_with_references = collection_results.exclude(
            source_reference__isnull=True).distinct('source_reference')
        source_references = collection_with_references.source_references()
        response_data['source_references'] = source_references

        file_path = create_search_process_file(data=response_data,
                                               search_process=search_process,
                                               finished=True)
        try:
            with open(file_path) as file_data:
                return Response(json.load(file_data))
        except ValueError:
            return Response(response_data)
Example #13
    def get(self, request):
        """
        Get closest sites by lat, long, and radius provided in request
        parameter
        :param request: get request object
        :return: list of dict of site data e.g. {
            'id': 1,
            'name': 'site',
            'site_code': '121',
            'distance_m': 1,
            'latitude': -12,
            'longitude': 23
        }
        """
        lat = request.GET.get('lat', None)
        lon = request.GET.get('lon', None)
        radius = request.GET.get('radius', 0.0)
        process_id = request.GET.get('process_id', None)
        search_mode = request.GET.get('search_mode', None)
        radius = float(radius)

        if not lat or not lon:
            return Response('Missing lat/lon')

        try:
            lat = float(lat)
            lon = float(lon)
            point = Point(lon, lat)
        except (ValueError, TypeError):
            return Response('Invalid lat or lon format')

        if search_mode:
            search = CollectionSearch(request.GET.dict())
            collection_results = search.process_search()
            site_ids = collection_results.filter(
                site__geometry_point__distance_lte=(point, D(
                    km=radius))).distinct('site').values_list('site',
                                                              flat=True)
            location_sites = LocationSite.objects.filter(
                id__in=site_ids).annotate(distance=Distance(
                    'geometry_point', point)).order_by('distance')[:10]
        else:
            if not process_id:
                location_sites = LocationSite.objects.filter(
                    biological_collection_record__validated=True).distinct()
            else:
                location_sites = LocationSite.objects.all()
            location_sites = location_sites.filter(
                geometry_point__distance_lte=(point, D(
                    km=radius))).annotate(distance=Distance(
                        'geometry_point', point)).order_by('distance')[:10]

        responses = []
        for site in location_sites:
            responses.append({
                'id': site.id,
                'name': site.name,
                'site_code': site.site_code,
                'distance_m': site.distance.m,
                'latitude': site.get_centroid().y,
                'longitude': site.get_centroid().x
            })

        return Response(responses)
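
From the client side this endpoint takes lat, lon, radius (in km) and optionally search_mode or process_id as query parameters and returns up to ten nearest sites. A hedged usage sketch (only the URL path is an assumption; the parameter names and response keys match the view above):

# Hedged usage sketch for the closest-sites endpoint. The URL path is
# hypothetical; parameters and response keys follow the view above.
import requests

params = {'lat': -29.1, 'lon': 26.2, 'radius': 10}  # radius in km
sites = requests.get(
    'https://example.com/api/location-sites/closest/',  # hypothetical path
    params=params,
).json()
for site in sites:
    print(site['site_code'], round(site['distance_m']), 'm away')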