Example #1
def partial_postcode(request, postcode, format='json'):
    postcode = re.sub(r'\s+', '', postcode.upper())
    if is_valid_postcode(postcode):
        postcode = re.sub(r'\d[A-Z]{2}$', '', postcode)
    if not is_valid_partial_postcode(postcode):
        raise ViewException(format,
                            "Partial postcode '%s' is not valid." % postcode,
                            400)

    location = Postcode.objects.filter(postcode__startswith=postcode).extra(
        where=['length(postcode) = %d' % (len(postcode) + 3)]).aggregate(
            Collect('location'))['location__collect']
    if not location:
        raise ViewException(format, 'Postcode not found', 404)

    postcode = Postcode(postcode=postcode, location=location.centroid)

    if format == 'html':
        return render(
            request, 'mapit/postcode.html', {
                'postcode': postcode.as_dict(),
                'json_view': 'mapit-postcode-partial',
            })

    return output_json(postcode.as_dict())
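A note on the pattern this and most of the following examples share: aggregate(Collect('<field>')) returns a dict keyed '<field>__collect', whose value is a single combined GEOS geometry, or None if the queryset matched no rows. GEOS properties such as centroid are then available on the result. A minimal sketch, assuming a hypothetical Shop model with a location PointField:

from django.contrib.gis.db.models import Collect

# Shop is a hypothetical model, used only for illustration.
collected = Shop.objects.filter(city='Austin').aggregate(
    Collect('location'))['location__collect']
if collected is not None:       # None when no rows matched
    print(collected.geom_type)  # e.g. 'MultiPoint'
    print(collected.centroid)   # a GEOS Point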
Example #2
def _get_centre_point(self, postcode):
    geom = Address.objects.filter(
        **{self.geom_query: postcode}
    ).aggregate(Collect('point'))['point__collect']
    if geom:  # None when no addresses matched the postcode
        centre = geom.centroid.coords
        return centre
Example #3
def _area_geometry(area_id):
    area = get_object_or_404(Area, id=area_id)
    all_areas = area.polygons.all().aggregate(
        Collect('polygon'))['polygon__collect']
    if not all_areas:
        return output_json({'error': _('No polygons found')}, code=404)
    out = {
        'parts': all_areas.num_geom,
    }
    if settings.MAPIT_AREA_SRID != 4326:
        out['srid_en'] = settings.MAPIT_AREA_SRID
        out['area'] = all_areas.area
        out['min_e'], out['min_n'], out['max_e'], out['max_n'] = all_areas.extent
        out['centre_e'], out['centre_n'] = all_areas.centroid
        all_areas.transform(4326)
        out['min_lon'], out['min_lat'], out['max_lon'], out['max_lat'] = all_areas.extent
        out['centre_lon'], out['centre_lat'] = all_areas.centroid
    else:
        out['min_lon'], out['min_lat'], out['max_lon'], out['max_lat'] = all_areas.extent
        out['centre_lon'], out['centre_lat'] = all_areas.centroid
        if hasattr(countries, 'area_geometry_srid'):
            srid = countries.area_geometry_srid
            all_areas.transform(srid)
            out['srid_en'] = srid
            out['area'] = all_areas.area
            out['min_e'], out['min_n'], out['max_e'], out['max_n'] = all_areas.extent
            out['centre_e'], out['centre_n'] = all_areas.centroid
    return out
Example #4
File: filters.py  Project: ropable/wastd
    def communities_occurring_in_area(self, queryset, name, value):
        """Return Communities occurring in the given Area.

        * The filter returns a list of Area objects as ``value``
        * We need to extract their PKs to create a queryset equivalent to
          the list of objects ``value``. Only querysets allow agggregation, not lists.
        * A search_area Multipolygon is collected from the geoms of Areas in ``value``
        * The Taxon PKs are calculated from occurrences (CommunityAreaEncounters)
          ``intersect``ing the search_area
        * The queryset is filtered by the list of Community PKs with occurrences
          in the matching areas
        """
        if value:
            area_pks = [area.pk for area in value]
            search_area = Area.objects.filter(pk__in=area_pks).aggregate(
                Collect('geom'))["geom__collect"]
            pks = set([
                x["community__pk"]
                for x in occ_models.CommunityAreaEncounter.objects.filter(
                    Q(point__intersects=search_area)
                    | Q(geom__intersects=search_area)).values("community__pk")
            ])
            return queryset.filter(pk__in=pks)
        else:
            return queryset
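The docstring's point that only querysets, not lists, support aggregation is the crux of this filter: the round trip from a list of model instances back to a queryset is just a pk__in filter. A minimal sketch of that step in isolation, reusing the Area model from the example:

from django.contrib.gis.db.models import Collect

def collect_geoms(areas):
    # 'areas' is a plain list of Area instances; lists have no .aggregate(),
    # so re-select the same rows as a queryset first.
    qs = Area.objects.filter(pk__in=[a.pk for a in areas])
    return qs.aggregate(Collect('geom'))['geom__collect']  # None for an empty list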
Example #5
def __collect_polygons(self, area):
    all_polygons = area.polygons.all()
    if len(all_polygons) > 1:
        all_polygons = all_polygons.aggregate(Collect('polygon'))['polygon__collect']
    elif len(all_polygons) == 1:
        all_polygons = all_polygons[0].polygon
    else:
        return None
    return all_polygons
Example #6
    def test14_collect(self):
        "Testing the `collect` GeoQuerySet method and `Collect` aggregate."
        # Reference query:
        # SELECT AsText(ST_Collect("relatedapp_location"."point")) FROM "relatedapp_city" LEFT OUTER JOIN
        #    "relatedapp_location" ON ("relatedapp_city"."location_id" = "relatedapp_location"."id")
        #    WHERE "relatedapp_city"."state" = 'TX';
        ref_geom = fromstr('MULTIPOINT(-97.516111 33.058333,-96.801611 32.782057,-95.363151 29.763374,-96.801611 32.782057)')

        c1 = City.objects.filter(state='TX').collect(field_name='location__point')
        c2 = City.objects.filter(state='TX').aggregate(Collect('location__point'))['location__point__collect']

        for coll in (c1, c2):
            # Even though Dallas and Ft. Worth share same point, Collect doesn't
            # consolidate -- that's why 4 points in MultiPoint.
            self.assertEqual(4, len(coll))
            self.assertEqual(ref_geom, coll)
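As the comment notes, Collect keeps duplicate geometries rather than consolidating them. If a dissolved, de-duplicated result is wanted instead, one option (a sketch, not something this test does) is to post-process the collected geometry with GEOS's unary_union:

coll = City.objects.filter(state='TX').aggregate(
    Collect('location__point'))['location__point__collect']
merged = coll.unary_union  # the shared Dallas/Ft. Worth point collapses: 3 points, not 4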
Example #7
def infer_practice_boundaries():
    practices = Practice.objects.filter(location__isnull=False,
                                        setting=4).exclude(status_code__in=(
                                            Practice.STATUS_RETIRED,
                                            Practice.STATUS_DORMANT,
                                            Practice.STATUS_CLOSED,
                                        ))
    partition = practices.aggregate(voronoi=Func(
        Collect("location"), function="ST_VoronoiPolygons"))["voronoi"]
    national_boundary = get_national_boundary()
    practice_regions = get_practice_code_to_region_map(partition,
                                                       national_boundary)
    with transaction.atomic():
        for practice in practices:
            practice.boundary = practice_regions[practice.code]
            practice.save(update_fields=["boundary"])
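Func is what lets a PostGIS function Django doesn't wrap natively, here ST_VoronoiPolygons, be applied to a Collect result inside aggregate(). A minimal sketch of the partitioning step on its own, assuming a PostGIS backend; ST_VoronoiPolygons returns a GEOMETRYCOLLECTION with one cell per distinct input point:

from django.contrib.gis.db.models import Collect
from django.db.models import Func

cells = Practice.objects.filter(location__isnull=False).aggregate(
    voronoi=Func(Collect('location'), function='ST_VoronoiPolygons'))['voronoi']
for cell in cells:  # iterate the polygons of the GeometryCollection
    print(cell.area)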
Example #8
File: viewsets.py  Project: flackdl/cwwed
def get_queryset(self):
    """
    Group all geometries together (st_collect) by same variable & value.
    """
    qs = NsemPsaContour.objects.filter(nsem_psa_variable__nsem=self.nsem)
    qs = qs.values(
        'value',
        'color',
        'date',
        'nsem_psa_variable__name',
        'nsem_psa_variable__data_type',
        'nsem_psa_variable__display_name',
        'nsem_psa_variable__units',
    )
    qs = qs.annotate(geom=Collect(Cast('geo', GeometryField())))
    qs = qs.order_by('nsem_psa_variable__name')
    return qs
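Placed after values(), Collect turns from a whole-table aggregate into a per-group one: each distinct combination of the values() fields gets its own collected geometry. A schematic sketch of the result shape, with hypothetical model and field names:

# Contour, with fields 'value' and 'geo', is hypothetical and for illustration only.
rows = Contour.objects.values('value').annotate(geom=Collect('geo'))
for row in rows:
    # each row is a dict: {'value': ..., 'geom': <combined GEOS geometry>}
    print(row['value'], row['geom'].geom_type)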
Example #9
    def get_context_data(self, **kwargs):
        context = super(SAPlaceDetailSub, self).get_context_data(**kwargs)

        context['child_place_template'] = self.child_place_template
        context['child_place_list_template'] = self.child_place_list_template
        context['subcontent_title'] = 'Constituency Offices'

        if self.object.kind.slug == 'province':
            context['child_places'] = (
                ZAPlace.objects.filter(
                    kind__slug__in=CONSTITUENCY_OFFICE_PLACE_KIND_SLUGS).
                filter(
                    # From https://github.com/mysociety/mapit/blob/e79689499cade74bed2d016cb1291c6849c0e8b7/mapit/geometryserialiser.py#L58
                    location__coveredby=self.object.mapit_area.polygons.
                    aggregate(Collect('polygon'))['polygon__collect']))

        return context
Example #10
    def test_collect(self):
        """
        Testing the `Collect` aggregate.
        """
        # Reference query:
        # SELECT AsText(ST_Collect("relatedapp_location"."point")) FROM "relatedapp_city" LEFT OUTER JOIN
        #    "relatedapp_location" ON ("relatedapp_city"."location_id" = "relatedapp_location"."id")
        #    WHERE "relatedapp_city"."state" = 'TX';
        ref_geom = GEOSGeometry(
            'MULTIPOINT(-97.516111 33.058333,-96.801611 32.782057,'
            '-95.363151 29.763374,-96.801611 32.782057)'
        )

        coll = City.objects.filter(state='TX').aggregate(Collect('location__point'))['location__point__collect']
        # Even though Dallas and Ft. Worth share same point, Collect doesn't
        # consolidate -- that's why 4 points in MultiPoint.
        self.assertEqual(4, len(coll))
        self.assertTrue(ref_geom.equals(coll))
Example #11
    def occurring_in_area(self, queryset, name, value):
        """Return Taxa occurring in the given Area.

        * The filter returns a list of Area objects as ``value``
        * We need to extract their PKs to create a queryset equivalent to
          the list of objects ``value``. Only querysets allow agggregation, not lists.
        * A search_area Multipolygon is collected from the geoms of Areas in ``value``
        * The queryset is filtered by intersection of its point or geom with the search area
        """
        if value:
            search_area = Area.objects.filter(
                pk__in=[area.pk for area in value]).aggregate(
                    Collect('geom'))["geom__collect"]
            return queryset.filter(
                Q(point__intersects=search_area)
                | Q(geom__intersects=search_area))
        else:
            return queryset
Example #12
def _get_extent_summary(self, object_list):
    ids = [o.spatial_id for o in object_list]
    queryset = self.model.objects.filter(spatial_id__in=ids)
    summary = queryset.aggregate(
        Collect('outline'),
        Extent('outline'),
    )
    extents = {
        'count': queryset.count(),
    }
    if queryset.count():
        extents.update({
            'collect': json.loads(summary['outline__collect'].geojson),
            'convex_hull': json.loads(summary['outline__collect'].convex_hull.geojson),
            'extent': {
                'xmin': summary['outline__extent'][0],
                'ymin': summary['outline__extent'][1],
                'xmax': summary['outline__extent'][2],
                'ymax': summary['outline__extent'][3],
            },
        })
    return extents
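Several aggregates can share a single aggregate() call, and so a single query; unnamed aggregates get default result keys of the form <field>__<aggregate>, which is why the example reads summary['outline__collect'] and summary['outline__extent']. Keyword arguments give friendlier keys; a sketch against the same outline field:

from django.contrib.gis.db.models import Collect, Extent

summary = queryset.aggregate(
    collected=Collect('outline'),
    bbox=Extent('outline'),
)
# summary == {'collected': <GEOSGeometry or None>, 'bbox': (xmin, ymin, xmax, ymax)}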
Example #13
    def handle_label(self, directory_name, **options):
        current_generation = Generation.objects.current()
        new_generation = Generation.objects.new()
        if not new_generation:
            raise Exception("No new generation to be used for import!")

        if not os.path.isdir(directory_name):
            raise Exception("'%s' is not a directory" % (directory_name,))

        os.chdir(directory_name)

        mapit_type_glob = smart_text("[A-Z0-9][A-Z0-9][A-Z0-9]")

        if not glob(mapit_type_glob):
            raise Exception(
                "'%s' did not contain any directories that look like MapIt types (e.g. O11, OWA, etc.)" % (
                    directory_name,))

        def verbose(s):
            if int(options['verbosity']) > 1:
                print(smart_str(s))

        verbose("Loading any admin boundaries from " + directory_name)

        verbose("Finding language codes...")

        language_code_to_name = {}
        code_keys = ('two_letter', 'three_letter')
        for row in get_iso639_2_table():
            english_name = getattr(row, 'english_name')
            for k in code_keys:
                code = getattr(row, k)
                if not code:
                    continue
                language_code_to_name[code] = english_name

        global_country = Country.objects.get(code='G')

        # print json.dumps(language_code_to_name, sort_keys=True, indent=4)

        skip_up_to = None
        # skip_up_to = 'relation-80370'

        skipping = bool(skip_up_to)

        for type_directory in sorted(glob(mapit_type_glob)):

            verbose("Loading type " + type_directory)

            if not os.path.exists(type_directory):
                verbose("Skipping the non-existent " + type_directory)
                continue

            verbose("Loading all KML in " + type_directory)

            files = sorted(os.listdir(type_directory))
            total_files = len(files)

            for i, e in enumerate(files):

                progress = "[%d%% complete] " % ((i * 100) / total_files,)

                if skipping:
                    if skip_up_to in e:
                        skipping = False
                    else:
                        continue

                if not e.endswith('.kml'):
                    verbose("Ignoring non-KML file: " + e)
                    continue

                m = re.search(r'^(way|relation)-(\d+)-', e)
                if not m:
                    raise Exception("Couldn't extract OSM element type and ID from: " + e)

                osm_type, osm_id = m.groups()

                kml_filename = os.path.join(type_directory, e)

                verbose(progress + "Loading " + os.path.realpath(kml_filename))

                # Need to parse the KML manually to get the ExtendedData
                kml_data = KML()
                xml.sax.parse(smart_str(kml_filename), kml_data)

                useful_names = [n for n in kml_data.data.keys() if not n.startswith('Boundaries for')]
                if len(useful_names) == 0:
                    raise Exception("No useful names found in KML data")
                elif len(useful_names) > 1:
                    raise Exception("Multiple useful names found in KML data")
                name = useful_names[0]
                print(smart_str("  %s" % name))

                if osm_type == 'relation':
                    code_type_osm = CodeType.objects.get(code='osm_rel')
                elif osm_type == 'way':
                    code_type_osm = CodeType.objects.get(code='osm_way')
                else:
                    raise Exception("Unknown OSM element type: " + osm_type)

                ds = DataSource(kml_filename)
                layer = ds[0]
                if len(layer) != 1:
                    raise Exception("We only expect one feature in each layer")

                feat = layer[1]

                g = feat.geom.transform(4326, clone=True)

                if g.geom_count == 0:
                    # Just ignore any KML files that have no polygons in them:
                    verbose('    Ignoring that file - it contained no polygons')
                    continue

                # Nowadays, in generating the data we should have
                # excluded any "polygons" with less than four points
                # (the final one being the same as the first), but
                # just in case:
                polygons_too_small = 0
                for polygon in g:
                    if polygon.num_points < 4:
                        polygons_too_small += 1
                if polygons_too_small:
                    message = "%d out of %d polygon(s) were too small" % (polygons_too_small, g.geom_count)
                    verbose('    Skipping, since ' + message)
                    continue

                g_geos = g.geos

                if not g_geos.valid:
                    verbose("    Invalid KML:" + kml_filename)
                    fixed_multipolygon = fix_invalid_geos_multipolygon(g_geos)
                    if len(fixed_multipolygon) == 0:
                        verbose("    Invalid polygons couldn't be fixed")
                        continue
                    g = fixed_multipolygon.ogr

                area_type = Type.objects.get(code=type_directory)

                try:
                    osm_code = Code.objects.get(type=code_type_osm,
                                                code=osm_id,
                                                area__generation_high__lte=current_generation,
                                                area__generation_high__gte=current_generation)
                except Code.DoesNotExist:
                    verbose('    No area existed in the current generation with that OSM element type and ID')
                    osm_code = None

                was_the_same_in_current = False

                if osm_code:
                    m = osm_code.area

                    # First, we need to check if the polygons are
                    # still the same as in the previous generation:
                    previous_geos_geometry = m.polygons.aggregate(Collect('polygon'))['polygon__collect']
                    if previous_geos_geometry is None:
                        verbose('    In the current generation, that area was empty - skipping')
                    else:
                        # Simplify it to make sure the polygons are valid:
                        previous_geos_geometry = shapely.wkb.loads(
                            str(previous_geos_geometry.simplify(tolerance=0).ewkb))
                        new_geos_geometry = shapely.wkb.loads(str(g.geos.simplify(tolerance=0).ewkb))
                        if previous_geos_geometry.almost_equals(new_geos_geometry, decimal=7):
                            was_the_same_in_current = True
                        else:
                            verbose('    In the current generation, the boundary was different')

                if was_the_same_in_current:
                    # Extend the high generation to the new one:
                    verbose('    The boundary was identical in the previous generation; raising generation_high')
                    m.generation_high = new_generation

                else:
                    # Otherwise, create a completely new area:
                    m = Area(
                        name=name,
                        type=area_type,
                        country=global_country,
                        parent_area=None,
                        generation_low=new_generation,
                        generation_high=new_generation,
                    )

                poly = [g]

                if options['commit']:
                    m.save()
                    verbose('    Area ID: ' + str(m.id))

                    if name not in kml_data.data:
                        print(json.dumps(kml_data.data, sort_keys=True, indent=4))
                        raise Exception("Will fail to find '%s' in the dictionary" % (name,))

                    old_lang_codes = set(n.type.code for n in m.names.all())

                    for k, translated_name in kml_data.data[name].items():
                        language_name = None
                        if k == 'name':
                            lang = 'default'
                            language_name = "OSM Default"
                        else:
                            name_match = re.search(r'^name:(.+)$', k)
                            if name_match:
                                lang = name_match.group(1)
                                if lang in language_code_to_name:
                                    language_name = language_code_to_name[lang]
                        if not language_name:
                            continue
                        old_lang_codes.discard(lang)

                        # Otherwise, make sure that a NameType for this language exists:
                        NameType.objects.update_or_create(code=lang, defaults={'description': language_name})
                        name_type = NameType.objects.get(code=lang)

                        m.names.update_or_create(type=name_type, defaults={'name': translated_name})

                    if old_lang_codes:
                        verbose('Removing deleted languages codes: ' + ' '.join(old_lang_codes))
                    m.names.filter(type__code__in=old_lang_codes).delete()
                    # If the boundary was the same, the old Code
                    # object will still be pointing to the same Area,
                    # which just had its generation_high incremented.
                    # In every other case, there's a new area object,
                    # so create a new Code and save it:
                    if not was_the_same_in_current:
                        new_code = Code(area=m, type=code_type_osm, code=osm_id)
                        new_code.save()
                    save_polygons({'dummy': (m, poly)})
Example #14
    def handle_label(self, filename, **options):

        missing_options = []
        for k in [
                'generation_id', 'area_type_code', 'name_type_code',
                'country_code'
        ]:
            if options[k]:
                continue
            else:
                missing_options.append(k)
        if missing_options:
            message_start = "Missing arguments " if len(
                missing_options) > 1 else "Missing argument "
            message = message_start + " ".join('--{0}'.format(k)
                                               for k in missing_options)
            raise CommandError(message)

        generation_id = options['generation_id']
        area_type_code = options['area_type_code']
        name_type_code = options['name_type_code']
        country_code = options['country_code']
        override_name = options['override_name']
        name_field = options['name_field']
        if not (override_name or name_field):
            name_field = 'Name'
        override_code = options['override_code']
        code_field = options['code_field']
        code_type_code = options['code_type']
        encoding = options['encoding'] or 'utf-8'

        if name_field and override_name:
            raise CommandError(
                "You must not specify both --name_field and --override_name")
        if code_field and override_code:
            raise CommandError(
                "You must not specify both --code_field and --override_code")

        using_code = (code_field or override_code)
        if (using_code and not code_type_code) or (not using_code
                                                   and code_type_code):
            raise CommandError(
                "If you want to save a code, specify --code_type and either --code_field or --override_code"
            )
        try:
            area_type = Type.objects.get(code=area_type_code)
        except:
            type_desc = input(
                'Please give a description for area type code %s: ' %
                area_type_code)
            area_type = Type(code=area_type_code, description=type_desc)
            if options['commit']:
                area_type.save()

        try:
            name_type = NameType.objects.get(code=name_type_code)
        except:
            name_desc = input(
                'Please give a description for name type code %s: ' %
                name_type_code)
            name_type = NameType(code=name_type_code, description=name_desc)
            if options['commit']:
                name_type.save()

        if country_code != 'first-letter':
            try:
                country = Country.objects.get(code=country_code)
            except:
                country_name = input(
                    'Please give the name for country code %s: ' %
                    country_code)
                country = Country(code=country_code, name=country_name)
                if options['commit']:
                    country.save()

        if code_type_code:
            try:
                code_type = CodeType.objects.get(code=code_type_code)
            except:
                code_desc = input(
                    'Please give a description for code type %s: ' %
                    code_type_code)
                code_type = CodeType(code=code_type_code,
                                     description=code_desc)
                if options['commit']:
                    code_type.save()

        self.stdout.write("Importing from %s" % filename)

        if not options['commit']:
            self.stdout.write(
                '(will not save to db as --commit not specified)')

        current_generation = Generation.objects.current()
        new_generation = Generation.objects.get(id=generation_id)

        def verbose(*args):
            if int(options['verbosity']) > 1:
                self.stdout.write(" ".join(str(a) for a in args))

        ds = DataSource(filename)
        layer = ds[0]
        if (override_name or override_code) and len(layer) > 1:
            message = (
                "Warning: you have specified an override %s and this file contains more than one feature; "
                "multiple areas with the same %s will be created")
            if override_name:
                self.stdout.write(message % ('name', 'name'))
            if override_code:
                self.stdout.write(message % ('code', 'code'))

        for feat in layer:

            if override_name:
                name = override_name
            else:
                name = None
                for nf in name_field.split(','):
                    try:
                        name = feat[nf].value
                        break
                    except:
                        pass
                if name is None:
                    choices = ', '.join(layer.fields)
                    raise CommandError(
                        "Could not find name using name field '%s' - should it be something else? "
                        "It will be one of these: %s. Specify which with --name_field"
                        % (name_field, choices))
                try:
                    if not isinstance(name, six.text_type):
                        name = name.decode(encoding)
                except:
                    raise CommandError(
                        "Could not decode name using encoding '%s' - is it in another encoding? "
                        "Specify one with --encoding" % encoding)

            name = re.sub(r'\s+', ' ', name)
            if not name:
                if options['ignore_blank']:
                    continue
                raise Exception("Could not find a name to use for area")

            code = None
            if override_code:
                code = override_code
            elif code_field:
                try:
                    code = feat[code_field].value
                except:
                    choices = ', '.join(layer.fields)
                    raise CommandError(
                        "Could not find code using code field '%s' - should it be something else? "
                        "It will be one of these: %s. Specify which with --code_field"
                        % (code_field, choices))

            self.stdout.write("  looking at '%s'%s" %
                              (name, (' (%s)' % code) if code else ''))

            if country_code == 'first-letter' and code:
                try:
                    country = Country.objects.get(code=code[0])
                except Country.DoesNotExist:
                    self.stdout.write("    No country found from first-letter")
                    country = None

            g = None
            if hasattr(feat, 'geom'):
                g = feat.geom.transform(settings.MAPIT_AREA_SRID, clone=True)

            try:
                if options['new']:  # Always want a new area
                    raise Area.DoesNotExist
                if code:
                    matching_message = "code %s of code type %s" % (code,
                                                                    code_type)
                    areas = Area.objects.filter(
                        codes__code=code,
                        codes__type=code_type).order_by('-generation_high')
                else:
                    matching_message = "name %s of area type %s" % (name,
                                                                    area_type)
                    areas = Area.objects.filter(
                        name=name, type=area_type).order_by('-generation_high')
                if len(areas) == 0:
                    verbose("    the area was not found - creating a new one")
                    raise Area.DoesNotExist
                m = areas[0]
                verbose("    found the area")
                if options['preserve']:
                    # Find whether we need to create a new Area:
                    previous_geos_geometry = m.polygons.aggregate(
                        Collect('polygon'))['polygon__collect']
                    if m.generation_high < current_generation.id:
                        # Then it was missing in current_generation:
                        verbose(
                            "    area existed previously, but was missing from",
                            current_generation)
                        raise Area.DoesNotExist
                    elif g is None:
                        if previous_geos_geometry is not None:
                            verbose("    area is now empty")
                            raise Area.DoesNotExist
                        else:
                            verbose("    the area has remained empty")
                    elif previous_geos_geometry is None:
                        # It was empty in the previous generation:
                        verbose("    area was empty in", current_generation)
                        raise Area.DoesNotExist
                    else:
                        # Otherwise, create a new Area unless the
                        # polygons were the same in current_generation:
                        previous_geos_geometry = previous_geos_geometry.simplify(
                            tolerance=0)
                        new_geos_geometry = g.geos.simplify(tolerance=0)
                        create_new_area = not previous_geos_geometry.equals(
                            new_geos_geometry)
                        p = previous_geos_geometry.sym_difference(
                            new_geos_geometry
                        ).area / previous_geos_geometry.area
                        verbose("    change in area is:",
                                "%.03f%%" % (100 * p, ))
                        if create_new_area:
                            verbose(
                                "    the area", m,
                                "has changed, creating a new area due to --preserve"
                            )
                            raise Area.DoesNotExist
                        else:
                            verbose("    the area remained the same")
                else:
                    # If --preserve is not specified, the code or the name must be unique:
                    if len(areas) > 1:
                        raise Area.MultipleObjectsReturned(
                            "There was more than one area with %s, and --preserve was not specified"
                            % (matching_message, ))

            except Area.DoesNotExist:
                m = Area(
                    name=name,
                    type=area_type,
                    country=country,
                    # parent_area=parent_area,
                    generation_low=new_generation,
                    generation_high=new_generation,
                )
                if options['use_code_as_id'] and code:
                    m.id = int(code)

            # check that we are not about to skip a generation
            if m.generation_high and current_generation and m.generation_high.id < current_generation.id:
                raise Exception(
                    "Area %s found, but not in current generation %s" %
                    (m, current_generation))
            m.generation_high = new_generation

            if options['fix_invalid_polygons'] and g is not None:
                # Make a GEOS geometry only to check for validity:
                geos_g = g.geos
                if not geos_g.valid:
                    geos_g = fix_invalid_geos_geometry(geos_g)
                    if geos_g is None:
                        self.stdout.write(
                            "The geometry for area %s was invalid and couldn't be fixed"
                            % name)
                        g = None
                    else:
                        g = geos_g.ogr

            poly = [g] if g is not None else []

            if options['commit']:
                m.save()
                m.names.update_or_create(type=name_type,
                                         defaults={'name': name})
                if code:
                    m.codes.update_or_create(type=code_type,
                                             defaults={'code': code})
                save_polygons({m.id: (m, poly)})
Example #15
def getSpeciesMetadata(self):
    """
    Returns metadata for all species of a specific genus
    """
    species = self.QuerySet.filter(genus_id=self.id).values('species_id').annotate(
        points=Collect('geom'), ab=Count('species_id'), name=Min('scientific_name'))
    return species
Example #16
    def generate_clusters(cls, precision, operator=None):
        if not cls.objects.filter(precision=precision,
                                  operator=operator).exists():
            # Generate clusters from smaller clusters
            if 1 <= precision < MAX_CLUSTER_PRECISION_SIZE:
                print("Generating clusters...")
                # Get smaller clusters and annotate the new geohash for the bigger clusters
                smaller_precision = precision + 1
                smaller_clusters = cls.objects.filter(precision=smaller_precision, operator=operator)\
                    .annotate(bigger_geohash=GeoHash('point', precision=precision))
                # Group by bigger geohash
                clusters_hashes = smaller_clusters.values(
                    'bigger_geohash').distinct()
                total = clusters_hashes.count()
                if not total:
                    raise ValueError(
                        "No clusters found for precision {}".format(precision +
                                                                    1))
                print("Saving data for {} clusters...".format(total))
                loop_counter = 0
                percentage = 0
                cluster_array = []
                for cluster_dict in clusters_hashes:
                    geohash = cluster_dict['bigger_geohash']
                    # Get data from smaller clusters
                    sub_clusters = smaller_clusters.filter(
                        bigger_geohash=geohash).values('point', 'count',
                                                       'data')
                    count = reduce((lambda acc, cl: acc + cl['count']),
                                   sub_clusters, 0)
                    point = Point(
                        reduce((lambda acc, cl: acc +
                                (cl['point'].x * float(cl['count']))),
                               sub_clusters, 0.0) / float(count),
                        reduce((lambda acc, cl: acc +
                                (cl['point'].y * float(cl['count']))),
                               sub_clusters, 0.0) / float(count))
                    data = '' if count != 1 else sub_clusters[0]['data']
                    cluster = cls(point=point,
                                  precision=precision,
                                  count=count,
                                  data=data,
                                  operator=operator)
                    cluster_array.append(cluster)
                    if len(cluster_array) >= DATABASE_COMMIT_SIZE:
                        cls.objects.bulk_create(cluster_array)
                        cluster_array = []
                    loop_counter += 1
                    prev_percentage = percentage
                    percentage = 100 * loop_counter // total
                    if percentage > prev_percentage:
                        print(" {}% done ({} clusters)".format(
                            percentage, loop_counter))
                if len(cluster_array) > 0:
                    cls.objects.bulk_create(cluster_array)
                return

            # Generate clusters from base stations
            elif precision == MAX_CLUSTER_PRECISION_SIZE:
                print("Generating clusters...")
                # Add geohash to all base stations
                base_stations = BS_MODEL.objects.annotate(
                    geohash=GeoHash('point', precision=precision))
                # Filter by operator
                if operator:
                    mnc_list = [m.value for m in operator.mnc_set.all()]
                    base_stations = base_stations.filter(mnc__in=mnc_list)
                # Group by geohash and get cluster MultiPoint and count
                clusters_values = base_stations.values('geohash').annotate(
                    count=Count('point'), geom=Collect('point'))
                total = clusters_values.count()
                if not total:
                    raise ValueError(
                        "No base stations found for precision {}".format(
                            precision))
                print("Saving data for {} clusters...".format(total))
                loop_counter = 0
                percentage = 0
                cluster_array = []
                for cluster_dict in clusters_values:
                    count = cluster_dict['count']
                    point = cluster_dict['geom'].centroid
                    data = '' if count != 1 else base_stations.get(
                        geohash=cluster_dict['geohash']).data
                    cluster = cls(point=point,
                                  precision=precision,
                                  count=count,
                                  data=data,
                                  operator=operator)
                    cluster_array.append(cluster)
                    if len(cluster_array) >= DATABASE_COMMIT_SIZE:
                        cls.objects.bulk_create(cluster_array)
                        cluster_array = []
                    loop_counter += 1
                    prev_percentage = percentage
                    percentage = 100 * loop_counter // total
                    if percentage > prev_percentage:
                        print(" {}% done ({} clusters)".format(
                            percentage, loop_counter))
                if len(cluster_array) > 0:
                    cls.objects.bulk_create(cluster_array)
                return

            else:
                raise ValueError(
                    "precision must be in the [1, {}] interval".format(
                        MAX_CLUSTER_PRECISION_SIZE))

        else:
            operator_string = ' and operator {}'.format(
                operator) if operator else ''
            raise ValueError(
                "There are already clusters for precision {}{}".format(
                    precision, operator_string))
Example #17
def getGenusMetadata(self):
    """
    Returns metadata for all genera of a specific family
    """
    genera = self.QuerySet.filter(family_id=self.id).values('genus_id').annotate(
        points=Collect('geom'), ab=Count('genus_id'), name=Min('genus'))
    return genera
Example #18
def getFamiliesMetadata(self):
    """
    Returns metadata for all families of a specific order
    """
    families = self.QuerySet.filter(order_id=self.id).values('family_id').annotate(
        points=Collect('geom'), ab=Count('family_id'), name=Min('family'))
    return families
Example #19
def getOrdersMetadata(self):
    """
    Returns metadata for all orders of a specific class
    """
    orders = self.QuerySet.filter(class_id=self.id).values('order_id').annotate(
        points=Collect('geom'), ab=Count('order_id'), name=Min('_order'))
    return orders
Example #20
def getClassesMetadata(self):
    """
    Returns metadata for all classes of a specific phylum
    """
    classes = self.QuerySet.filter(phylum_id=self.id).values('class_id').annotate(
        points=Collect('geom'), ab=Count('class_id'), name=Min('_class'))
    return classes
Example #21
    def handle(self, **options):

        # from http://colorbrewer2.org/index.php?type=diverging&scheme=Spectral&n=11
        party_to_colour = {
            'Party of National Unity': '#9e0142',
            'Kenya African National Union': '#d53e4f',
            'Orange Democratic Movement': '#f46d43',
            'Orange Democratic Movement Party Of Kenya': '#fdae61',
            'NARC - Kenya': '#fee08b',
            'Safina Party Of Kenya': '#ffffbf',
            'National Rainbow Coalition': '#e6f598',
            'Ford People': '#abdda4',
            'Democratic Party': '#66c2a5',
            'spare': '#3288bd', # not used
            'other': '#5e4fa2', 
        }

        rows       = []
        fieldnames = ['name', 'person', 'party', 'color', 'location']
        

        # get all the constituencies
        constituencies = models.Place.objects.all().filter(kind__slug='constituency')

        for con in constituencies:
            row = {
                'name':     con.name,
                'person':   '',   
                'party':    '',
                'location': '',
                'color':    '',
            }

            # get the person and party data
            pos = con.current_politician_position()
            if pos:
                if pos.person:
                    person = pos.person
                    row['person'] = person.name

                    parties = []
                    for party in person.parties():
                        parties.append(party.name)
                    row['party'] = ', '.join(parties)

                    row['color'] = party_to_colour.get(row['party']) or party_to_colour['other']
            
            # get the kml positions
            area = con.mapit_area
            if area:
                all_areas = area.polygons.all()

                if len(all_areas) > 1:
                    all_areas = all_areas.aggregate(Collect('polygon'))['polygon__collect']
                elif len(all_areas) == 1:
                    all_areas = all_areas[0].polygon
                # else:
                #     return output_json({ 'error': 'No polygons found' }, code=404)


                # Note - the following commented out as it causes issues for
                # some constituencies - see
                #   https://github.com/mysociety/pombola/issues/443 for details.
                # Not using this only results in the CSV produced being much larger
                # (7MB as opposed to 1MB) but for us this is not really an issue.
                # apply a simplify_tolerance to make the CSV smaller
                # all_areas = all_areas.simplify(0.001)

                row['location'] = all_areas.kml
                        
            rows.append(row)


        csv_output = StringIO.StringIO()
        writer = csv.DictWriter(csv_output, fieldnames)

        fieldname_dict = {}
        for key in fieldnames:
            fieldname_dict[key] = key
        writer.writerow(fieldname_dict)

        for data in rows:
            writer.writerow(data)

        print csv_output.getvalue()
Example #22
    def handle_label(self, directory_name, **options):
        if not os.path.isdir(directory_name):
            raise Exception("'%s' is not a directory" % (directory_name,))

        os.chdir(directory_name)
        skip_up_to = None
        # skip_up_to = 'relation-80370'

        skipping = bool(skip_up_to)

        osm_elements_seen_in_new_data = set([])

        with open("/home/mark/difference-results.csv", 'w') as fp:
            csv_writer = csv.writer(fp)
            csv_writer.writerow(["ElementType",
                                 "ElementID",
                                 "ExistedPreviously",
                                 "PreviousEmpty",
                                 "PreviousArea",
                                 "NewEmpty",
                                 "NewArea",
                                 "SymmetricDifferenceArea",
                                 "GEOSEquals",
                                 "GEOSEqualsExact"])

            for admin_directory in sorted(x for x in os.listdir('.') if os.path.isdir(x)):

                if not re.search('^[A-Z0-9]{3}$', admin_directory):
                    print("Skipping a directory that doesn't look like a MapIt type:", admin_directory)
                    continue

                if not os.path.exists(admin_directory):
                    continue

                files = sorted(os.listdir(admin_directory))

                for i, e in enumerate(files):

                    if skipping:
                        if skip_up_to in e:
                            skipping = False
                        else:
                            continue

                    if not e.endswith('.kml'):
                        continue

                    m = re.search(r'^(way|relation)-(\d+)-', e)
                    if not m:
                        raise Exception("Couldn't extract OSM element type and ID from: " + e)

                    osm_type, osm_id = m.groups()

                    osm_elements_seen_in_new_data.add((osm_type, osm_id))

                    kml_filename = os.path.join(admin_directory, e)

                    # Need to parse the KML manually to get the ExtendedData
                    kml_data = KML()
                    print("parsing", kml_filename)
                    xml.sax.parse(kml_filename, kml_data)

                    useful_names = [n for n in kml_data.data.keys() if not n.startswith('Boundaries for')]
                    if len(useful_names) == 0:
                        raise Exception("No useful names found in KML data")
                    elif len(useful_names) > 1:
                        raise Exception("Multiple useful names found in KML data")
                    name = useful_names[0]
                    print(" ", smart_str(name))

                    if osm_type == 'relation':
                        code_type_osm = CodeType.objects.get(code='osm_rel')
                    elif osm_type == 'way':
                        code_type_osm = CodeType.objects.get(code='osm_way')
                    else:
                        raise Exception("Unknown OSM element type: " + osm_type)

                    ds = DataSource(kml_filename)
                    if len(ds) != 1:
                        raise Exception("We only expect one layer in a DataSource")

                    layer = ds[0]
                    if len(layer) != 1:
                        raise Exception("We only expect one feature in each layer")

                    feat = layer[0]

                    osm_codes = list(Code.objects.filter(type=code_type_osm, code=osm_id))
                    osm_codes.sort(key=lambda e: e.area.generation_high.created)

                    new_area = None
                    new_empty = None

                    previous_area = None
                    previous_empty = None

                    symmetric_difference_area = None

                    g = feat.geom.transform(4326, clone=True)

                    for polygon in g:
                        if polygon.point_count < 4:
                            new_empty = True
                    if not new_empty:
                        new_geos_geometry = g.geos.simplify(tolerance=0)
                        new_area = new_geos_geometry.area
                        new_empty = new_geos_geometry.empty

                    geos_equals = None
                    geos_equals_exact = None

                    most_recent_osm_code = None
                    if osm_codes:
                        most_recent_osm_code = osm_codes[-1]
                        previous_geos_geometry = most_recent_osm_code.area.polygons.aggregate(Collect('polygon'))
                        previous_geos_geometry = previous_geos_geometry['polygon__collect']
                        previous_empty = previous_geos_geometry is None

                        if not previous_empty:
                            previous_geos_geometry = previous_geos_geometry.simplify(tolerance=0)
                            previous_area = previous_geos_geometry.area

                            if not new_empty:
                                symmetric_difference_area = previous_geos_geometry.sym_difference(
                                    new_geos_geometry).area
                                geos_equals = previous_geos_geometry.equals(new_geos_geometry)
                                geos_equals_exact = previous_geos_geometry.equals_exact(new_geos_geometry)

                    csv_writer.writerow([osm_type,
                                         osm_id,
                                         bool(osm_codes),  # ExistedPreviously
                                         empty_if_none(previous_empty),
                                         empty_if_none(previous_area),
                                         empty_if_none(new_empty),
                                         empty_if_none(new_area),
                                         empty_if_none(symmetric_difference_area),
                                         empty_if_none(geos_equals),
                                         empty_if_none(geos_equals_exact)])
Example #23
def get_context_data(self, **kwargs):
    context = super(UserPlaceListView, self).get_context_data(**kwargs)
    context['center'] = self.get_queryset().aggregate(
        position_center=Collect('position'))['position_center'].centroid
    return context
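One caveat in this view: if the queryset is empty, the aggregate value is None and the chained .centroid raises AttributeError. A guarded variant might look like this sketch:

def get_context_data(self, **kwargs):
    context = super(UserPlaceListView, self).get_context_data(**kwargs)
    collected = self.get_queryset().aggregate(
        position_center=Collect('position'))['position_center']
    context['center'] = collected.centroid if collected is not None else None
    return context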
Example #24
def getPhylaMetadata(self):
    """
    Returns metadata for all phyla of a specific kingdom
    """
    phyla = self.QuerySet.filter(kingdom_id=self.id).values('phylum_id').annotate(
        points=Collect('geom'), ab=Count('phylum_id'), name=Min('phylum'))
    return phyla
Example #25
    def handle_label(self, directory_name, **options):
        current_generation = Generation.objects.current()

        if not os.path.isdir(directory_name):
            raise Exception("'%s' is not a directory" % (directory_name,))

        os.chdir(directory_name)

        mapit_type_glob = smart_text("[A-Z0-9][A-Z0-9][A-Z0-9]")

        if not glob(mapit_type_glob):
            raise Exception(
                "'%s' did not contain any directories that look like MapIt types (e.g. O11, OWA, etc.)" % (
                    directory_name,))

        def verbose(s):
            if int(options['verbosity']) > 1:
                print(smart_str(s))

        verbose("Loading any admin boundaries from " + directory_name)
        for type_directory in sorted(glob(mapit_type_glob)):

            verbose("Loading type " + type_directory)

            if not os.path.exists(type_directory):
                verbose("Skipping the non-existent " + type_directory)
                continue

            verbose("Loading all KML in " + type_directory)

            files = sorted(os.listdir(type_directory))
            total_files = len(files)

            for i, e in enumerate(files):
                progress = "[%d%% complete] " % ((i * 100) / total_files,)

                if not e.endswith('.kml'):
                    verbose("Ignoring non-KML file: " + e)
                    continue

                m = re.search(r'^(way|relation)-(\d+)-', e)
                if not m:
                    raise Exception("Couldn't extract OSM element type and ID from: " + e)

                osm_type, osm_id = m.groups()
                kml_filename = os.path.join(type_directory, e)
                verbose(progress + "Loading " + os.path.realpath(kml_filename))

                if osm_type == 'relation':
                    code_type_osm = CodeType.objects.get(code='osm_rel')
                elif osm_type == 'way':
                    code_type_osm = CodeType.objects.get(code='osm_way')
                else:
                    raise Exception("Unknown OSM element type: " + osm_type)

                ds = DataSource(kml_filename)
                layer = ds[0]
                if len(layer) != 1:
                    raise Exception("We only expect one feature in each layer")

                feat = layer[1]

                g = feat.geom.transform(4326, clone=True)

                if g.geom_count == 0:
                    verbose('    Ignoring that file - it contained no polygons')
                    continue

                polygons_too_small = 0
                for polygon in g:
                    if polygon.num_points < 4:
                        polygons_too_small += 1
                if polygons_too_small:
                    message = "%d out of %d polygon(s) were too small" % (polygons_too_small, g.geom_count)
                    verbose('    Skipping, since ' + message)
                    continue

                g_geos = g.geos
                if not g_geos.valid:
                    verbose("    Invalid KML:" + kml_filename)
                    fixed_multipolygon = fix_invalid_geos_multipolygon(g_geos)
                    if len(fixed_multipolygon) == 0:
                        verbose("    Invalid polygons couldn't be fixed")
                        continue
                    g = fixed_multipolygon.ogr

                osm_code = Code.objects.get(
                    type=code_type_osm,
                    code=osm_id,
                    area__generation_high__lte=current_generation,
                    area__generation_high__gte=current_generation)

                m = osm_code.area

                previous_geos_geometry = m.polygons.aggregate(Collect('polygon'))['polygon__collect']
                previous_geos_geometry = shapely.wkb.loads(str(previous_geos_geometry.simplify(tolerance=0).ewkb))
                new_geos_geometry = shapely.wkb.loads(str(g.geos.simplify(tolerance=0).ewkb))
                if previous_geos_geometry.almost_equals(new_geos_geometry, decimal=7):
                    verbose('    Boundary unchanged')
                else:
                    verbose('    In the current generation, the boundary was different')
                    poly = [g]
                    if options['commit']:
                        save_polygons({'dummy': (m, poly)})