def construct_sample_primary_component_percents(self, sample_placetype_components, client): all_primary_components = self.construct_primary_components() primary_component_dict = map_to_dict(lambda building: [building['building_attribute_set']['name'], building], all_primary_components) sample_placetype_component_dict = map_to_dict( lambda placetype_component: [placetype_component['building_attribute_set']['name'], placetype_component], sample_placetype_components ) sample_primary_component_percents = [] for import_primary_component in self.load_buildings_csv(client): component = import_primary_component.placetype_component if component not in sample_placetype_component_dict: print "BuildingType " + import_primary_component.placetype + " is not used in this set :: skipping" continue placetype_component = sample_placetype_component_dict[component] building_percent = dict( primary_component_name=import_primary_component.name, primary_component=primary_component_dict[import_primary_component.name], placetype_component_name=import_primary_component.building_type, placetype_component=placetype_component, percent=import_primary_component.percent_of_building_type ) sample_primary_component_percents.append(building_percent) return sample_primary_component_percents
def create_result_map(self, values_query_set):
    """
    Builds the result map for the given values QuerySet, wiring the models joined
    via geographies to their db_entity_key names.

    :param values_query_set: a values() QuerySet supporting create_result_map
    :return: the result map produced by values_query_set.create_result_map
    """
    related_models = self.resolve_join_models()
    logger.debug("Creating result map for related models %s feature class %s" % (', '.join(map(lambda r: str(r), related_models)), self.feature_class))
    feature_class_creator = FeatureClassCreator.from_dynamic_model_class(self.feature_class)
    geography_scopes = feature_class_creator.geography_scopes()
    # Get the related model paths final segment. We want to map these to the db_entity_key names
    # (split('__')[1] takes the segment after the geographies segment of the path)
    related_model_path_to_name = map_to_dict(
        lambda related_model: [resolve_related_model_path_via_geographies(
            self.feature_class.objects,
            related_model).split('__')[1],
            related_model.db_entity_key],
        related_models)
    return values_query_set.create_result_map(
        related_models=related_models,
        # map_path_segments maps related object paths to their model name,
        # and removes the geographies segment of the path
        map_path_segments=merge(
            # Map each geography scope to its corresponding field on the feature class.
            # The None value marks the segment for removal from result paths
            map_to_dict(
                lambda geography_scope: [
                    feature_class_creator.geographies_field(geography_scope).name,
                    None
                ],
                geography_scopes),
            related_model_path_to_name))
def construct_sample_placetype_components(self, sample_placetypes):
    """
    Builds the unique set of placetype components referenced by the sample placetypes.

    :param sample_placetypes: placetype dicts, each with a building_attribute_set name
    :return: list of dicts of the form {'building_attribute_set': {'name': <component name>}},
        one per distinct component name
    """
    sample_placetype_components = []
    all_placetype_components = self.construct_placetype_components(client='default')
    placetype_component_dict = map_to_dict(
        lambda placetype_component: [placetype_component['building_attribute_set']['name'], placetype_component],
        all_placetype_components)
    sample_buildingtype_percents = self.construct_placetype_component_percents(
        map_to_dict(lambda placetype: [placetype['building_attribute_set']['name'], placetype], sample_placetypes),
        placetype_component_dict)
    # Collect every component name used by any placetype.
    # Fix: the original reused the name `placetype_components` for both the accumulator
    # list and the inner loop variable, so the loop variable shadowed the list and
    # `placetype_components.append(...)` was called on a string (AttributeError for any
    # non-empty components dict). Distinct names restore the intended accumulation.
    component_names = []
    for placetype, components in sample_buildingtype_percents.items():
        for component_name, attributes in components['placetype_components'].items():
            component_names.append(component_name)
    # Deduplicate the names and emit one entry per distinct component
    for component_name in set(component_names):
        sample_placetype_components.append({'building_attribute_set': {'name': component_name}})
    return sample_placetype_components
def create_result_map(self, values_query_set):
    """
    Builds the result map for the given values QuerySet, wiring the models joined
    via geographies to their db_entity_key names.

    :param values_query_set: a values() QuerySet supporting create_result_map
    :return: the result map produced by values_query_set.create_result_map
    """
    related_models = self.resolve_join_models()
    logger.debug(
        "Creating result map for related models %s feature class %s" %
        (', '.join(map(lambda r: str(r), related_models)), self.feature_class))
    feature_class_creator = FeatureClassCreator.from_dynamic_model_class(self.feature_class)
    geography_scopes = feature_class_creator.geography_scopes()
    # Get the related model paths final segment. We want to map these to the db_entity_key names
    # (split('__')[1] takes the segment after the geographies segment of the path)
    related_model_path_to_name = map_to_dict(
        lambda related_model: [
            resolve_related_model_path_via_geographies(
                self.feature_class.objects, related_model).split('__')[1],
            related_model.db_entity_key
        ], related_models)
    return values_query_set.create_result_map(
        related_models=related_models,
        # map_path_segments maps related object paths to their model name,
        # and removes the geographies segment of the path
        map_path_segments=merge(
            # Map each geography scope to its corresponding field on the feature class.
            # The None value marks the segment for removal from result paths
            map_to_dict(
                lambda geography_scope: [
                    feature_class_creator.geographies_field(geography_scope).name,
                    None
                ],
                geography_scopes),
            related_model_path_to_name))
def key_to_dynamic_model_class_lookup(self, configurations_or_containers=None):
    """
    Returns all configuration keys mapped to a dynamic subclass, or None if no class is
    configured yet for the configuration.

    :param configurations_or_containers: Optional specific set of configurations. These are
        the same type passed to the second argument of the constructor. You must specify this
        before the db_entities have been created on the config_entity, for the case where the
        config_entity is null.
    :return: A dict keyed by db_entity key and valued by a dynamic model subclass or None
    """
    configurations = configurations_or_containers or self.dynamic_model_configurations()
    # Without a config_entity there is nothing to scope to, so fall back to abstract classes
    if not self.config_entity:
        return self.__class__.key_to_abstract_model_class_lookup(configurations)
    # Get the config_entity from the first self.db_entity_configuration with a feature_class_configuration
    # Get the corresponding db_entities from the config_entity
    existing_configurations = map(
        lambda configuration: self.dynamic_model_configuration(configuration.key),
        map(lambda configuration: self.__class__.resolve_configuration(configuration), configurations))
    # Instantiate a creator per configuration and resolve its dynamic model class
    return map_to_dict(
        lambda existing_configuration: [
            existing_configuration.key,
            self.__class__(self.config_entity, existing_configuration, self.no_ensure).dynamic_model_class()],
        existing_configurations)
def sql_map_lambda(self, query_set):
    """
    Returns a closure that maps a field path to a raw-SQL select expression where special
    formatting is required (currently only DateFields).

    :param query_set: the QuerySet whose query.select pairs (table, field) are used to
        qualify column names
    :return: the sql_map function described below
    """
    # Lookup from field name to its table name, built from the query's select list
    field_to_table_lookup = map_to_dict(lambda tup: [tup[1], tup[0]], query_set.query.select)

    def sql_map(path):
        """
        Like field_map, but instead produces a sql version of the mapping. Since our export
        functionality sends sql to ogr, we need to handle all the formatting in the sql.
        We can do this by overriding the normal select value with a function (e.g. select
        updated_date becomes select format(update_data). In order to do this we create an
        extra clause for the Django QuerySet since it sadly doesn't have a built-in ability
        to decorate selections with functions
        :param path: the field path to map
        :return: An array with two values, the path and the mapping, or None if no mapping
        is needed. Django nonsensically puts the extra fields before the normal fields,
        even if their names match. Apparently they didn't consider the situation of
        replacing a normal column select with a formatted column select, or they don't
        expect raw sql to be used.
        """
        # Qualify the column with its table when known; otherwise use the path as-is
        full_column_name = '%s.%s' % (field_to_table_lookup.get(path), path.split('__')[-1]) if field_to_table_lookup.get(path) else path
        field_class_path = self.result_map.field_lookup.get(path)
        if not field_class_path:
            return None
        resolved_field_class = resolve_module_attr(field_class_path)
        if resolved_field_class and issubclass(resolved_field_class, DateField):
            # Format the date to match our preferred ISO-like style (gotta love SQL :< )
            return [path, "to_char({0}, 'YYYY-MM-DD') || 'T' || to_char({0}, 'HH:MI:SS') || to_char(extract(TIMEZONE_HOUR FROM {0}), 'fm00') || ':' || to_char(extract(TIMEZONE_MINUTE FROM {0}), 'fm00')".format(full_column_name)]
        return None
    return sql_map
def __new__(meta, name, bases, attrs):
    """
    Metaclass constructor that builds a dynamic Django model subclass.

    NOTE(review): computed_name, fields, class_attrs, related_class_lookup and base_class
    are not parameters here — they appear to be closure variables from an enclosing
    factory function not visible in this chunk; confirm against the full file.
    """
    # Register any additional model fields specified in fields once Django has
    # prepared the class (contribute_to_class cannot run before class_prepared)
    def add_field(sender, **kwargs):
        if sender.__name__ == computed_name:
            for field_name, field in fields.items():
                field.contribute_to_class(sender, field_name)
    class_prepared.connect(add_field)

    def create_class_property(class_attr):
        # Create a class-level property that resolves an id attribute (e.g. config_entity__id)
        # to its full related model instance
        related_attr = class_attr.split('__')[0]
        related_class_name = related_class_lookup.get(related_attr, None)
        if not related_class_name:
            raise Exception("Expected related_class_lookup to contain %s, since class_attrs contain %s" % (related_attr, class_attr))
        related_class = resolve_module_attr(related_class_name)
        # Create the getter property that uses the class manager to look up the related model by id
        def getter(cls):
            return related_class.objects.get(id=getattr(cls, class_attr))
        return ClassProperty(classmethod(getter))

    # Create class-level getter properties to resolve things like the config_entity since we only store the id
    class_properties = map_to_dict(
        lambda class_attr: [class_attr.split('__')[0], create_class_property(class_attr)],
        filter(lambda class_attr: class_attr.endswith('__id'), class_attrs))
    return models.base.ModelBase.__new__(
        meta, computed_name, (base_class,),
        # Merge the manager objects (if the abstract_class has one) with attrs and class_attrs
        merge(dict(objects=base_class.objects.__class__()) if hasattr(base_class, 'objects') else {},
              attrs, class_attrs, class_properties))
def parse_query(self, config_entity, query=None):
    """
    Parses the stored QuerySet components of the DbEntity to create an actual QuerySet
    :param config_entity: scopes the Feature classes used to resolve tables/managers
    :param query: pass in a query instead of using self.query. Used by run_grouped_query
        to add the group_by
    :return: a list of result rows (string query), a single-element list with the
        accumulated QuerySet (structured query), or {} when no query is available
    """
    query = query or self.query
    if not query:
        logger.error("Trying to run query for db_entity %s, which has no query defined or passed in" % self.full_name)
        return {}
    # Use the base table of the Feature class for now. We'll need to use the rel table soon.
    manager = config_entity.db_entity_feature_class(self.key, base_feature_class=True).objects
    if isinstance(query, basestring):
        # String query
        # Join the query with the base tables of the feature classes that match the db_entity_keys listed
        # as named wildcards in the query string. They are in the form %(db_entity_key)
        db_entity_keys = re.findall(r'%\((.+?)\)', query)
        # Substitute each %(db_entity_key)s placeholder with the matching db table name
        formatted_query = query % map_to_dict(
            lambda db_entity_key: [db_entity_key, config_entity.db_entity_feature_class(db_entity_key, base_feature_class=True)._meta.db_table],
            db_entity_keys)
        cursor = connection.cursor()
        cursor.execute(formatted_query)
        result_value = dictfetchall(cursor)
        # always return results as a list so that multiple rows can be returned
        return result_value
    else:
        # Combine the query parts by folding each queryset command over the manager
        return [accumulate(lambda manager, queryset_command: self.parse_and_exec_queryset_command(manager, queryset_command), manager, query)]
def test__db_entity__permissions__match(self):
    """
    Verifies that DbEntities at each ConfigEntity level (Region, Project, Scenario)
    carry the default DbEntity permissions declared by their fixtures.
    """
    db_entity_key = DemoDbEntityKey.Fab.ricate
    scenario = Scenario.objects.get(key='irthorn_base_condition')
    # Resolve a DbEntity scoped to the scenario by its fabricated key
    get_db_entity = lambda key: DbEntityInterest.objects.get(
        db_entity__key=db_entity_key(key),
        config_entity=scenario,
    ).db_entity
    # Map each ConfigEntity class in the scenario's ancestry to its fixture's
    # default DbEntity permissions
    class_to_default_db_entity_permissions = map_to_dict(lambda config_entity: [
        config_entity.__class__,
        ConfigEntityFixture.resolve_config_entity_fixture(config_entity).default_db_entity_permissions()
    ], [scenario]+scenario.ancestors)
    # Create a small configuration to verify permissions
    PermissionTesting([
        # Test Region DbEntity permissions
        dict(
            instance=get_db_entity(DemoDbEntityKey.CPAD_HOLDINGS),
            groups=class_to_default_db_entity_permissions[Region]
        ),
        # Test Project permissions
        dict(
            instance=get_db_entity(DemoDbEntityKey.PROJECT_EXISTING_LAND_USE_PARCELS),
            groups=class_to_default_db_entity_permissions[Project],
        ),
        # Test Scenario permissions
        dict(
            instance=get_db_entity(DemoDbEntityKey.EXISTING_LAND_USE_PARCELS),
            groups=class_to_default_db_entity_permissions[Scenario],
        ),
    ], DemoDbEntityKey).test_permissions()
def annotated_related_feature_class_pk_via_geographies(manager, config_entity, db_entity_keys):
    """
    Annotates the manager's QuerySet with the pk of each related feature class,
    joined via their common Geography class.

    :param manager: manager of the main feature class
    :param config_entity: scopes the related feature classes by db_entity_key
    :param db_entity_keys: keys of the related feature classes to annotate
    :return: the annotated QuerySet; each db_entity_key becomes an annotation holding
        Min of the related model's pk reached through the geographies join
    """
    from footprint.main.models.feature.feature_class_creator import FeatureClassCreator
    feature_class_creator = FeatureClassCreator.from_dynamic_model_class(manager.model)

    def resolve_related_model_pk(db_entity_key):
        # Build the query path from the main model to the related model's pk
        related_model = config_entity.db_entity_feature_class(db_entity_key)
        # The common Geography class
        geography_class = feature_class_creator.common_geography_class(related_model)
        geography_scope = feature_class_creator.common_geography_scope(related_model)
        logger.warn("Resolved geography scope %s", geography_scope)
        # Find the geographies ManyToMany fields that relates this model to the related_model
        # via a Geography class. Which geography class depends on their common geography scope
        geographies_field = feature_class_creator.geographies_field(geography_scope)
        try:
            # Find the queryable field name from the geography class to the related model
            related_model_geographies_field_name = resolve_queryable_name_of_type(geography_class, related_model)
        except Exception:
            # Narrowed from a bare except: so SystemExit/KeyboardInterrupt propagate.
            # Sometimes the geography class hasn't had its fields cached properly. Fix here
            clear_many_cache(geography_class)
            related_model_geographies_field_name = resolve_queryable_name_of_type(geography_class, related_model)
        return "%s__%s__pk" % (geographies_field.name, related_model_geographies_field_name)

    pk_paths = map_to_dict(
        lambda db_entity_key: [db_entity_key, Min(resolve_related_model_pk(db_entity_key))],
        db_entity_keys)
    return manager.annotate(**pk_paths)
def full_dehydrate(self, bundle, for_list=False):
    """
    Dehydrates a dict bundle into an unmanaged join feature instance before
    delegating to the standard Tastypie dehydrate.

    NOTE(review): join_feature_class is not defined in this block — presumably it comes
    from an enclosing scope (dynamic resource construction); confirm against the full file.
    """
    # Convert the dict to the unmanaged join_feature_class instance
    # Since our JoinFeatureClass uses the client field names, we must map them back to server names
    # To do the lookup below
    field_name_lookup = map_to_dict(
        lambda field: [string.replace(field.name, self.FIELD_RELATION_CLIENT, self.FIELD_RELATION_SERVER), True],
        join_feature_class._meta.fields)
    # Map mapping fields from __ to _x_ so Tastypie|Django isn't confused by them;
    # fields not present in the lookup are dropped (map_dict_to_dict lambda returns None)
    dct = map_dict_to_dict(
        lambda key, value: [self.client_field_name(key), value] if field_name_lookup.get(key) else None,
        bundle.obj)
    obj = join_feature_class()
    for key, value in dct.items():
        setattr(obj, key, value)
    # The object needs a pk to create its resource_uri
    setattr(obj, 'pk', obj.id)
    new_bundle = self.build_bundle(obj=obj, request=bundle.request)
    # This isn't automatically included like for the normal FeatureResource
    # The unique id must be unique across all joined Feature classes
    new_bundle.data['the_unique_id'] = '_'.join(
        [obj.the_unique_id] +\
        map(lambda attr: str(getattr(obj, attr)), self.join_model_attributes)
    )
    return super(JoinFeatureResource, self).full_dehydrate(new_bundle, for_list)
def create_layer_selection(config_entity, layer, attribute_id):
    """
    Builds the Tilestache vector layer configuration for a layer's per-user selections.

    :param config_entity: scopes users and the feature class of the layer
    :param layer: the Layer whose selections are exposed
    :param attribute_id: attribute id embedded in the generated layer key
    :return: a list with one TSLayer for the selection, or an empty list when no user
        has permissions
    """
    db_entity = layer.db_entity_interest.db_entity
    # NOTE(review): this local shadows any module-level `connection` (e.g. Django's);
    # here it is the tilestache DB connection parameter dict
    connection = connection_dict(layer.config_entity.db)
    tilestache_layers = []
    # Users with permission on either the config_entity or the layer's db_entity
    users = set(get_users_with_perms(config_entity)) | set(get_users_with_perms(layer.db_entity_interest.db_entity))
    # Make sure layer_selection instances exist for the users
    from footprint.main.publishing.layer_publishing import update_or_create_layer_selections_for_layer
    update_or_create_layer_selections_for_layer(layer, users=users)
    logger.info("Get/Create layer_selection for config_entity %s, layer %s, users %s" %\
        (config_entity.key, layer.db_entity_key, ','.join(map(lambda user: user.username, users))))
    # Each layer has a dynamic class representing its SelectedFeature table
    get_or_create_layer_selection_class_for_layer(layer)
    if not users:
        return tilestache_layers
    config_entity.db_entity_feature_class(key=layer.db_entity_key)
    layer_selection_class = get_or_create_layer_selection_class_for_layer(layer, config_entity)
    # Take the first user to create a template query
    user = list(users)[0]
    # Each LayerSelection instance is per user
    layer_selection = layer_selection_class.objects.get_or_create(user=user)[0]
    # Extract the query from the QuerySet, replacing the concrete layer_selection id
    # with a {user_id} placeholder filled in at request time
    query = re.sub(
        r'"layer_selection_id" = \d+',
        r'"layer_selection_id" = {user_id}',
        str(layer_selection.selected_features.values('wkb_geometry', 'id').query))
    logger.info("Creating tilestache layer_selection for layer %s, user %s, query: %s" % (layer.full_name, user.username, query))
    # Lookup from user id to that user's layer_selection id, used to resolve {user_id}
    user_id_lookup = map_to_dict(
        lambda layer_selection: [layer_selection.user.id, layer_selection.id],
        layer_selection_class.objects.all())
    # Embed the id in the Geojson for each feature.
    # Nothing else is needed, since all other attributes can be looked up based on the id
    id_field = map(lambda field: field.name + '_id', layer_selection.feature_class._meta.parents.values())[0]
    vector_selection_layer = build_vector_layer_config(
        parameters=merge(connection, dict(query=query, column="wkb_geometry", user_id_lookup=user_id_lookup)),
        provider_id_property=id_field,
        client_id_property=db_entity._meta.pk.name)
    layer_key = "layer:{layer},attr_id:{attribute},type:{type}".format(
        layer=layer.id, attribute=attribute_id, type='selection')
    logger.info("Creating layer %s" % layer_key)
    tilestache_layers.append(TSLayer(key=layer_key, value=vector_selection_layer))
    return tilestache_layers
def full_dehydrate(self, bundle, for_list=False):
    """
    Dehydrates a dict bundle into an unmanaged join feature instance before
    delegating to the standard Tastypie dehydrate.

    NOTE(review): join_feature_class is not defined in this block — presumably it comes
    from an enclosing scope (dynamic resource construction); confirm against the full file.
    """
    # Convert the dict to the unmanaged join_feature_class instance
    # Since our JoinFeatureClass uses the client field names, we must map them back to server names
    # To do the lookup below
    field_name_lookup = map_to_dict(
        lambda field: [
            string.replace(field.name, self.FIELD_RELATION_CLIENT, self.FIELD_RELATION_SERVER),
            True
        ], join_feature_class._meta.fields)
    # Map mapping fields from __ to _x_ so Tastypie|Django isn't confused by them;
    # fields not present in the lookup are dropped (map_dict_to_dict lambda returns None)
    dct = map_dict_to_dict(
        lambda key, value: [self.client_field_name(key), value] if field_name_lookup.get(key) else None,
        bundle.obj)
    obj = join_feature_class()
    for key, value in dct.items():
        setattr(obj, key, value)
    # The object needs a pk to create its resource_uri
    setattr(obj, 'pk', obj.id)
    new_bundle = self.build_bundle(obj=obj, request=bundle.request)
    # This isn't automatically included like for the normal FeatureResource
    # The unique id must be unique across all joined Feature classes
    new_bundle.data['the_unique_id'] = '_'.join(
        [obj.the_unique_id] +\
        map(lambda attr: str(getattr(obj, attr)), self.join_model_attributes)
    )
    return super(JoinFeatureResource, self).full_dehydrate(new_bundle, for_list)
def update_summary_results(self, query_result):
    """
    Updates the summary results with the given QuerySet or results list
    :param self:
    :param query_result: a Django QuerySet, or a plain list of row dicts
        (e.g. from a raw cursor via dictfetchall)
    :return:
    """
    if isinstance(query_result, QuerySet):
        # Find aggregate and normal field names
        aggregate_names = query_result.aggregate_names if hasattr(query_result, 'aggregate_names') else []
        self.summary_fields = (query_result.field_names if hasattr(query_result, 'field_names') else []) + aggregate_names
        # Find aggregate and normal field titles
        aggregate_titles = map_dict(lambda key, value: self.cleanup_title(key), query_result.query.aggregates) if hasattr(query_result.query, 'aggregates') else []
        titles = map(lambda tup: self.cleanup_title(tup[1]), query_result.query.select) + aggregate_titles
        # Create a lookup from field name to title
        self.summary_field_title_lookup = dual_map_to_dict(lambda key, value: [key, value], self.summary_fields, titles)
        self.summary_query_sql = str(query_result.query)
    elif len(query_result) > 0:
        # For single row aggregates. TODO figure out how to extract the names from the query
        self.summary_fields = query_result[0].keys()
        self.summary_field_title_lookup = map_to_dict(lambda key: [key, key], self.summary_fields)
        # Fix: a plain results list has no .query attribute — the original unconditionally
        # did str(query_result.query) here, which raised AttributeError for list input
        self.summary_query_sql = str(query_result.query) if hasattr(query_result, 'query') else None
    self.summary_results = list(query_result)
    self.save()
def annotated_related_feature_class_pk_via_geographies(manager, config_entity, db_entity_keys):
    """
    Annotates the manager's QuerySet with the pk of each related feature class,
    joined via their common Geography class.

    :param manager: manager of the main feature class
    :param config_entity: scopes the related feature classes by db_entity_key
    :param db_entity_keys: keys of the related feature classes to annotate
    :return: the annotated QuerySet; each db_entity_key becomes an annotation holding
        Min of the related model's pk reached through the geographies join
    """
    from footprint.main.models.feature.feature_class_creator import FeatureClassCreator
    feature_class_creator = FeatureClassCreator.from_dynamic_model_class(manager.model)

    def resolve_related_model_pk(db_entity_key):
        # Build the query path from the main model to the related model's pk
        related_model = config_entity.db_entity_feature_class(db_entity_key)
        # The common Geography class
        geography_class = feature_class_creator.common_geography_class(related_model)
        geography_scope = feature_class_creator.common_geography_scope(related_model)
        logger.warn("Resolved geography scope %s", geography_scope)
        # Find the geographies ManyToMany fields that relates this model to the related_model
        # via a Geography class. Which geography class depends on their common geography scope
        geographies_field = feature_class_creator.geographies_field(geography_scope)
        try:
            # Find the queryable field name from the geography class to the related model
            related_model_geographies_field_name = resolve_queryable_name_of_type(geography_class, related_model)
        except Exception:
            # Narrowed from a bare except: so SystemExit/KeyboardInterrupt propagate.
            # Sometimes the geography class hasn't had its fields cached properly. Fix here
            clear_many_cache(geography_class)
            related_model_geographies_field_name = resolve_queryable_name_of_type(geography_class, related_model)
        return '%s__%s__pk' % (geographies_field.name, related_model_geographies_field_name)

    pk_paths = map_to_dict(
        lambda db_entity_key: [db_entity_key, Min(resolve_related_model_pk(db_entity_key))],
        db_entity_keys)
    return manager.annotate(**pk_paths)
def db_entity_to_feature_class_lookup(self):
    """
    Maps each configured db_entity of the config_entity to its dynamic feature class.

    Only db_entities that carry a feature_class_configuration are included.
    :return: dict keyed by db_entity, valued by the dynamic model class
    """
    # Restrict to db_entities that actually have a feature class configured
    configured_db_entities = filter(
        lambda db_entity: db_entity.feature_class_configuration,
        self.config_entity.computed_db_entities())
    return map_to_dict(
        lambda db_entity: [
            db_entity,
            FeatureClassCreator(self.config_entity, db_entity).dynamic_model_class()],
        configured_db_entities)
def test__db_entity__permissions__match(self):
    """
    Verifies that DbEntities at each ConfigEntity level (Region, Project, Scenario)
    carry the default DbEntity permissions declared by their fixtures.
    """
    db_entity_key = DemoDbEntityKey.Fab.ricate
    scenario = Scenario.objects.get(key='irthorn_base_condition')
    # Resolve a DbEntity scoped to the scenario by its fabricated key
    get_db_entity = lambda key: DbEntityInterest.objects.get(
        db_entity__key=db_entity_key(key),
        config_entity=scenario,
    ).db_entity
    # Map each ConfigEntity class in the scenario's ancestry to its fixture's
    # default DbEntity permissions
    class_to_default_db_entity_permissions = map_to_dict(
        lambda config_entity: [
            config_entity.__class__,
            ConfigEntityFixture.resolve_config_entity_fixture(config_entity).default_db_entity_permissions()
        ], [scenario] + scenario.ancestors)
    # Create a small configuration to verify permissions
    PermissionTesting(
        [
            # Test Region DbEntity permissions
            dict(instance=get_db_entity(DemoDbEntityKey.CPAD_HOLDINGS),
                 groups=class_to_default_db_entity_permissions[Region]),
            # Test Project permissions
            dict(
                instance=get_db_entity(DemoDbEntityKey.PROJECT_EXISTING_LAND_USE_PARCELS),
                groups=class_to_default_db_entity_permissions[Project],
            ),
            # Test Scenario permissions
            dict(
                instance=get_db_entity(DemoDbEntityKey.EXISTING_LAND_USE_PARCELS),
                groups=class_to_default_db_entity_permissions[Scenario],
            ),
        ], DemoDbEntityKey).test_permissions()
def model_result_field_path_field_lookup(self, related_models, return_related_models=False):
    """
    Maps each result path of the query to the string path of its field class.

    :param related_models: models related to the main model by a many-to-many relationship.
        For Feature classes the relationship goes through the geographies many-to-many,
        but a direct many-to-many works too
    :param return_related_models: Default False. When True, each value becomes a tuple of
        (field class path, related model class path or None). Used by JoinFeature queries
        to resolve foreign key ids to their related model class, since the related model
        is unavailable in values() (join) query results
    :return: dict keyed by field_path, valued by the field class path — or the tuple form
        described above when return_related_models is True
    """
    def lookup_entry(result_path):
        # Resolve the field at this path, along with its related model if any
        field, related_model = resolve_field_of_path(self, result_path, True)
        class_path = full_module_path(field.__class__)
        if return_related_models:
            related_class_path = full_module_path(related_model) if related_model else None
            return [result_path, (class_path, related_class_path)]
        return [result_path, class_path]

    return map_to_dict(lookup_entry, self.model_result_paths(related_models))
def test_config_entity_api__permissions(self):
    """
    Make sure that users only get ConfigEntity's that match their permission settings
    :return:
    """
    permission_configuration = TestConfigEntityPermissions.config_entity_configuration()
    resource_name = 'config_entity'

    # Iterate through the test_configurations and extract a user for each group_key
    # Make a dict with the user as the key and all the instances from the test_config that the
    # user corresponds to. This gives a lookup of a user to the config_entities that we expect
    # the user to be able to view
    # Create a user->instances dict
    # Combine our {user1:instances, user2:instances,...} dicts
    user_to_expected_instances = merge_dict_list_values(
        *map(
            lambda test_configuration:\
                # Combine our [user, instance] pairs into {user1:instances, user2:instances,...}
                # Remove null keys (owing to groups with no users)
                compact_dict(map_to_dict_with_lists(
                    # Each test_configuration has several groups.
                    # For each group resolve a user and return [user, instance]
                    lambda group_key: [
                        get_first_value_or_none(Group.objects.get(name=group_key).user_set.all()),
                        test_configuration['instance']],
                    test_configuration['groups'].keys())),
            permission_configuration.test_configuration)
    )
    all_instances = set(unique(flatten(user_to_expected_instances.values())))
    for user, instances in user_to_expected_instances.items():
        other_instances = all_instances - set(instances)
        # Fetch all instances with this user and create a lookup so we can test
        # that the resulting instances are present or not present as expected according to
        # the permissions
        response = self.get(resource_name, user=user)
        result_instance_lookup = map_to_dict(
            lambda instance_dict: [int(instance_dict['id']), instance_dict],
            self.deserialize(response)['objects'])
        for instance in instances:
            matching_instance = result_instance_lookup.get(instance.id)
            # Fix: message previously read "but does." which contradicted the assertion
            assert_is_not_none(matching_instance,
                               "User %s should have view permission to instance %s with id %s and key %s but does not."
                               % \
                               (user.username,
                                instance,
                                instance.id,
                                permission_configuration.key_class.Fab.remove(
                                    permission_configuration.instance_key_lambda(instance))))
        for instance in other_instances:
            # Fix: the original asserted on the stale matching_instance left over from the
            # previous loop; each forbidden instance must be looked up individually
            matching_instance = result_instance_lookup.get(instance.id)
            assert_is_none(matching_instance,
                           "User %s should not have view permission to instance %s with id %s and key %s but does." % \
                           (user.username,
                            instance,
                            instance.id,
                            permission_configuration.key_class.Fab.remove(
                                permission_configuration.instance_key_lambda(instance))))
def create_control_totals(self):
    """
    Builds the initial control totals: one entry per attribute, all starting at zero.
    :return: dict keyed by attribute name and valued by 0
    """
    zeroed = lambda attribute: [attribute, 0]
    return map_to_dict(zeroed, self.attributes)
def construct_primary_components(self, client='default'):
    """
    :param client: client key used to select the crops/buildings CSVs to load
    :return: Dictionary keyed by Building name and valued by Building objects
    (UrbanFootprint v0.1 Built Form default set)
    """
    primary_components = {}
    # Crop rows become agriculture_attribute_set entries, copying every
    # AgricultureAttributeSet model field (minus id) straight off the CSV row
    for import_primary_component in self.load_crops_csv(client):
        fields = AgricultureAttributeSet._meta.fields
        agriculture_attribute_set = remove_keys(
            map_to_dict(
                lambda field: [
                    field.attname,
                    getattr(import_primary_component, field.attname)
                ], fields),
            ['id'])
        agriculture_attribute_set['name'] = import_primary_component.name
        if import_primary_component.name in primary_components:
            raise Exception("Duplicate entry for primary component: " + import_primary_component.name)
        primary_components[import_primary_component.name] = dict(
            agriculture_attribute_set=agriculture_attribute_set)
    # Building rows become building_attribute_set entries with an explicit
    # field-by-field mapping (note the *_structured_parking_spaces renames)
    for import_primary_component in self.load_buildings_csv(client):
        building_attribute_set = dict(
            name=import_primary_component.name,
            address=import_primary_component.address,
            website=import_primary_component.website,
            lot_size_square_feet=import_primary_component.lot_size_square_feet,
            floors=import_primary_component.floors,
            total_far=import_primary_component.total_far,
            average_parking_space_square_feet=import_primary_component.average_parking_space_square_feet,
            surface_parking_spaces=import_primary_component.surface_parking_spaces,
            below_ground_structured_parking_spaces=import_primary_component.below_ground_parking_spaces,
            above_ground_structured_parking_spaces=import_primary_component.above_ground_parking_spaces,
            building_footprint_square_feet=import_primary_component.building_footprint_square_feet,
            surface_parking_square_feet=import_primary_component.surface_parking_square_feet,
            hardscape_other_square_feet=import_primary_component.hardscape_other_square_feet,
            irrigated_softscape_square_feet=import_primary_component.irrigated_softscape_square_feet,
            nonirrigated_softscape_square_feet=import_primary_component.nonirrigated_softscape_square_feet,
            irrigated_percent=import_primary_component.irrigated_percent,
            vacancy_rate=import_primary_component.vacancy_rate,
            household_size=import_primary_component.household_size)
        # Names must be unique across both CSVs
        if import_primary_component.name in primary_components:
            raise Exception("Duplicate entry for primary component: " + import_primary_component.name)
        primary_components[import_primary_component.name] = dict(
            building_attribute_set=building_attribute_set)
    return primary_components
def dynamic_join_model_class(self, join_models, related_field_names):
    """
    Creates an unmanaged subclass of the feature class with extra fields to represent the
    fields of the join_models. This also adds fields for any fields specified in the
    related_model_lookup. This is not for join models but ForeignKeys such as BuiltForm.
    These latter fields must be specified explicitly because the main model and join models
    can't populate their foreign keys from the query because the query has to be a
    ValuesQuerySet in order to do the join. So we create id versions of the fields here
    (e.g. built_form_id) which the query can fill and then use that to manually set the
    foreign key reference in the Tastypie resource.
    :param join_models: Other feature models whose attributes should be added to the subclass
    :param related_field_names: List of field names of foreign key id fields (AutoFields)
    :return: the dynamically created unmanaged join model class
    """
    main_model_class = self.dynamic_model_class()
    manager = main_model_class.objects
    # Exclude the following field types. Since the base Feature defines an id we'll still
    # get that, which we want
    exclude_field_types = (ForeignKey, ToManyField, OneToOneField, GeometryField)
    all_field_paths_to_fields = merge(
        # Create fields to represented foreign key id fields
        # Our query fetches these ids since it can't fetch related objects (since its a values() query)
        map_to_dict(
            lambda field_name: [field_name.replace('__', '_x_'),
                                IntegerField(field_name.replace('__', '_x_'), null=True)],
            related_field_names
        ),
        # The join fields for each joined related model
        *map(
            lambda related_model: related_field_paths_to_fields(
                manager,
                related_model,
                exclude_field_types=exclude_field_types,
                fields=limited_api_fields(related_model),
                separator='_x_'),
            join_models)
    )
    abstract_feature_class = resolve_module_attr(self.configuration.abstract_class_name)
    # Make sure the class name is unique to the related models and the given ConfigEntity
    related_models_unique_id = '_'.join(sorted(map(lambda related_model: related_model.__name__, join_models), ))
    dynamic_model_clazz = dynamic_model_class(
        main_model_class,
        self.db_entity.schema,
        self.db_entity.table,
        class_name="{0}{1}{2}{3}Join".format(
            abstract_feature_class.__name__,
            self.db_entity.id,
            self.config_entity.id,
            related_models_unique_id),
        fields=all_field_paths_to_fields,
        class_attrs=self.configuration.class_attrs or {},
        related_class_lookup=self.configuration.related_class_lookup or {},
        is_managed=False,
        cacheable=False)
    logger.info("Created dynamic join model class %s" % dynamic_model_clazz)
    logger.debug("Created with model fields %s" % map(lambda field: field.name, dynamic_model_clazz._meta.fields))
    logger.debug("Created with related and join fields %s" % all_field_paths_to_fields)
    return dynamic_model_clazz
def get_valued_arg_dict(func):
    """
    Returns the args with values specified.
    :param func: the function to inspect
    :return: dict keyed by argument name, valued by that argument's declared default
    """
    args, varargs, varkw, defaults = inspect.getargspec(func)
    # Fix: getargspec returns None (not an empty tuple) when func declares no defaults,
    # so reversed(defaults) raised TypeError for such functions
    reversed_defaults = reversed(defaults or ())
    # Defaults align with the trailing args, so pop from the end of the (local) args
    # list while walking the defaults in reverse
    return map_to_dict(lambda default_value: [args.pop(), default_value], reversed_defaults)
def update_or_create_projects(region_fixtures=None, **kwargs):
    """
    Create test projects according to the samples
    :param region_fixtures: optional region fixtures; defaults to
        config_entities_fixture.regions(). Fix: the original evaluated
        config_entities_fixture.regions() as the default argument, i.e. once at import
        time — resolving it lazily per call is backward-compatible and avoids the
        mutable/eager default pitfall
    :param kwargs: may contain limit_to_classes to restrict which ConfigEntity
        classes are created/updated
    :return: the list of created or updated Projects
    """
    if region_fixtures is None:
        region_fixtures = config_entities_fixture.regions()
    regions = update_of_create_regions(region_fixtures, **kwargs)
    regions_by_key = map_to_dict(lambda region: [region.key, region], regions)
    project_fixtures = config_entities_fixture.projects()

    def update_or_create_project(project_dict):
        # Returns (project, created, updated); when Project isn't in limit_to_classes the
        # existing instance must already exist and is fetched rather than created
        if kwargs.get('limit_to_classes') and Project not in kwargs['limit_to_classes']:
            if Project.objects.filter(key=project_dict['key']).count() != 1:
                raise Exception("Trying to get Project %s, which hasn't been created" % project_dict['key'])
            project_tuple = Project.objects.get(key=project_dict['key']), False, False
        else:
            project_tuple = Project.objects.update_or_create(
                key=project_dict['key'],
                defaults=merge(
                    dict(behavior=get_behavior('default_config_entity'),),
                    remove_keys(project_dict, ['key', 'base_table', 'region_key', 'media']),
                    dict(parent_config_entity=regions_by_key[project_dict['region_key']],
                         creator=User.objects.get(username=UserGroupKey.SUPERADMIN))))
        logger.info("{update_or_create} Project {config_entity}".format(
            update_or_create='Created' if project_tuple[1] else 'Updated',
            config_entity=project_tuple[0]))
        # Update or create the project's Medium instances and attach any new ones
        media = map(
            lambda medium_config: Medium.objects.update_or_create(
                key=medium_config.key,
                defaults=remove_keys(medium_config.__dict__['kwargs'], 'key'))[0],
            project_dict.get('media', []))
        existing_media = project_tuple[0].media.filter(id__in=map(lambda medium: medium.id, media))
        media_to_add = set(media) - set(existing_media)
        if len(media_to_add) > 0:
            project_tuple[0].media.add(*media_to_add)
        return project_tuple

    projects_created_updated = map(
        lambda project_fixture: update_or_create_project(project_fixture),
        project_fixtures)
    return map(lambda project_created_updated: project_created_updated[0], projects_created_updated)
def attribute_fields(self):
    """
    Resolve the model fields backing this group's attribute keys.

    Each attribute key is translated through self.attribute_mapping (falling
    back to the key itself) and then looked up among the fields of the
    feature class behind this group's db_entity.
    :return: list of model fields, one per attribute key
    """
    feature_class = self.feature_behavior.db_entity.feature_class
    # Index the feature class's fields by name for direct lookup
    fields_by_name = {field.name: field for field in feature_class._meta.fields}
    resolved_fields = []
    for attribute_key in self.attribute_group.attribute_keys:
        mapped_name = self.attribute_mapping.get(attribute_key, attribute_key)
        resolved_fields.append(fields_by_name[mapped_name])
    return resolved_fields
def apply_regexes_to_file(file_path, regex_replacements):
    """
    Apply the given regex replacements to the given file. The file is updated in
    memory and then written back to the same file.
    :param file_path: The file path to read and modify
    :param regex_replacements: A list of tuples to apply in order. Each tuple
        consists of two or three items. First is the match pattern, which is
        compiled to a regex. Second is the replace pattern. The optional third is
        either a (start, end) tuple giving 0-based line numbers to apply to,
        inclusive, or a 0-based index meaning: only apply this regex once the
        regex at that index in regex_replacements has matched at least once.
        Example:
        [('replace_me', 'with_me', (0, 9)),  # Apply to the first 10 lines
         ('and_replace_m', 'with_this', 0)]  # Only after the first regex matched
    :return:
    """
    # Create temp file. BUG FIX: the original leaked the OS-level file
    # descriptor returned by mkstemp; close it since we reopen the path below.
    fh, full_path = mkstemp()
    os.close(fh)
    # Create a compiled version of each regex for speed, keyed by pattern string
    compiled = dict((regex_replacement[0], re.compile(regex_replacement[0]))
                    for regex_replacement in regex_replacements)
    # Track which regex indexes have matched at least once
    matched_regexes = {}
    i = 0
    with codecs.open(full_path, 'w', 'utf-8') as new_file:
        with codecs.open(file_path, 'r', 'utf-8') as old_file:
            # Iterate through the lines of the file
            for line in old_file:
                regex_index = 0
                # Iterate through the regexes
                for regex_tuple in regex_replacements:
                    option = regex_tuple[2] if len(regex_tuple) == 3 else None
                    if not option or (
                        # option is a line range and the current line is in it
                        (isinstance(option, tuple) and option[0] <= i <= option[1]) or
                        # option is the index of a regex that already matched
                        (isinstance(option, numbers.Number) and matched_regexes.get(option))
                    ):
                        # Make the replacement
                        replacement = compiled[regex_tuple[0]].sub(regex_tuple[1], line)
                        # BUG FIX: the flag was sticky on its first recorded value,
                        # so a later match could never flip it to True. Mark the
                        # regex as matched once anything ever changes.
                        matched_regexes[regex_index] = \
                            matched_regexes.get(regex_index, False) or replacement != line
                        line = replacement
                    regex_index += 1
                new_file.write(line)
                i += 1
    # Remove original file
    remove(file_path)
    # Move new file into place
    move(full_path, file_path)
    # For some reason the default write permission is too low
    os.chmod(file_path, 0o664)
def key_to_abstract_model_class_lookup(cls, configuration_or_containers):
    """
    Like self.db_entity_key_to_feature_class_lookup, but used when no ConfigEntity
    is in scope. This returns the abstract version of the Feature subclasses
    keyed by configuration key.
    :param configuration_or_containers: configurations or containers resolvable
        by cls.resolve_configuration
    :return: dict of configuration key -> abstract Feature class
    """
    lookup = {}
    for configuration_or_container in configuration_or_containers:
        configuration = cls.resolve_configuration(configuration_or_container)
        # Resolve the abstract class from its dotted module path
        lookup[configuration.key] = resolve_module_attr(configuration.abstract_class_name)
    return lookup
def result_field_extra_sql_lookup(self, query_set):
    """
    Returns a lookup that maps some field names to a lambda that describes how
    to create a human readable form of the field value. This is used by the
    FeatureResource.
    :return: dict with a single 'select' key
    """
    sql_map = self.sql_map_lambda(query_set)
    # Build an order-preserving mapping over the result fields
    ordered_lookup = map_to_dict(sql_map, self.result_map.result_fields, use_ordered_dict=True)
    # Drop null-valued entries before handing the lookup to 'select'
    return dict(select=compact_dict(ordered_lookup))
def sync_permissions(self, additional_permissions=None, permission_key_class=PermissionKey,
                     superior_permission_lookup={}):
    """
    Syncs the instance permissions using the current values of group_permission_configuration.
    The superior groups of the object's and additional_permissions are also synced to have
    equal or higher permissions. They only get higher permissions if they match an entry
    in superior_permissions.
    :param additional_permissions: A dict representing permissions to use in addition to any
        stored in self.group_permission_configuration. This is used for groups that are
        dynamically created for the ConfigEntity
    :param permission_key_class: Default PermissionKey, pass a subclass if a Permissions
        implementer implements extra permissions
    :param superior_permission_lookup: When the superior Group permissions are set, this is
        checked to see if the group should receive higher permissions than those of the
        subordinates. If a key match is found, the permission value is used. Otherwise the
        subordinate value is used. (Read-only here, so the mutable default is safe.)
    :return:
    """
    # Add the permissions given in group_permissions_configuration
    configuration = merge(self.group_permission_configuration or {}, additional_permissions or {})
    if not configuration:
        return
    # Get all GroupHierarchy instances up front to save making extra queries
    group_hierarchy_lookup = map_to_dict(
        lambda group_hierarchy: [group_hierarchy.group.name, group_hierarchy],
        GroupHierarchy.objects.filter(group__name__in=configuration.keys()))
    for group_name, permission_key in configuration.items():
        # Assign the permission to the group to the AttributeGroupConfiguration object
        # We can have multiple permission_keys if the permission_key is PermissionKey.ALL
        try:
            subordinate_group_hierarchy = group_hierarchy_lookup[group_name]
        except KeyError:
            # BUG FIX: was a bare except, which also swallowed unrelated errors
            # (including KeyboardInterrupt); only a missing key is expected here
            raise Exception("Couldn't find group %s among group_hierarcy_lookup, which has keys %s" %
                            (group_name, ', '.join(group_hierarchy_lookup.keys())))
        subordinate_group = subordinate_group_hierarchy.group
        # Remove any permissions this group has on the object, in case we changed
        # the configuration
        for class_permission_key in permission_key_class.permission_keys(
                permission_key_class.ALL, self.__class__):
            remove_perm(class_permission_key, subordinate_group, self)
        logger.info("Setting permissions for %s and its superiors that don't have their own configurations",
                    subordinate_group.name)
        # Find superior groups that aren't explicitly listed in the configuration.
        # These get the same permissions as the subordinate group, unless a higher
        # permission is specified in superior_permission_lookup
        groups = set([subordinate_group]) | set(subordinate_group_hierarchy.all_superiors())
        report = {}
        for group in filter(
                lambda group: group == subordinate_group or group.name not in configuration.keys(),
                groups):
            permission = self.best_matching_permission(group, superior_permission_lookup)
            self.assign_permission_to_groups([group], permission,
                                             permission_key_class=permission_key_class)
            report.setdefault(permission, []).append(group.name)
        # Log the results
        for permission, group_names in report.iteritems():
            logger.info("For class %s, instance %s assigned permission key %s to groups %s",
                        self.__class__.__name__, self.key, permission, ', '.join(group_names))
def result_field_extra_sql_lookup(self, query_set):
    """
    Returns a lookup that maps some field names to a lambda that describes how
    to create a human readable form of the field value. This is used by the
    FeatureResource.
    :return: dict with a single 'select' key
    """
    result_fields = self.result_map.result_fields
    # Ordered mapping of field name -> humanizing expression, nulls removed
    select_lookup = compact_dict(
        map_to_dict(self.sql_map_lambda(query_set), result_fields, use_ordered_dict=True))
    return {'select': select_lookup}
def construct_sample_placetype_components(self, sample_placetypes):
    """
    Build the sample placetype component dicts referenced by the sample placetypes.
    :param sample_placetypes: list of placetype dicts with a
        building_attribute_set.name entry
    :return: list of dicts of the form {'building_attribute_set': {'name': ...}},
        one per distinct placetype component name used by the samples
    """
    all_placetype_components = self.construct_placetype_components(client='default')
    placetype_component_dict = map_to_dict(
        lambda placetype_component: [placetype_component['building_attribute_set']['name'],
                                     placetype_component],
        all_placetype_components
    )
    # NOTE(review): other call sites pass a client argument to
    # construct_placetype_component_percents -- confirm this two-arg call
    sample_buildingtype_percents = self.construct_placetype_component_percents(
        map_to_dict(lambda placetype: [placetype['building_attribute_set']['name'], placetype],
                    sample_placetypes),
        placetype_component_dict)
    # BUG FIX: the original shadowed the accumulator list with the inner loop
    # variable (both named placetype_components) and then called .append on a
    # string, raising AttributeError; the final set() then iterated characters.
    # Collect the component names with distinct variable names instead.
    placetype_component_names = []
    for placetype, components in sample_buildingtype_percents.items():
        for placetype_component_name, attributes in components['placetype_components'].items():
            placetype_component_names.append(placetype_component_name)
    # De-duplicate and shape each name into the expected structure
    return [{'building_attribute_set': {'name': name}}
            for name in set(placetype_component_names)]
def related_field_paths_to_fields(manager, related_model, exclude_field_types=(), fields=None,
                                  excludes=[], separator=None):
    """
    Iterates through the fields of the related model, appending each to the related
    model field name from the main model. Returns the dict of related field paths
    as keys valued by the field.
    :param exclude_field_types: Optional tuple of Field classes that should be filtered out.
    :param separator: Optional separator with which to replace __ in the related_field_paths
    """
    related_field = resolve_related_model_path_via_geographies(manager, related_model)

    def path_entry(field):
        # Entries mapped to None are dropped by map_to_dict
        if not field_predicate(field, exclude_field_types, fields, excludes):
            return None
        return _field_path_and_cloned_field(related_field, field, separator)

    return map_to_dict(path_entry, related_model._meta.fields)
def construct_sample_built_forms(self, client):
    """
    Builds a sample set of built forms for testing.
    :param client: client key used to select the fixture CSVs
    :return: dict of placetypes, placetype_components, primary_components and
        the percent mappings between them
    """
    placetypes = self.construct_sample_placetypes()
    buildingtypes = self.construct_sample_placetype_components(placetypes)
    # construct_sample_primary_component_percents loads the buildings CSV per
    # client, so forward the client argument (its signature requires it)
    building_percents = self.construct_sample_primary_component_percents(buildingtypes, client)
    # BUG FIX: the percent entries are keyed 'primary_component', not 'building'
    # (see construct_sample_primary_component_percents)
    buildings = [building_percent['primary_component'] for building_percent in building_percents]
    building_dict = map_to_dict(
        lambda building: [building['building_attribute_set']['name'], building], buildings)
    buildingtype_dict = map_to_dict(
        lambda buildingtype: [buildingtype['building_attribute_set']['name'], buildingtype],
        buildingtypes)
    return {'placetypes': placetypes,
            'placetype_components': buildingtypes,
            'primary_components': buildings,
            'primary_component_percents': building_percents,
            'building_use_percents': self.construct_building_use_percents(building_dict, client=client),
            'placetype_component_percents': self.construct_placetype_component_percents(
                map_to_dict(lambda placetype: [placetype['building_attribute_set']['name'], placetype],
                            placetypes),
                buildingtype_dict,
                client=client)}
def create_result_map(self, related_models=[], map_path_segments={}):
    """
    Given the field_paths of the queryset, returns a ResultMap instance.
    ResultMap.result_fields is a list of field_paths minus specifically omitted
    ones-- the parent id and geometry column.
    ResultMap.title_lookup is a lookup from the field_path to a title appropriate
    for the user. The generated title uses '.' in place of '__'
    ResultMap.value_map is a lookup from the field_path to a property path that
    describes how to convert the value to a more human readable form. This is used
    to convert instances to a readable label and dates, etc, to a more readable format.
    :param: related_models: pass the related_models represented in the query results
        so that unneeded paraent reference fields can be removed from the result fields
    :param: map_path_segments: An optional dict that matches segments of the
        field_paths. The value corresponding the key is the name to convert it to
        for the title. If the value is None it will be eliminated from the path
        when it is rejoined with '.'
    """
    result_paths = self.model_result_paths(related_models)
    # Get a mapping of the each result field_path to its field class path along
    # with the related model of that field, if the field is a ForeignKey or AutoField
    result_field_path_lookup = self.model_result_field_path_field_lookup(
        related_models, True)
    # Record each related model's base class module path for the join
    join_models = map(lambda model: full_module_path(model.__base__), related_models)
    return ResultMap(
        # Replace '__' with '_x_'. We can't use __ because it confuses tastypie
        result_fields=map(lambda path: string.replace(path, '__', '_x_'), result_paths),
        # Create a lookup from field name to title
        # The only processing we do to the title is to remove the middle path
        title_lookup=map_to_dict(
            lambda path: [
                # Replace '__' with '_x_'. We can't use __ because it confuses tastypie
                string.replace(path, '__', '_x_'),
                # match each segment to map_path_segments or failing that return the segment
                # remove segments that map to None
                # NOTE(review): segments are rejoined with '__' here, though the
                # docstring says '.' -- confirm which is intended
                '__'.join(
                    compact(
                        map(
                            lambda segment: map_path_segments.get(
                                segment, segment),
                            path.split('__'))))
            ],
            result_paths),
        # Map each (munged) field path to its field class path
        field_lookup=map_dict_to_dict(
            lambda field_path, tup: [field_path, tup[0]],
            result_field_path_lookup),
        # Only paths that actually resolve to a related model survive compact_dict
        related_model_lookup=compact_dict(
            map_dict_to_dict(lambda field_path, tup: [field_path, tup[1]],
                             result_field_path_lookup)),
        join_models=join_models,
    )
def update_or_create_projects(region_fixtures=config_entities_fixture.regions(), **kwargs):
    """
    Create test projects according to the samples.
    NOTE(review): the default region_fixtures is evaluated once at import time
    (mutable-default pattern) -- confirm this is intended.
    :param region_fixtures: region fixture dicts used to create parent regions
    :param kwargs: optional 'limit_to_classes' restricting which model classes
        may be created; forwarded to update_of_create_regions
    :return: list of the created/updated Project instances
    """
    regions = update_of_create_regions(region_fixtures, **kwargs)
    # Index the regions by key so each project can find its parent
    regions_by_key = map_to_dict(lambda region: [region.key, region], regions)
    project_fixtures = config_entities_fixture.projects()

    def update_or_create_project(project_dict):
        # Returns a (project, created, updated) tuple
        if kwargs.get('limit_to_classes') and Project not in kwargs['limit_to_classes']:
            # Project creation is excluded: the project must already exist
            if Project.objects.filter(key=project_dict['key']).count() != 1:
                raise Exception("Trying to get Project %s, which hasn't been created" % project_dict['key'])
            project_tuple = Project.objects.get(key=project_dict['key']), False, False
        else:
            # Create or update the project from the fixture dict, attaching it
            # to its parent region and the superadmin creator
            project_tuple = Project.objects.update_or_create(
                key=project_dict['key'],
                defaults=merge(
                    dict(
                        behavior=get_behavior('default_config_entity'),
                    ),
                    remove_keys(project_dict, ['key', 'base_table', 'region_key', 'media']),
                    dict(
                        parent_config_entity=regions_by_key[project_dict['region_key']],
                        creator=User.objects.get(username=UserGroupKey.SUPERADMIN)
                    )
                ))
        logger.info("{update_or_create} Project {config_entity}".format(
            update_or_create='Created' if project_tuple[1] else 'Updated',
            config_entity=project_tuple[0]))
        # Create/update each Medium fixture and associate any new ones
        media = map(lambda medium_config: Medium.objects.update_or_create(
            key=medium_config.key,
            defaults=remove_keys(medium_config.__dict__['kwargs'], 'key'))[0],
            project_dict.get('media', []))
        existing_media = project_tuple[0].media.filter(id__in=map(lambda medium: medium.id, media))
        media_to_add = set(media) - set(existing_media)
        if len(media_to_add) > 0:
            project_tuple[0].media.add(*media_to_add)
        return project_tuple

    projects_created_updated = map(
        lambda project_fixture: update_or_create_project(project_fixture),
        project_fixtures)
    # Return just the Project instances, dropping the created/updated flags
    return map(lambda project_created_updated: project_created_updated[0],
               projects_created_updated)
def related_field_paths_to_fields(
    manager, related_model, exclude_field_types=(), fields=None, excludes=[], separator=None
):
    """
    Iterates through the fields of the related model, appending each to the related
    model field name from the main model. Returns the dict of related field paths
    as keys valued by the field.
    :param exclude_field_types: Optional tuple of Field classes that should be filtered out.
    :param separator: Optional separator with which to replace __ in the related_field_paths
    """
    related_field = resolve_related_model_path_via_geographies(manager, related_model)
    # map_to_dict drops entries mapped to None, so rejected fields vanish
    return map_to_dict(
        lambda field: None if not field_predicate(field, exclude_field_types, fields, excludes)
        else _field_path_and_cloned_field(related_field, field, separator),
        related_model._meta.fields,
    )
def create_layer_selection(config_entity, layer, attribute_id):
    """
    Build the TileStache vector layer config(s) for the given layer's per-user
    feature selection.
    :param config_entity: the ConfigEntity scoping the layer and its users
    :param layer: the Layer whose selected features should be served
    :param attribute_id: attribute id embedded in the generated layer key
    :return: list of TSLayer instances (empty when no user has permissions)
    """
    db_entity = layer.db_entity_interest.db_entity
    connection = connection_dict(layer.config_entity.db)
    tilestache_layers = []
    # Users with permissions on either the config_entity or the db_entity
    users = set(get_users_with_perms(config_entity)) | set(get_users_with_perms(layer.db_entity_interest.db_entity))
    # Make sure layer_selection instances exist for the users
    # (local import -- presumably avoids a circular import; confirm)
    from footprint.main.publishing.layer_publishing import update_or_create_layer_selections_for_layer
    update_or_create_layer_selections_for_layer(layer, users=users)
    logger.info("Get/Create layer_selection for config_entity %s, layer %s, users %s" %
                (config_entity.key, layer.db_entity_key, ','.join(map(lambda user: user.username, users))))
    # Each layer has a dynamic class representing its SelectedFeature table
    get_or_create_layer_selection_class_for_layer(layer)
    if not users:
        return tilestache_layers
    # NOTE(review): return value unused -- presumably called for its side effect
    config_entity.db_entity_feature_class(key=layer.db_entity_key)
    layer_selection_class = get_or_create_layer_selection_class_for_layer(layer, config_entity)
    # Take the first user to create a template query
    user = list(users)[0]
    # Each LayerSelection instance is per user
    layer_selection = layer_selection_class.objects.get_or_create(user=user)[0]
    # Extract the query from the QuerySet, replacing the concrete
    # layer_selection id with a {user_id} placeholder for later substitution
    query = re.sub(
        r'"layer_selection_id" = \d+',
        r'"layer_selection_id" = {user_id}',
        str(layer_selection.selected_features.values('wkb_geometry', 'id').query))
    logger.info("Creating tilestache layer_selection for layer %s, user %s, query: %s" % (
        layer.full_name, user.username, query))
    # Map each user id to that user's layer_selection id for the placeholder
    user_id_lookup = map_to_dict(lambda layer_selection: [layer_selection.user.id, layer_selection.id],
                                 layer_selection_class.objects.all())
    # Embed the id in the Geojson for each feature.
    # Nothing else is needed, since all other attributes can be looked up based on the id
    id_field = map(lambda field: field.name + '_id', layer_selection.feature_class._meta.parents.values())[0]
    vector_selection_layer = build_vector_layer_config(
        parameters=merge(connection, dict(query=query, column="wkb_geometry", user_id_lookup=user_id_lookup)),
        provider_id_property=id_field,
        client_id_property=db_entity._meta.pk.name
    )
    layer_key = "layer:{layer},attr_id:{attribute},type:{type}".format(
        layer=layer.id, attribute=attribute_id, type='selection')
    logger.info("Creating layer %s" % layer_key)
    tilestache_layers.append(TSLayer(key=layer_key, value=vector_selection_layer))
    return tilestache_layers
def create_result_map(self, related_models=[], map_path_segments={}):
    """
    Given the field_paths of the queryset, returns a ResultMap instance.
    ResultMap.result_fields is a list of field_paths minus specifically omitted
    ones-- the parent id and geometry column.
    ResultMap.title_lookup is a lookup from the field_path to a title appropriate
    for the user. The generated title uses '.' in place of '__'
    ResultMap.value_map is a lookup from the field_path to a property path that
    describes how to convert the value to a more human readable form. This is used
    to convert instances to a readable label and dates, etc, to a more readable format.
    :param: related_models: pass the related_models represented in the query results
        so that unneeded paraent reference fields can be removed from the result fields
    :param: map_path_segments: An optional dict that matches segments of the
        field_paths. The value corresponding the key is the name to convert it to
        for the title. If the value is None it will be eliminated from the path
        when it is rejoined with '.'
    """
    result_paths = self.model_result_paths(related_models)
    # Get a mapping of the each result field_path to its field class path along
    # with the related model of that field, if the field is a ForeignKey or AutoField
    result_field_path_lookup = self.model_result_field_path_field_lookup(related_models, True)
    # Record each related model's base class module path for the join
    join_models = map(lambda model: full_module_path(model.__base__), related_models)
    return ResultMap(
        # Replace '__' with '_x_'. We can't use __ because it confuses tastypie
        result_fields=map(lambda path: string.replace(path, '__', '_x_'), result_paths),
        # Create a lookup from field name to title
        # The only processing we do to the title is to remove the middle path
        title_lookup=map_to_dict(
            lambda path: [
                # Replace '__' with '_x_'. We can't use __ because it confuses tastypie
                string.replace(path, '__', '_x_'),
                # match each segment to map_path_segments or failing that return the segment
                # remove segments that map to None
                # NOTE(review): segments are rejoined with '__' here, though the
                # docstring says '.' -- confirm which is intended
                '__'.join(compact(
                    map(
                        lambda segment: map_path_segments.get(segment, segment),
                        path.split('__')
                    )
                ))
            ],
            result_paths
        ),
        # Map each (munged) field path to its field class path
        field_lookup=map_dict_to_dict(lambda field_path, tup: [field_path, tup[0]],
                                      result_field_path_lookup),
        # Only paths that actually resolve to a related model survive compact_dict
        related_model_lookup=compact_dict(map_dict_to_dict(lambda field_path, tup: [field_path, tup[1]],
                                                           result_field_path_lookup)),
        join_models=join_models,
    )
def construct_placetype_component_percents(self, placetype_dict, placetype_component_dict, client):
    """
    Build, per placetype, the mapping of placetype component name to its
    category and percent, read from the client's buildingtype CSV.
    :param placetype_dict: dict of placetype name -> placetype dict
    :param placetype_component_dict: NOTE(review): this parameter is immediately
        shadowed by a local dict below and never read -- confirm it is unused
    :param client: client key used to select the fixture CSVs
    :return: dict of placetype name -> {component name: {'category': ..., 'percent': ...}}
    """
    import_placetype_components = [
        placetype_component for placetype_component in self.load_buildingtype_csv(client)
    ]
    input_placetypes = self.load_placetype_csv(client)
    # Index the CSV placetypes by name so each placetype's percent column can be found
    input_placetype_dict = map_to_dict(
        lambda input_placetype: [input_placetype.name, input_placetype],
        input_placetypes)
    # Shadows the parameter of the same name (see docstring note)
    placetype_component_dict = dict()
    for name, placetype in placetype_dict.items():
        placetype_name = placetype['building_attribute_set']['name'].strip(
        )
        for input_placetype_component in import_placetype_components:
            # The percent for this placetype is stored in a column named after
            # the placetype's clean_name
            placetype_component_percent = getattr(
                input_placetype_component,
                input_placetype_dict[placetype_name].clean_name)
            placetype_component_name = input_placetype_component.name.strip(
            )
            # Ensure an (initially empty) entry exists for this placetype
            default_placetype_component_dict = dict()
            placetype_component_dict[
                placetype_name] = placetype_component_dict.get(
                    placetype_name, default_placetype_component_dict)
            if placetype_component_percent > 0:
                category = input_placetype_component.category.strip()
                if category:
                    placetype_component_dict[placetype_name][
                        placetype_component_name] = {
                            'category': category,
                            'percent': placetype_component_percent,
                        }
    return placetype_component_dict
def construct_placetypes(self, client):
    """
    Build PlaceType dicts from the client's placetype CSV, keyed by name.
    :return: PlaceType objects (UrbanFootprint v0.1 Built Form default set),
        as a dict keyed by the placetype's building_attribute_set name
    """
    def build_placetype(row):
        # Fall back to a neutral gray when the CSV supplies no color
        return dict(
            type='urban_placetype',
            building_attribute_set=dict(name=row.name),
            color=row.color if row.color else '#B0B0B0',
            intersection_density=row.intersection_density
        )

    constructed = [build_placetype(row) for row in self.load_placetype_csv(client)]
    return map_to_dict(
        lambda placetype: [placetype['building_attribute_set']['name'], placetype],
        constructed)
def model_dict(model_instance, include_null=False, include_many=False, omit_fields=[], include_primary_key=False):
    """
    Returns a dict keyed by field name and valued by model_instance's corresponding
    field value. Primary keys are not included unless include_primary_key is True.
    :param model_instance: The model instance
    :param include_null: Default False, set True to return fields that evalate to null
    :param include_many: Default False, set True to also include many-to-many fields
    :param omit_fields: Default [], list fields to omit. This is good if there are
        fields that throw an error if null or are simply unwanted
    :param include_primary_key: Default False, set True to include the primary key field
    """
    if not model_instance:
        return dict()
    opts = model_instance.__class__._meta
    candidate_fields = opts.fields + (opts.many_to_many if include_many else [])

    def wanted(field):
        # Skip primary keys unless requested, plus anything explicitly omitted
        if field.primary_key and not include_primary_key:
            return False
        return field.name not in omit_fields

    return map_to_dict(
        lambda field: field_value(model_instance, field, include_null),
        [field for field in candidate_fields if wanted(field)])
def __new__(meta, name, bases, attrs):
    # Metaclass constructor for a dynamically computed model class.
    # NOTE(review): relies on closure variables from the enclosing factory
    # (computed_name, fields, related_class_lookup, class_attrs, base_class)
    # that are not visible in this chunk -- behavior depends on their values.

    # Register any additional model fields specified in fields
    def add_field(sender, **kwargs):
        # Only act on the class we are constructing here
        if sender.__name__ == computed_name:
            for field_name, field in fields.items():
                field.contribute_to_class(sender, field_name)
    class_prepared.connect(add_field)

    def create_class_property(class_attr):
        # For an attr like 'config_entity__id', resolve the related class that
        # the stored id points to
        related_attr = class_attr.split('__')[0]
        related_class_name = related_class_lookup.get(
            related_attr, None)
        if not related_class_name:
            raise Exception(
                "Expected related_class_lookup to contain %s, since class_attrs contain %s" % (related_attr, class_attr))
        related_class = resolve_module_attr(related_class_name)

        # Create the getter property that uses the class manager to lookup up the related model by id
        def getter(cls):
            return related_class.objects.get(
                id=getattr(cls, class_attr))
        return ClassProperty(classmethod(getter))

    # Create class-level getter properties to resolve things like the config_entity since we only store the id
    class_properties = map_to_dict(
        lambda class_attr: [class_attr.split('__')[0], create_class_property(class_attr)],
        filter(lambda class_attr: class_attr.endswith('__id'), class_attrs))
    return models.base.ModelBase.__new__(
        meta, computed_name, (base_class, ),
        # Merge the manager objects (if the abstract_class has one) with attrs and class_attrs
        merge(
            dict(objects=base_class.objects.__class__()) if hasattr(
                base_class, 'objects') else {},
            attrs,
            class_attrs,
            class_properties))
def reduce_dict_to_difference(dct, comparison_dict, deep=True):
    """
    Given a dict dct and a similar dict comparison dict, return a new dict that
    only contains the key/values of dct that are different than comparison dict,
    whether it's a key not in comparison_dict or a matching key with a different
    value. Specify deep=True to do a comparison of internal dicts
    # TODO This could handle list comparison better for deep=True. Right now it
    just marks the lists as different if they are not equal
    :param dct:
    :param comparison_dict:
    :param deep: Default True, compares embedded dictionaries by recursing
    :return: A new dict containing the differences
    """
    differ = DictDiffer(dct, comparison_dict)
    return merge(
        # Find keys and key values changed at the top level
        map_to_dict(lambda key: [key, dct[key]],
                    flatten([differ.added(), differ.changed()])),
        # If deep==True recurse on dictionaries defined on the values.
        # NOTE: '*X if deep else {}' parses as '*(X if deep else {})'; unpacking
        # the empty dict contributes no positional args, so deep=False merges
        # only the top-level differences
        *map(lambda key: reduce_dict_to_difference(*map(lambda dictionary: dictionary[key], [dct, comparison_dict])),
             # recurse on inner each dict pair
             # Find just the keys with dict values
             filter(lambda key: isinstance(dct[key], dict), differ.unchanged())) if deep else {}
    )
def construct_primary_components(self, client='default'):
    """
    Build primary component dicts from the client's crops and buildings CSVs.
    :return: Dictionary keyed by Building name and valued by Building objects
        (UrbanFootprint v0.1 Built Form default set)
    """
    primary_components = {}
    # Crop-based components: copy every AgricultureAttributeSet field straight
    # off the CSV row, dropping the id
    for import_primary_component in self.load_crops_csv(client):
        fields = AgricultureAttributeSet._meta.fields
        agriculture_attribute_set = remove_keys(map_to_dict(
            lambda field: [field.attname, getattr(import_primary_component, field.attname)],
            fields), ['id'])
        agriculture_attribute_set['name'] = import_primary_component.name
        if import_primary_component.name in primary_components:
            raise Exception("Duplicate entry for primary component: " + import_primary_component.name)
        primary_components[import_primary_component.name] = dict(
            agriculture_attribute_set=agriculture_attribute_set)
    # Building-based components: map each CSV column explicitly onto the
    # building_attribute_set dict
    for import_primary_component in self.load_buildings_csv(client):
        building_attribute_set = dict(
            name=import_primary_component.name,
            address=import_primary_component.address,
            website=import_primary_component.website,
            lot_size_square_feet=import_primary_component.lot_size_square_feet,
            floors=import_primary_component.floors,
            total_far=import_primary_component.total_far,
            average_parking_space_square_feet=import_primary_component.average_parking_space_square_feet,
            surface_parking_spaces=import_primary_component.surface_parking_spaces,
            below_ground_structured_parking_spaces=import_primary_component.below_ground_parking_spaces,
            above_ground_structured_parking_spaces=import_primary_component.above_ground_parking_spaces,
            building_footprint_square_feet=import_primary_component.building_footprint_square_feet,
            surface_parking_square_feet=import_primary_component.surface_parking_square_feet,
            hardscape_other_square_feet=import_primary_component.hardscape_other_square_feet,
            irrigated_softscape_square_feet=import_primary_component.irrigated_softscape_square_feet,
            nonirrigated_softscape_square_feet=import_primary_component.nonirrigated_softscape_square_feet,
            irrigated_percent=import_primary_component.irrigated_percent,
            vacancy_rate=import_primary_component.vacancy_rate,
            household_size=import_primary_component.household_size
        )
        # Names must be unique across both crops and buildings
        if import_primary_component.name in primary_components:
            raise Exception("Duplicate entry for primary component: " + import_primary_component.name)
        primary_components[import_primary_component.name] = dict(
            building_attribute_set=building_attribute_set)
    return primary_components
def parse_query(self, config_entity, query=None):
    """
    Parses the stored QuerySet components of the DbEntity to create an actual QuerySet
    :param config_entity: ConfigEntity used to resolve feature classes/tables
    :param query: pass in a query instead of using self.query. Used by
        run_grouped_query to add the group_by
    :return: for a string query, the list of row dicts from the database; for a
        structured query, a single-element list holding the reduced QuerySet;
        an empty dict when no query is defined
    """
    query = query or self.query
    if not query:
        logger.error(
            "Trying to run query for db_entity %s, which has no query defined or passed in" % self.full_name)
        return {}
    # Use the base table of the Feature class for now. We'll need to use the rel table soon.
    manager = config_entity.db_entity_feature_class(
        self.key, base_feature_class=True).objects
    if isinstance(query, basestring):
        # String query
        # Join the query with the base tables of the feature classes that match
        # the db_entity_keys listed as named wildcards in the query string.
        # They are in the form %(db_entity_key)
        db_entity_keys = re.findall(r'%\((.+?)\)', query)
        # Substitute each wildcard with the matching feature class's table name
        formatted_query = query % map_to_dict(
            lambda db_entity_key: [
                db_entity_key,
                config_entity.db_entity_feature_class(
                    db_entity_key, base_feature_class=True)._meta.db_table
            ],
            db_entity_keys)
        cursor = connection.cursor()
        cursor.execute(formatted_query)
        result_value = dictfetchall(cursor)
        # always return results as a list so that multiple rows can be returned
        return result_value
    else:
        # Combine the query parts by folding each queryset command into the manager
        return [
            accumulate(
                lambda manager, queryset_command: self.
                parse_and_exec_queryset_command(manager, queryset_command),
                manager, query)
        ]
def instantiate_sub_class(self, feature_class, feature): """ Instantiates an instance of the dynamic subclass of GeoJsonFeature based on the given feature. :param feature: A feature parsed django-geojson. The feature is actually reserialized to json in order to construct a GEOSGeometry instance. :return: An instance of the GeoJsonFeature subclass, which contains the geometry, properties of the feature, and perhaps the crs """ # TODO, crs should be read from the geojson when present. # This crs isn't actually picked up by the GEOSGeometry constructor srid = settings.SRID_PREFIX.format(settings.DEFAULT_SRID) crs = { "type": "name", "properties": { "name": srid } } # Ironically, we have to rejsonify the data so that GEOSGeometry can parse the feature as json json = jsonify({'type':feature.geometry.type, 'coordinates':feature.geometry.coordinates, 'crs':crs}) geometry = GEOSGeometry(json) field_dict = map_to_dict(lambda field: [field.name, feature.properties[field.name]], filter(lambda field: feature.properties.get(field.name, None), feature_class._meta.fields)) return feature_class(wkb_geometry=geometry, **field_dict)