def built_form_sets(config_entity):
    """
    Constructs and persists buildings, buildingtypes, and placetypes and their
    associates and then returns them all as a persisted BuiltFormSet.
    One BuiltFormSet is returned in an array
    :param config_entity: the ConfigEntity used to scope the built form fixture
    """
    from footprint.client.configuration.fixture import BuiltFormFixture
    from footprint.client.configuration.utils import resolve_fixture

    json_fixture = os.path.join(settings.ROOT_PATH, 'built_form_fixture.json')
    # Resolve the built_form fixture scoped for the client at this config_entity's class scope
    built_form_fixture = resolve_fixture(
        "built_form", "built_form", BuiltFormFixture, settings.CLIENT,
        config_entity=config_entity)

    json_fixture_missing = not os.path.exists(json_fixture)
    if settings.IMPORT_BUILT_FORMS == 'CSV' or json_fixture_missing:
        logger.info('Importing built forms from csv source')
        # Create any built_form class sets configured for the client
        built_forms_dict = built_form_fixture.built_forms()
        built_form_fixture.tag_built_forms(built_forms_dict)
        all_built_forms = flatten(built_forms_dict.values())
        # Persist one BuiltFormSet per configured built_form_set configuration
        return [
            update_or_create_built_form_set(set_config, all_built_forms)
            for set_config in built_form_fixture.built_form_sets()]
    elif settings.IMPORT_BUILT_FORMS == 'JSON' and not BuiltForm.objects.count():
        # Only load from the JSON fixture when no BuiltForms exist yet
        logger.info('Importing built forms from json fixture at ' + json_fixture)
        call_command('loaddata', json_fixture)
        return {}
def test_config_entity_api__permissions(self):
    """
    Make sure that users only get ConfigEntity's that match their permission settings
    :return:
    """
    permission_configuration = TestConfigEntityPermissions.config_entity_configuration()
    resource_name = 'config_entity'

    # Iterate through the test_configurations and extract a user for each group_key.
    # Make a dict with the user as the key and all the instances from the test_config
    # that the user corresponds to. This gives a lookup of a user to the
    # config_entities that we expect the user to be able to view.
    # Combine our {user1:instances, user2:instances,...} dicts
    user_to_expected_instances = merge_dict_list_values(
        *map(
            lambda test_configuration:
            # Combine our [user, instance] pairs into {user1:instances, user2:instances,...}
            # Remove null keys (owing to groups with no users)
            compact_dict(map_to_dict_with_lists(
                # Each test_configuration has several groups.
                # For each group resolve a user and return [user, instance]
                lambda group_key: [
                    get_first_value_or_none(Group.objects.get(name=group_key).user_set.all()),
                    test_configuration['instance']],
                test_configuration['groups'].keys())),
            permission_configuration.test_configuration)
    )

    all_instances = set(unique(flatten(user_to_expected_instances.values())))
    for user, instances in user_to_expected_instances.items():
        other_instances = all_instances - set(instances)
        # Fetch all instances with this user and create a lookup so we can test
        # that the resulting instances are present or not present as expected according to
        # the permissions
        response = self.get(resource_name, user=user)
        result_instance_lookup = map_to_dict(
            lambda instance_dict: [int(instance_dict['id']), instance_dict],
            self.deserialize(response)['objects'])

        for instance in instances:
            matching_instance = result_instance_lookup.get(instance.id)
            # BUG FIX: message previously read "but does." for the positive case
            assert_is_not_none(
                matching_instance,
                "User %s should have view permission to instance %s with id %s and key %s but does not." %
                (user.username,
                 instance,
                 instance.id,
                 permission_configuration.key_class.Fab.remove(
                     permission_configuration.instance_key_lambda(instance))))

        for instance in other_instances:
            # BUG FIX: previously this loop asserted on the stale matching_instance
            # bound in the loop above, so it never actually checked the excluded
            # instances. Look each one up explicitly.
            matching_instance = result_instance_lookup.get(instance.id)
            assert_is_none(
                matching_instance,
                "User %s should not have view permission to instance %s with id %s and key %s but does." %
                (user.username,
                 instance,
                 instance.id,
                 permission_configuration.key_class.Fab.remove(
                     permission_configuration.instance_key_lambda(instance))))
def reduce_dict_to_difference(dct, comparison_dict, deep=True):
    """
    Given a dict dct and a similar dict comparison_dict, return a new dict that
    only contains the key/values of dct that are different than comparison_dict,
    whether it's a key not in comparison_dict or a matching key with a different
    value. Specify deep=True to do a comparison of internal dicts
    # TODO This could handle list comparison better for deep=True. Right now it
    just marks the lists as different if they are not equal
    :param dct:
    :param comparison_dict:
    :param deep: Default True, compares embedded dictionaries by recursing
    :return: A new dict containing the differences
    """
    differ = DictDiffer(dct, comparison_dict)
    # Keys added or whose values changed at the top level
    top_level_differences = map_to_dict(
        lambda key: [key, dct[key]],
        flatten([differ.added(), differ.changed()]))
    if deep:
        # Among keys whose top-level values are "unchanged", recurse into any
        # that hold dicts to pick up deeper differences
        dict_valued_keys = filter(
            lambda key: isinstance(dct[key], dict), differ.unchanged())
        nested_differences = map(
            lambda key: reduce_dict_to_difference(dct[key], comparison_dict[key]),
            dict_valued_keys)
    else:
        nested_differences = []
    return merge(top_level_differences, *nested_differences)
def hydrate_presentations(self, bundle):
    """
    Does the reverse of dehydrate_presentations. If the user actually wanted
    to create new presentations via the API they'd simply save a presentation
    pointing to the correct configEntity, so we could probably just disregard
    this list on post/patch/put.
    :param bundle:
    :return:
    """
    data = bundle.data
    if data.get('id', 0) == 0:
        # We can't handle presentations on new config_entities yet.
        # One problem is that tastypie/django doesn't like presentations that
        # are actually layer_libraries and result_libraries that have layers
        # and results, respectively
        data['presentations'] = None
        return bundle

    presentations = data.get('presentations', None)
    if not presentations:
        data['presentations'] = []
    elif isinstance(presentations, dict):
        # A keyed dict of presentations collapses to a flat list
        data['presentations'] = flatten(presentations.values())
    else:
        data['presentations'] = presentations
    return bundle
def aggregate_within_variable_distance(distance_options):
    """
    Aggregates the configured variable fields of rows in a source table that fall
    within a per-row variable distance, writing the results back onto the target
    table. Work is fanned out across one worker thread per CPU core, each running
    the same parameterized insert over a contiguous range of source ids.

    :param distance_options: dict of table/column configuration; keys read here:
        'variable_field_list', 'aggregation_type', 'source_table',
        'source_table_query', 'target_table_schema', 'target_table',
        'target_table_query', 'target_table_pk', 'suffix'
    """
    thread_count = count_cores()
    queue = queue_process()

    # Build the "out <field> float" declaration list for the pgSQL function signature
    sql_format = 'out {formatter} float'.format(formatter="{0}")
    output_field_format = create_sql_calculations(distance_options['variable_field_list'], sql_format, ', ')

    # Build the select-list of aggregate expressions, e.g. "cast(sum(x) as float) as x"
    sql_format = 'cast({aggregation_type}({formatter}) as float) as {formatter}'.format(
        formatter="{0}", aggregation_type=distance_options['aggregation_type'])
    sql_calculations_format = create_sql_calculations(distance_options['variable_field_list'], sql_format, ', ')

    # Drop any previous version of the helper function (signature changes would
    # otherwise make CREATE OR REPLACE fail)
    pSql = ''' drop function if exists aggregate_within_variable_distance_tool(
    in_id int,
    in_distance float,
    in_geometry geometry,
    out id int,
    out wkb_geometry geometry,
    {output_field_format}) cascade;'''.format(
        output_field_format=output_field_format)
    execute_sql(pSql)

    # NOTE(review): the second parameter is named id_distance here but
    # in_distance in the drop above — harmless since only position matters,
    # but presumably a typo for in_distance
    pSql = '''
    CREATE OR REPLACE FUNCTION aggregate_within_variable_distance_tool(
      in_id int,
      id_distance float,
      in_geometry geometry,
      out id int,
      out wkb_geometry geometry,
      {output_field_format})
    AS
    $$
      select $1 as id, $3 as wkb_geometry,
      {sql_calculations_format}
      from {source_table} ref
          WHERE st_dwithin($3, ref.wkb_geometry, $2) and (ref.{source_table_query});
    $$
    COST 10000 language SQL STABLE strict;
    '''.format(source_table=distance_options['source_table'],
               source_table_query=distance_options['source_table_query'],
               output_field_format=output_field_format,
               sql_calculations_format=sql_calculations_format)
    execute_sql(pSql)

    # Recreate the scratch table that collects per-id aggregation results
    drop_table('{target_table_schema}.{target_table}_{suffix}'.format(
        target_table_schema=distance_options['target_table_schema'],
        target_table=distance_options['target_table'],
        suffix=distance_options['suffix']))

    # NOTE(review): the suffix kwarg is unused by this format string
    sql_format = '{formatter} float'.format(formatter="{0}", suffix=distance_options['suffix'])
    output_table_field_format = create_sql_calculations(distance_options['variable_field_list'], sql_format, ', ')

    pSql = '''create table {target_table_schema}.{target_table}_{suffix}
    (id int, wkb_geometry geometry, {output_table_field_format});'''.format(
        target_table_schema=distance_options['target_table_schema'],
        target_table=distance_options['target_table'],
        suffix=distance_options['suffix'],
        output_table_field_format=output_table_field_format)
    execute_sql(pSql)

    # Collect the ordered list of source ids so the work can be split into ranges
    pSql = 'select cast(id as int) from {source_table} where id is not null order by id'.format(
        source_table=distance_options['source_table'])
    id_list = flatten(report_sql_values(pSql, 'fetchall'))

    # Range placeholders {start_id}/{end_id} are filled in per work item by the workers;
    # the source table is assumed to carry a per-row "distance" column — TODO confirm
    insert_sql = '''
    insert into {target_table_schema}.{target_table}_{suffix}
      select (f).* from (
        select aggregate_within_variable_distance_tool(id, distance, wkb_geometry) as f
        from {source_table}
        where id >= {bottom_range_id} and id <= {top_range_id} and {source_table_query}
      ) s where (f).id is not null;
    '''.format(
        target_table_schema=distance_options['target_table_schema'],
        source_table_query=distance_options['source_table_query'],
        target_table=distance_options['target_table'],
        source_table=distance_options['source_table'],
        suffix=distance_options['suffix'],
        bottom_range_id="{start_id}",
        top_range_id="{end_id}")

    # One daemon worker per core, all consuming the shared queue
    for i in range(thread_count):
        t = MultithreadProcess(queue, insert_sql)
        t.setDaemon(True)
        t.start()

    #populate queue with data
    rows_per_thread = len(id_list) / thread_count
    offset = 0
    for i in range(thread_count):
        if i == thread_count - 1:
            ## last bucket gets any remainder, too
            last_thread = len(id_list) - 1
        else:
            last_thread = offset + rows_per_thread - 1
        rows_to_process = {
            'start_id': id_list[offset],
            'end_id': id_list[last_thread]
        }
        offset += rows_per_thread
        queue.put(rows_to_process)

    #wait on the queue until everything has been processed
    queue.join()

    add_attribute_idx(distance_options['target_table_schema'],
                      '{target_table}_{suffix}'.format(
                          target_table=distance_options['target_table'],
                          suffix=distance_options['suffix']), 'id')

    # Copy the aggregated values onto the target table, coalescing nulls to 0
    update_table_field_format = create_sql_calculations(
        distance_options['variable_field_list'],
        '{0} = (case when b.{0}_var is null then 0 else b.{0}_var end)', ', ')
    select_format = create_sql_calculations(
        distance_options['variable_field_list'], '{0} as {0}_var', ', ')

    pSql = '''
    update {target_table_schema}.{target_table} a set {update_table_field_format}
        from (select id as {suffix}_id, wkb_geometry, {select_format}
              from {target_table_schema}.{target_table}_{suffix}) b
    where st_intersects(st_centroid(a.analysis_geom), b.wkb_geometry) and {target_table_query};
    '''.format(
        target_table_schema=distance_options['target_table_schema'],
        target_table=distance_options['target_table'],
        target_table_query=distance_options['target_table_query'],
        target_table_pk=distance_options['target_table_pk'],
        update_table_field_format=update_table_field_format,
        select_format=select_format,
        suffix=distance_options['suffix']
    )
    execute_sql(pSql)

    # Clean up the scratch table
    drop_table('{target_table_schema}.{target_table}_{suffix}'.format(
        target_table_schema=distance_options['target_table_schema'],
        target_table=distance_options['target_table'],
        suffix=distance_options['suffix']))
def calculate_distance(distance_options): print 'Calculating distance from target features areas' ##ST_DISTANCE returns distances in meters from geometries in WGS84 projection if set to false thread_count = count_cores() queue = queue_process() #if the source table query has not results set all values to the max and break zero_values_check = report_sql_values( '''select sum(*) from {source_table} where {source_table_query};'''.format(**distance_options), 'fetchone') if len(zero_values_check) == 0: pSql = ''' update {target_table_schema}.{target_table} a set {column} = {maximum_distance} where {target_table_query} and {column} = 0 '''.format(**distance_options) execute_sql(pSql) return pSql = '''drop function if exists distance_tool( in_id int, in_wkb_geometry geometry, out id int, out {column} float) cascade;'''.format(**distance_options) execute_sql(pSql) pSql = ''' CREATE OR REPLACE FUNCTION distance_tool( in_id int, in_wkb_geometry geometry, out id int, out {column} float) AS $$ select $1 as id, cast(st_distance(st_centroid($2), st_centroid(ref.geometry)) as float) as {column} from (select *, {source_geometry_column} as geometry from {source_table}) ref where ST_DWITHIN($2, ref.geometry, {maximum_distance}) and ({source_table_query}) order by {column}; $$ COST 10000 language SQL STABLE strict; '''.format(**distance_options) execute_sql(pSql) drop_table('{target_table_schema}.{target_table}_{column}'.format(**distance_options)) pSql = ''' create table {target_table_schema}.{target_table}_{column} (id int, {column} float); '''.format(**distance_options) execute_sql(pSql) id_list = flatten(report_sql_values( '''select cast({target_table_pk} as int) from {target_table_schema}.{target_table} where {target_table_query} order by {target_table_pk}'''.format( **distance_options), 'fetchall')) insert_sql = ''' insert into {target_table_schema}.{target_table}_{column} select (f).* from ( select distance_tool(a.id, a.wkb_geometry) as f from (select {target_table_pk} as id, 
{target_geometry_column} as wkb_geometry from {target_table_schema}.{target_table} where {target_table_pk} >= {bottom_range_id} and {target_table_pk} <= {top_range_id} and ({target_table_query}) ) a ) s; '''.format(bottom_range_id="{start_id}", top_range_id="{end_id}", **distance_options) for i in range(thread_count): t = MultithreadProcess(queue, insert_sql) t.setDaemon(True) t.start() #populate queue with data rows_per_thread = len(id_list) / thread_count offset = 0 for i in range(thread_count): if i == thread_count - 1: ## last bucket gets any remainder, too last_thread = len(id_list) - 1 else: last_thread = offset + rows_per_thread - 1 rows_to_process = { 'start_id': id_list[offset], 'end_id': id_list[last_thread] } offset += rows_per_thread queue.put(rows_to_process) #wait on the queue until everything has been processed queue.join() add_attribute_idx(distance_options['target_table_schema'], '{target_table}_{column}'.format(**distance_options), 'id') pSql = ''' update {target_table_schema}.{target_table} a set {column} = source_column from (select id as source_id, {column} as source_column from {target_table_schema}.{target_table}_{column}) b where cast(a.{target_table_pk} as int) = b.source_id and ({target_table_query}) '''.format(**distance_options) execute_sql(pSql) pSql = ''' update {target_table_schema}.{target_table} a set {column} = {maximum_distance} where {target_table_query} and {column} = 0 '''.format(**distance_options) execute_sql(pSql) drop_table('{target_table_schema}.{target_table}_{column}'.format(**distance_options))
def aggregate_within_distance(distance_options):
    """
    Aggregates the source columns configured in 'variable_field_dict' over all
    source rows within a fixed distance of each target row, then writes the
    (null-coalesced) sums back onto the target table. Work is fanned out across
    one worker thread per CPU core over contiguous ranges of target pks.

    :param distance_options: dict of configuration; keys read here:
        'variable_field_dict' (target column -> list of source columns),
        'aggregation_type', 'suffix', 'distance', 'source_table',
        'source_table_query', 'source_geometry_column', 'target_table_schema',
        'target_table', 'target_table_query', 'target_table_pk',
        'target_geometry_column'
    """
    thread_count = count_cores()
    queue = queue_process()

    # Collect the distinct set of source columns referenced by any target field
    source_table_column_list = []
    for key, value in distance_options['variable_field_dict'].items():
        source_table_column_list += value
    source_table_column_list = list(set(source_table_column_list))

    # "out <field> float" declarations for the pgSQL function signature
    sql_format = 'out {formatter} float'.format(formatter="{0}")
    output_field_format = create_sql_calculations(source_table_column_list, sql_format, ', ')

    # Aggregate select-list, e.g. "cast(sum(x) as float) as x_<suffix>"
    sql_format = 'cast({aggregation_type}({formatter}) as float) as {formatter}_{suffix}'.format(
        formatter="{0}", **distance_options)
    sql_calculations_format = create_sql_calculations(source_table_column_list, sql_format, ', ')

    # Drop any previous version of the helper function (signature changes would
    # otherwise make CREATE OR REPLACE fail)
    pSql = '''drop function if exists aggregate_within_distance_tool(
    in_id int,
    in_wkb_geometry geometry,
    out id int,
    {output_field_format}) cascade;'''.format(
        output_field_format=output_field_format)
    execute_sql(pSql)

    pSql = '''
    CREATE OR REPLACE FUNCTION aggregate_within_distance_tool(
      in_id int,
      in_wkb_geometry geometry,
      out id int,
      {output_field_format})
    AS
    $$
      select $1 as id,
      {sql_calculations_format}
      from (select *, {source_geometry_column} as geometry from {source_table}) ref
          where ST_DWITHIN( $2, ref.geometry, {distance}) and (ref.{source_table_query});
    $$
    COST 10000 language SQL STABLE strict;
    '''.format(source_table=distance_options['source_table'],
               source_table_query=distance_options['source_table_query'],
               distance=distance_options['distance'],
               source_geometry_column=distance_options['source_geometry_column'],
               output_field_format=output_field_format,
               sql_calculations_format=sql_calculations_format)
    execute_sql(pSql)

    # Recreate the scratch table that collects per-id aggregation results
    drop_table('{target_table_schema}.{target_table}_{suffix}'.format(
        target_table_schema=distance_options['target_table_schema'],
        target_table=distance_options['target_table'],
        suffix=distance_options['suffix']))

    sql_format = '{formatter}_{suffix} float'.format(formatter="{0}", **distance_options)
    output_table_field_format = create_sql_calculations(source_table_column_list, sql_format, ', ')

    pSql = '''create table {target_table_schema}.{target_table}_{suffix}
    (id int, {output_table_field_format});'''.format(
        target_table_schema=distance_options['target_table_schema'],
        target_table=distance_options['target_table'],
        suffix=distance_options['suffix'],
        output_table_field_format=output_table_field_format)
    execute_sql(pSql)

    # Ordered target pks let the work be split into contiguous id ranges
    pSql = 'select cast({target_table_pk} as int) from {target_table_schema}.{target_table} where {target_table_query} order by {target_table_pk}'.format(
        target_table_schema=distance_options['target_table_schema'],
        target_table=distance_options['target_table'],
        target_table_pk=distance_options['target_table_pk'],
        target_table_query=distance_options['target_table_query'])
    id_list = flatten(report_sql_values(pSql, 'fetchall'))

    # {start_id}/{end_id} placeholders are filled in per work item by the workers;
    # "offset 0" presumably acts as a planner optimization fence — TODO confirm
    insert_sql = '''
    insert into {target_table_schema}.{target_table}_{suffix}
      select (f).* from (
        select aggregate_within_distance_tool({target_table_pk}, {target_geometry_column}) as f
        from {target_table_schema}.{target_table}
        where {target_table_pk} >= {bottom_range_id} and {target_table_pk} <= {top_range_id}
          and {target_table_query} offset 0) s
    where (f).id is not null;
    '''.format(
        target_table_schema=distance_options['target_table_schema'],
        target_table=distance_options['target_table'],
        target_table_query=distance_options['target_table_query'],
        target_geometry_column=distance_options['target_geometry_column'],
        source_table=distance_options['source_table'],
        suffix=distance_options['suffix'],
        target_table_pk=distance_options['target_table_pk'],
        bottom_range_id="{start_id}",
        top_range_id="{end_id}")

    # One daemon worker per core, all consuming the shared queue
    for i in range(thread_count):
        t = MultithreadProcess(queue, insert_sql)
        t.setDaemon(True)
        t.start()

    #populate queue with data
    rows_per_thread = len(id_list) / thread_count
    offset = 0
    for i in range(thread_count):
        if i == thread_count - 1:
            ## last bucket gets any remainder, too
            last_thread = len(id_list) - 1
        else:
            last_thread = offset + rows_per_thread - 1
        rows_to_process = {
            'start_id': id_list[offset],
            'end_id': id_list[last_thread]
        }
        offset += rows_per_thread
        queue.put(rows_to_process)

    #wait on the queue until everything has been processed
    queue.join()

    add_attribute_idx(distance_options['target_table_schema'],
                      '{target_table}_{suffix}'.format(
                          target_table=distance_options['target_table'],
                          suffix=distance_options['suffix']), 'id')

    # Build "target_col = (case when b.a_suffix + b.b_suffix ... is null then 0 else ... end)"
    # assignments, one per entry of variable_field_dict
    count = 1
    update_sql_format = ''
    if len(distance_options['variable_field_dict']) > 0:
        for key, value in distance_options['variable_field_dict'].items():
            update_table_field_format = create_sql_calculations(
                value, '{formatter}_{suffix}'.format(formatter='b.{0}', **distance_options), ' + ')
            if count == 1:
                update_sql_format += key + ' = ' + "(case when {0} is null then 0 else {0} end)".format(update_table_field_format)
            else:
                update_sql_format += ', ' + key + ' = ' + "(case when {0} is null then 0 else {0} end)".format(update_table_field_format)
            count += 1

    pSql = '''
    update {target_table_schema}.{target_table} a set {update_sql_format}
        from (select * from {target_table_schema}.{target_table}_{suffix}) b
    where a.{target_table_pk} = b.id and {target_table_query}
    '''.format(
        target_table_schema=distance_options['target_table_schema'],
        target_table=distance_options['target_table'],
        target_table_query=distance_options['target_table_query'],
        target_table_pk=distance_options['target_table_pk'],
        update_sql_format=update_sql_format,
        suffix=distance_options['suffix']
    )
    execute_sql(pSql)

    # Clean up the scratch table
    drop_table('{target_table_schema}.{target_table}_{suffix}'.format(
        target_table_schema=distance_options['target_table_schema'],
        target_table=distance_options['target_table'],
        suffix=distance_options['suffix']))
def test_config_entity_api__permissions(self):
    """
    Make sure that users only get ConfigEntity's that match their permission settings
    :return:
    """
    permission_configuration = TestConfigEntityPermissions.config_entity_configuration()
    resource_name = 'config_entity'

    # Iterate through the test_configurations and extract a user for each group_key.
    # Make a dict with the user as the key and all the instances from the test_config
    # that the user corresponds to. This gives a lookup of a user to the
    # config_entities that we expect the user to be able to view.
    # Combine our {user1:instances, user2:instances,...} dicts
    user_to_expected_instances = merge_dict_list_values(
        *map(
            lambda test_configuration:
            # Combine our [user, instance] pairs into {user1:instances, user2:instances,...}
            # Remove null keys (owing to groups with no users)
            compact_dict(map_to_dict_with_lists(
                # Each test_configuration has several groups.
                # For each group resolve a user and return [user, instance]
                lambda group_key: [
                    get_first_value_or_none(Group.objects.get(name=group_key).user_set.all()),
                    test_configuration['instance']],
                test_configuration['groups'].keys())),
            permission_configuration.test_configuration)
    )

    all_instances = set(unique(flatten(user_to_expected_instances.values())))
    for user, instances in user_to_expected_instances.items():
        other_instances = all_instances - set(instances)
        # Fetch all instances with this user and create a lookup so we can test
        # that the resulting instances are present or not present as expected according to
        # the permissions
        response = self.get(resource_name, user=user)
        result_instance_lookup = map_to_dict(
            lambda instance_dict: [int(instance_dict['id']), instance_dict],
            self.deserialize(response)['objects'])

        for instance in instances:
            matching_instance = result_instance_lookup.get(instance.id)
            # BUG FIX: message previously read "but does." for the positive case
            assert_is_not_none(
                matching_instance,
                "User %s should have view permission to instance %s with id %s and key %s but does not." %
                (user.username,
                 instance,
                 instance.id,
                 permission_configuration.key_class.Fab.remove(
                     permission_configuration.instance_key_lambda(instance))))

        for instance in other_instances:
            # BUG FIX: previously this loop asserted on the stale matching_instance
            # bound in the loop above, so it never actually checked the excluded
            # instances. Look each one up explicitly.
            matching_instance = result_instance_lookup.get(instance.id)
            assert_is_none(
                matching_instance,
                "User %s should not have view permission to instance %s with id %s and key %s but does." %
                (user.username,
                 instance,
                 instance.id,
                 permission_configuration.key_class.Fab.remove(
                     permission_configuration.instance_key_lambda(instance))))
def __init__(self, db_entity, attribute):
    """
    Captures the db_entity/attribute pair and precomputes the flat list of
    unique values for the attribute.
    :param db_entity: the DbEntity whose feature_class is inspected
    :param attribute: the attribute whose unique values are collected
    """
    self.db_entity = db_entity
    self.attribute = attribute
    self.feature_class = db_entity.feature_class
    # get_unique_values yields dict-like rows; gather every row's values into one flat list
    self.unique_values = flatten(
        [row.values() for row in self.get_unique_values])
def __init__(self, db_entity, attribute):
    """
    Stores the given db_entity and attribute and eagerly resolves the
    attribute's unique values into a single flat list.
    :param db_entity: the DbEntity whose feature_class is inspected
    :param attribute: the attribute whose unique values are collected
    """
    self.db_entity = db_entity
    self.attribute = attribute
    self.feature_class = db_entity.feature_class
    # Each entry of get_unique_values is dict-like; flatten all of their values together
    value_lists = map(lambda entry: entry.values(), self.get_unique_values)
    self.unique_values = flatten(value_lists)