def handle(self, *args, **options):
    """
    Management-command entry point: rebuilds the 'sample_data' database by
    dumping each default DbEntity table of the first Project into it.
    """
    # Start from a clean slate
    drop_db('sample_data')

    # NOTE(review): assumes at least one Project exists; raises IndexError otherwise
    project = Project.objects.all()[0]
    client_fixture = ConfigEntityFixture.resolve_config_entity_fixture(project)
    # BUG FIX: default_db_entities is a method on the fixture elsewhere in this
    # codebase (crud_db_entities calls client_fixture.default_db_entities());
    # the original accessed the attribute without calling it, which would
    # iterate over the bound method object and fail.
    default_db_entities = client_fixture.default_db_entities()
    for db_entity_config in default_db_entities:
        importer = ImportData(config_entity=project, db_entity=db_entity_config)
        importer.target_database = settings.DATABASES['sample_data']
        importer.create_target_db_string()
        # For now we only import data for DbEntity instances with a configured database url.
        # NOTE(review): db_entity_config is subscripted like a dict ('url', 'table');
        # confirm the fixture yields dict-style configurations here rather than
        # DbEntity instances.
        connection_dict = postgres_url_to_connection_dict(db_entity_config['url'])
        # The import database currently stores tables as
        # public.[config_entity.key]_[feature_class._meta.db_table (with schema removed)][_sample (for samples)]
        # We always use the table name without the word sample for the target table name
        source_table = "{0}_{1}_{2}".format(project.key, db_entity_config['table'], 'sample')
        importer._dump_tables_to_target(
            '-t %s' % source_table,
            source_schema='public',
            target_schema='public',
            source_table=source_table,
            target_table=source_table,
            connection_dict=connection_dict)
def test__db_entity__permissions__match(self):
    """
    Verify that DbEntity instances at each level of the ConfigEntity
    hierarchy carry the default permissions configured by their fixtures.
    """
    fabricate_key = DemoDbEntityKey.Fab.ricate
    scenario = Scenario.objects.get(key='irthorn_base_condition')

    def lookup_db_entity(key):
        # Resolve the scenario's DbEntity by its fabricated key
        return DbEntityInterest.objects.get(
            db_entity__key=fabricate_key(key),
            config_entity=scenario).db_entity

    # Map each ConfigEntity subclass in the hierarchy to the default
    # DbEntity permissions declared by its fixture
    class_to_default_db_entity_permissions = map_to_dict(
        lambda config_entity: [
            config_entity.__class__,
            ConfigEntityFixture.resolve_config_entity_fixture(
                config_entity).default_db_entity_permissions()],
        [scenario] + scenario.ancestors)

    # Exercise a small configuration, one DbEntity per hierarchy level
    PermissionTesting([
        # Test Region DbEntity permissions
        dict(instance=lookup_db_entity(DemoDbEntityKey.CPAD_HOLDINGS),
             groups=class_to_default_db_entity_permissions[Region]),
        # Test Project permissions
        dict(instance=lookup_db_entity(DemoDbEntityKey.PROJECT_EXISTING_LAND_USE_PARCELS),
             groups=class_to_default_db_entity_permissions[Project]),
        # Test Scenario permissions
        dict(instance=lookup_db_entity(DemoDbEntityKey.EXISTING_LAND_USE_PARCELS),
             groups=class_to_default_db_entity_permissions[Scenario]),
    ], DemoDbEntityKey).test_permissions()
def test__db_entity__permissions__match(self):
    """
    Asserts that DbEntities at each ConfigEntity level of the hierarchy carry
    the default permissions their fixtures declare.
    """
    db_entity_key = DemoDbEntityKey.Fab.ricate
    scenario = Scenario.objects.get(key='irthorn_base_condition')
    get_db_entity = lambda key: DbEntityInterest.objects.get(
        db_entity__key=db_entity_key(key),
        config_entity=scenario).db_entity

    def default_permissions_entry(config_entity):
        # Pair the ConfigEntity subclass with its fixture's default
        # DbEntity permissions
        fixture = ConfigEntityFixture.resolve_config_entity_fixture(config_entity)
        return [config_entity.__class__, fixture.default_db_entity_permissions()]

    class_to_default_db_entity_permissions = map_to_dict(
        default_permissions_entry, [scenario] + scenario.ancestors)

    # Build a small configuration covering one DbEntity per hierarchy level
    test_configuration = [
        # Test Region DbEntity permissions
        dict(instance=get_db_entity(DemoDbEntityKey.CPAD_HOLDINGS),
             groups=class_to_default_db_entity_permissions[Region]),
        # Test Project permissions
        dict(instance=get_db_entity(DemoDbEntityKey.PROJECT_EXISTING_LAND_USE_PARCELS),
             groups=class_to_default_db_entity_permissions[Project]),
        # Test Scenario permissions
        dict(instance=get_db_entity(DemoDbEntityKey.EXISTING_LAND_USE_PARCELS),
             groups=class_to_default_db_entity_permissions[Scenario]),
    ]
    PermissionTesting(test_configuration, DemoDbEntityKey).test_permissions()
def _update_or_create_config_entity_groups(config_entity):
    """
    Updates/Creates all the ConfigEntity-specific groups of the given ConfigEntity.

    :param config_entity:
    :return:
    """
    from footprint.client.configuration.fixture import ConfigEntityFixture

    # Each ConfigEntity class has its own groups according to the global groups
    # listed in its configuration; iterate through the configured global groups
    # and create ConfigEntity-specific versions.
    # The exception is GlobalConfig, whose ConfigEntity Group is the SuperAdmin
    # Group. Its fixture returns nothing here since SuperAdmin is a global Group
    # as well, so it needn't be treated as a ConfigEntity Group.
    fixture = ConfigEntityFixture.resolve_config_entity_fixture(config_entity)

    def sync_group(global_group_name):
        # Create or update the ConfigEntity-scoped version of this global group
        return _update_or_create_config_entity_group(config_entity, global_group_name)

    return map(sync_group, fixture.default_config_entity_groups())
def config_entity_groups(self):
    """
    Get ConfigEntity-specific groups of the given ConfigEntity.

    :return: Group objects of the ConfigEntity
    """
    from footprint.client.configuration.fixture import ConfigEntityFixture

    # Each ConfigEntity class has its own groups according to the configured
    # global groups listed in its fixture; map each configured global group
    # name to this instance's ConfigEntity Group.
    fixture = ConfigEntityFixture.resolve_config_entity_fixture(self)
    # GlobalConfig contributes nothing here since the SuperAdmin Group is both
    # its Global Group and ConfigEntity Group; compact drops the empty entries
    return compact([self.config_entity_group(global_group_name)
                    for global_group_name in fixture.default_config_entity_groups()])
def _update_or_create_config_entity_groups(config_entity):
    """
    Updates/Creates all the ConfigEntity-specific groups of the given ConfigEntity.

    :param config_entity:
    :return:
    """
    from footprint.client.configuration.fixture import ConfigEntityFixture

    # Each ConfigEntity class declares global groups in its configuration;
    # create/refresh a ConfigEntity-specific version of each of them.
    # GlobalConfig is the exception: its ConfigEntity Group is the SuperAdmin
    # Group, which is already a global Group, so its fixture yields no group
    # names here.
    fixture = ConfigEntityFixture.resolve_config_entity_fixture(config_entity)
    return [_update_or_create_config_entity_group(config_entity, global_group_name)
            for global_group_name in fixture.default_config_entity_groups()]
def test_upload_layers(config_entity):
    """
    Tests layer upload for every importable DbEntity configuration of the
    given ConfigEntity, creating and then deleting the instances.

    :param config_entity: the ConfigEntity whose layer uploads are exercised
    :return: the per-layer upload results
    """
    from footprint.client.configuration.fixture import ConfigEntityFixture
    fixture = ConfigEntityFixture.resolve_config_entity_fixture(config_entity)

    # Delete anything that already exists matching the DbEntity configurations
    for db_entity in fixture.import_db_entity_configurations():
        delete_upload_layer(db_entity, config_entity)

    # Upload each layer and collect the results
    results = [test_upload_layer(db_entity, config_entity)
               for db_entity in fixture.import_db_entity_configurations()]

    # Clean up everything we created above
    for db_entity in fixture.import_db_entity_configurations():
        delete_upload_layer(db_entity, config_entity)
    return results
def handle(self, *args, **options):
    """
    Rebuild the 'sample_data' database: for each default DbEntity of the
    first Project, dump its sample table into the target database.
    """
    drop_db('sample_data')

    # NOTE(review): assumes at least one Project exists; raises IndexError otherwise
    project = Project.objects.all()[0]
    client_fixture = ConfigEntityFixture.resolve_config_entity_fixture(project)

    # BUG FIX: default_db_entities is invoked as a method elsewhere in this
    # codebase (see crud_db_entities' client_fixture.default_db_entities());
    # accessing it without calling would iterate the bound method and fail.
    for db_entity_config in client_fixture.default_db_entities():
        importer = ImportData(config_entity=project, db_entity=db_entity_config)
        importer.target_database = settings.DATABASES['sample_data']
        importer.create_target_db_string()

        # For now we only import data for DbEntity instances with a configured database url.
        # NOTE(review): db_entity_config is used as a dict ('url', 'table') here;
        # confirm the fixture returns dict-style configurations.
        connection_dict = postgres_url_to_connection_dict(db_entity_config['url'])

        # The import database currently stores tables as
        # public.[config_entity.key]_[feature_class._meta.db_table (with schema removed)][_sample (for samples)]
        # We always use the table name without the word sample for the target table name
        source_table = "{0}_{1}_{2}".format(project.key, db_entity_config['table'], 'sample')
        importer._dump_tables_to_target(
            '-t %s' % source_table,
            source_schema='public',
            target_schema='public',
            source_table=source_table,
            target_table=source_table,
            connection_dict=connection_dict)
def on_db_entity_post_save_user(sender, **kwargs):
    """
    Give all ConfigEntities Groups in the hierarchy appropriate permissions

    :param sender:
    :param kwargs:
    :return:
    """
    db_entity_interest = InstanceBundle.extract_single_instance(**kwargs)
    if db_entity_interest.deleted:
        # Nothing to do for deleted instances
        return

    config_entity = db_entity_interest.config_entity.subclassed
    db_entity = db_entity_interest.db_entity
    logger.info("Handler: on_db_entity_post_save_user. DbEntity: %s" % db_entity.full_name)

    # Get the ConfigEntity Group(s) for the DbEntity's ConfigEntity.
    # The groups are configured based on the subclass of ConfigEntity
    config_entity_groups = config_entity.config_entity_groups()

    # Sync permissions for the DbEntity:
    # resolve the default DbEntity permissions for the DbEntity's ConfigEntity.
    # FIX: removed a redundant second assignment of config_entity (it was
    # computed identically above).
    from footprint.client.configuration.fixture import ConfigEntityFixture
    config_entity_fixture = ConfigEntityFixture.resolve_config_entity_fixture(config_entity)
    permission_lookup = config_entity_fixture.default_db_entity_permissions()
    logger.info(
        "For ConfigEntity {config_entity} will apply DbEntity permission to groups "
        "{groups} based on permission_lookup {permission_lookup}".format(
            groups=', '.join(map(lambda group: group.name, config_entity_groups)),
            config_entity=config_entity.name,
            permission_lookup=permission_lookup))
    sync_config_entity_group_permissions(
        db_entity,
        config_entity_groups,
        permission_lookup,
        permission_key_class=DbEntityPermissionKey,
        **kwargs)

    # Repeat the process for all descendant ConfigEntities, just giving their Groups view
    # permission. There's no obvious use case for a ConfigEntity Group of a child
    # ConfigEntity having edit permission to the DbEntity of a parent ConfigEntity.
    for descendant in config_entity.descendants():
        descendant_config_entity_groups = descendant.config_entity_groups()
        # The permission lookup maps the global version of each group to VIEW permission.
        # Example: The Manager Group of Project Foo would just map to UserGroupKey.MANAGER
        descendant_permission_lookup = map_to_dict(
            lambda config_entity_group: [
                config_entity_group.group_hierarchy.globalized_group().name,
                PermissionKey.VIEW],
            descendant_config_entity_groups)
        # FIX: the original literal used a backslash line-continuation inside the
        # string, splicing stray source indentation into the log message;
        # implicit concatenation keeps the message clean.
        logger.info(
            "For descendant ConfigEntity {config_entity} will apply DbEntity permission "
            "to groups {groups} based on permission_lookup {permission_lookup}".format(
                groups=', '.join(map(lambda group: group.name, descendant_config_entity_groups)),
                config_entity=descendant.name,
                permission_lookup=descendant_permission_lookup))
        # Apply view permissions to the ConfigEntity groups of each descendant ConfigEntity
        sync_config_entity_group_permissions(
            db_entity,
            descendant_config_entity_groups,
            descendant_permission_lookup,
            permission_key_class=DbEntityPermissionKey,
            process_parent_config_entity_groups=False,
            **kwargs)
    reset_queries()
def on_db_entity_post_save_user(sender, **kwargs):
    """
    Give all ConfigEntities Groups in the hierarchy appropriate permissions

    :param sender:
    :param kwargs:
    :return:
    """
    db_entity_interest = InstanceBundle.extract_single_instance(**kwargs)
    if db_entity_interest.deleted:
        # Nothing to do for deleted instances
        return

    config_entity = db_entity_interest.config_entity.subclassed
    db_entity = db_entity_interest.db_entity
    logger.info("Handler: on_db_entity_post_save_user. DbEntity: %s" % db_entity.full_name)

    # Get the ConfigEntity Group(s) for the DbEntity's ConfigEntity.
    # The groups are configured based on the subclass of ConfigEntity
    config_entity_groups = config_entity.config_entity_groups()

    # Sync permissions for the DbEntity: resolve the default DbEntity
    # permissions for the DbEntity's ConfigEntity.
    # FIX: dropped the duplicate recomputation of config_entity that the
    # original performed here (same expression as the assignment above).
    from footprint.client.configuration.fixture import ConfigEntityFixture
    config_entity_fixture = ConfigEntityFixture.resolve_config_entity_fixture(
        config_entity)
    permission_lookup = config_entity_fixture.default_db_entity_permissions()
    logger.info(
        "For ConfigEntity {config_entity} will apply DbEntity permission to groups "
        "{groups} based on permission_lookup {permission_lookup}".format(
            groups=', '.join(map(lambda group: group.name, config_entity_groups)),
            config_entity=config_entity.name,
            permission_lookup=permission_lookup))
    sync_config_entity_group_permissions(
        db_entity,
        config_entity_groups,
        permission_lookup,
        permission_key_class=DbEntityPermissionKey,
        **kwargs)

    # Repeat the process for all descendant ConfigEntities, just giving their Groups view
    # permission. There's no obvious use case for a ConfigEntity Group of a child
    # ConfigEntity having edit permission to the DbEntity of a parent ConfigEntity.
    for descendant in config_entity.descendants():
        descendant_config_entity_groups = descendant.config_entity_groups()
        # The permission lookup maps the global version of each group to VIEW permission.
        # Example: The Manager Group of Project Foo would just map to UserGroupKey.MANAGER
        descendant_permission_lookup = map_to_dict(
            lambda config_entity_group: [
                config_entity_group.group_hierarchy.globalized_group().name,
                PermissionKey.VIEW],
            descendant_config_entity_groups)
        # FIX: replaced the backslash continuation that was embedded inside the
        # log string (it spliced raw source indentation into the message) with
        # implicit string concatenation.
        logger.info(
            "For descendant ConfigEntity {config_entity} will apply DbEntity permission "
            "to groups {groups} based on permission_lookup {permission_lookup}".format(
                groups=', '.join(map(lambda group: group.name, descendant_config_entity_groups)),
                config_entity=descendant.name,
                permission_lookup=descendant_permission_lookup))
        # Apply view permissions to the ConfigEntity groups of each descendant ConfigEntity
        sync_config_entity_group_permissions(
            db_entity,
            descendant_config_entity_groups,
            descendant_permission_lookup,
            permission_key_class=DbEntityPermissionKey,
            process_parent_config_entity_groups=False,
            **kwargs)
    reset_queries()
def crud_db_entities(config_entity, crud, db_entity_keys=None):
    """
    Creates or updates the db_entities of the ConfigEntity

    :param config_entity: the ConfigEntity whose DbEntities are processed
    :param crud: CrudKey.CREATE, CrudType.CLONE, CrudType.UPDATE, CrudType.SYNC,
        CrudType.DELETE (unimplemented)
    :param db_entity_keys: optional keys restricting which DbEntities are processed
    :return:
    """
    from footprint.client.configuration.fixture import ConfigEntityFixture

    # If not present, create the database schema for this ConfigEntity's feature table data
    PGNamespace.objects.create_schema(config_entity.schema())
    client_fixture = ConfigEntityFixture.resolve_config_entity_fixture(config_entity)
    db_entity_filter = dict(key__in=db_entity_keys) if db_entity_keys else {}

    # Process the DbEntities from the origin_instance or the db_entity_configuration
    # from the fixtures, but only the first time this scenario is saved.
    # We only get those scoped (owned) by the class of our config_entity. Those scoped
    # above will be adopted automatically and need not be created. This means a Scenario
    # creates DbEntities scoped to Scenario and adopts those scoped to Project or Region.
    # It does not clone the latter.
    if crud == CrudKey.CLONE:
        # CRUD the DbEntities to match the origin instance
        origin_instance = config_entity.origin_instance
        # Clone the DbEntities from the origin ConfigEntity (the clones happen
        # eagerly as a side effect of building this list)
        db_entities = [
            clone_or_update_db_entity_and_interest(
                config_entity,
                source_db_entity,
                DbEntity(
                    schema=config_entity.schema(),
                    feature_class_configuration=FeatureClassConfiguration(
                        geography_scope=FeatureClassCreator(config_entity).resolved_geography_scope.id,
                        class_attrs={
                            'config_entity__id': config_entity.id,
                            'override_db': config_entity.db,
                            'db_entity_key': source_db_entity.key}))).db_entity
            for source_db_entity in origin_instance.owned_db_entities(**db_entity_filter)]
    elif crud in [CrudKey.SYNC, CrudKey.CREATE]:
        # TODO examine the two conditions below more carefully.
        # We want syncing to be the same for clones and non-clones
        if config_entity.origin_instance:
            # Syncing previously cloned instance
            db_entities = config_entity.owned_db_entities(**db_entity_filter)
            update_or_create_db_entities_and_interests(config_entity, *db_entities)
        else:
            # Create or Sync new instance.
            # Restrict to the requested keys when db_entity_keys is given
            matches_keys = lambda db_entity: \
                db_entity.key in db_entity_keys if db_entity_keys else True
            # Get the default DbEntity configurations from the fixture
            default_db_entities = filter(matches_keys, client_fixture.default_db_entities())
            # Find additional owned (not adopted) db_entities that aren't defaults,
            # namely those that were created by the user
            additional_db_entities = filter(
                matches_keys, client_fixture.non_default_owned_db_entities())
            # Combine the defaults with the additions
            db_entities = default_db_entities + list(additional_db_entities)
            update_or_create_db_entities_and_interests(config_entity, *db_entities)
    elif crud == CrudKey.UPDATE:
        # No complex updates are enabled for scenarios, so no post-save processing is needed
        return
    elif crud == CrudKey.DELETE:
        raise NotImplementedError("DELETE is not implemented")

    # Disable the post_post_save signal while saving to prevent an infinite loop
    previous = config_entity._no_post_save_publishing
    config_entity._no_post_save_publishing = True
    # Save post_create changes. This is just to store selected DbEntities
    config_entity.save()
    config_entity._no_post_save_publishing = previous
    reset_queries()
def crud_db_entities(config_entity, crud, db_entity_keys=None):
    """
    Creates or updates the db_entities of the ConfigEntity

    :param config_entity: the ConfigEntity being synced
    :param crud: CrudKey.CREATE, CrudType.CLONE, CrudType.UPDATE, CrudType.SYNC,
        CrudType.DELETE (unimplemented)
    :param db_entity_keys: optional list of keys limiting the DbEntities processed
    :return:
    """
    from footprint.client.configuration.fixture import ConfigEntityFixture

    # If not present, create the database schema for this ConfigEntity's feature table data
    PGNamespace.objects.create_schema(config_entity.schema())
    fixture = ConfigEntityFixture.resolve_config_entity_fixture(config_entity)
    owned_filter = dict(key__in=db_entity_keys) if db_entity_keys else {}

    # Process the DbEntities from the origin_instance or the db_entity_configuration
    # from the fixtures, but only the first time this scenario is saved.
    # Only those scoped (owned) by the class of our config_entity are fetched; those
    # scoped above are adopted automatically and need not be created. A Scenario thus
    # creates DbEntities scoped to Scenario and adopts those scoped to Project or
    # Region — it does not clone the latter.
    if CrudKey.CLONE == crud:
        # CRUD the DbEntities to match the origin instance

        def clone_from(source_db_entity):
            # Clone one DbEntity from the origin ConfigEntity into this one
            template = DbEntity(
                schema=config_entity.schema(),
                feature_class_configuration=FeatureClassConfiguration(
                    geography_scope=FeatureClassCreator(config_entity).resolved_geography_scope.id,
                    class_attrs={
                        'config_entity__id': config_entity.id,
                        'override_db': config_entity.db,
                        'db_entity_key': source_db_entity.key}))
            return clone_or_update_db_entity_and_interest(
                config_entity, source_db_entity, template).db_entity

        origin_instance = config_entity.origin_instance
        db_entities = map(clone_from, origin_instance.owned_db_entities(**owned_filter))
    elif crud in [CrudKey.SYNC, CrudKey.CREATE]:
        # TODO examine the two conditions below more carefully.
        # We want syncing to be the same for clones and non-clones
        if config_entity.origin_instance:
            # Syncing previously cloned instance
            db_entities = config_entity.owned_db_entities(**owned_filter)
            update_or_create_db_entities_and_interests(config_entity, *db_entities)
        else:
            # Create or Sync new instance

            def key_requested(db_entity):
                # True when no key restriction was given or the key matches
                return db_entity.key in db_entity_keys if db_entity_keys else True

            # Default DbEntity configurations from the fixture
            default_db_entities = filter(key_requested, fixture.default_db_entities())
            # Additional owned (not adopted) db_entities that aren't defaults,
            # namely those that were created by the user
            additional_db_entities = filter(
                key_requested, fixture.non_default_owned_db_entities())
            # Combine the defaults with the additions
            db_entities = default_db_entities + list(additional_db_entities)
            update_or_create_db_entities_and_interests(config_entity, *db_entities)
    elif CrudKey.UPDATE == crud:
        # No complex updates are enabled for scenarios, so no post-save processing is needed
        return
    elif CrudKey.DELETE == crud:
        raise NotImplementedError("DELETE is not implemented")

    # Disable the post_post_save signal while saving to prevent an infinite loop
    previous = config_entity._no_post_save_publishing
    config_entity._no_post_save_publishing = True
    # Save post_create changes. This is just to store selected DbEntities
    config_entity.save()
    config_entity._no_post_save_publishing = previous
    reset_queries()